storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/data-usage-cache.go

/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/cespare/xxhash/v2"
	"github.com/klauspost/compress/zstd"
	"github.com/tinylib/msgp/msgp"

	"storj.io/minio/cmd/logger"
	"storj.io/minio/pkg/bucket/lifecycle"
	"storj.io/minio/pkg/hash"
	"storj.io/minio/pkg/madmin"
)

//go:generate msgp -file $GOFILE -unexported

// dataUsageHash is the hash type used.
type dataUsageHash string

// sizeHistogram is a size histogram.
type sizeHistogram [dataUsageBucketLen]uint64

//msgp:tuple dataUsageEntry
type dataUsageEntry struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	ObjSizes         sizeHistogram
	ReplicationStats replicationStats
}

//msgp:tuple replicationStats
type replicationStats struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	ReplicaSize          uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}

//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}

//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
	// These fields do not include any children.
	Size                   int64
	ReplicatedSize         uint64
	ReplicationPendingSize uint64
	ReplicationFailedSize  uint64
	ReplicaSize            uint64
	Objects                uint64
	ObjSizes               sizeHistogram
	Children               dataUsageHashMap
}

// dataUsageCache contains a cache of data usage entries, latest version 4.
type dataUsageCache struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntry
	Disks []string
}

// dataUsageCacheV2 contains a cache of data usage entries, version 2.
type dataUsageCacheV2 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV2
}

// dataUsageCacheV3 contains a cache of data usage entries, version 3.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Disks []string
	Cache map[string]dataUsageEntryV3
}

//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
	Name   string
	Parent string
	Entry  dataUsageEntry
}

type dataUsageCacheInfo struct {
	// Name of the bucket. Also root element.
	Name       string
	NextCycle  uint32
	LastUpdate time.Time
	// SkipHealing indicates the disk is being healed and the scanner
	// should skip healing the disk.
	SkipHealing bool
	BloomFilter []byte               `msg:"BloomFilter,omitempty"`
	lifeCycle   *lifecycle.Lifecycle `msg:"-"`
}
func (e *dataUsageEntry) addSizes(summary sizeSummary) {
	e.Size += summary.totalSize
	e.ReplicationStats.ReplicatedSize += uint64(summary.replicatedSize)
	e.ReplicationStats.FailedSize += uint64(summary.failedSize)
	e.ReplicationStats.PendingSize += uint64(summary.pendingSize)
	e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)
	e.ReplicationStats.PendingCount += uint64(summary.pendingCount)
	e.ReplicationStats.FailedCount += uint64(summary.failedCount)
}

// merge other data usage entry into this, excluding children.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
	e.Objects += other.Objects
	e.Size += other.Size
	e.ReplicationStats.PendingSize += other.ReplicationStats.PendingSize
	e.ReplicationStats.FailedSize += other.ReplicationStats.FailedSize
	e.ReplicationStats.ReplicatedSize += other.ReplicationStats.ReplicatedSize
	e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
	e.ReplicationStats.PendingCount += other.ReplicationStats.PendingCount
	e.ReplicationStats.FailedCount += other.ReplicationStats.FailedCount

	for i, v := range other.ObjSizes[:] {
		e.ObjSizes[i] += v
	}
}

// mod returns true if the hash mod cycles == cycle.
// If cycles is 0 false is always returned.
// If cycles is 1 true is always returned (as expected).
func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool {
	if cycles <= 1 {
		return cycles == 1
	}
	return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles
}

// addChildString will add a child based on its name.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChildString(name string) {
	e.addChild(hashPath(name))
}

// addChild will add a child based on its hash.
// If it already exists it will not be added again.
func (e *dataUsageEntry) addChild(hash dataUsageHash) {
	if _, ok := e.Children[hash.Key()]; ok {
		return
	}
	if e.Children == nil {
		e.Children = make(dataUsageHashMap, 1)
	}
	e.Children[hash.Key()] = struct{}{}
}

// find a path in the cache.
// Returns nil if not found.
func (d *dataUsageCache) find(path string) *dataUsageEntry {
	due, ok := d.Cache[hashPath(path).Key()]
	if !ok {
		return nil
	}
	return &due
}

// findChildrenCopy returns a copy of the children of the supplied hash.
func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap {
	ch := d.Cache[h.String()].Children
	res := make(dataUsageHashMap, len(ch))
	for k := range ch {
		res[k] = struct{}{}
	}
	return res
}

// subCache returns a subtree of the cache rooted at path.
// Returns an empty cache (with only Info set) if not found.
func (d *dataUsageCache) subCache(path string) dataUsageCache {
	dst := dataUsageCache{Info: dataUsageCacheInfo{
		Name:        path,
		LastUpdate:  d.Info.LastUpdate,
		BloomFilter: d.Info.BloomFilter,
	}}
	dst.copyWithChildren(d, dataUsageHash(hashPath(path).Key()), nil)
	return dst
}

// deleteRecursive deletes an entry and all its children.
func (d *dataUsageCache) deleteRecursive(h dataUsageHash) {
	if existing, ok := d.Cache[h.String()]; ok {
		// Delete first in case there is a loop.
		delete(d.Cache, h.Key())
		for child := range existing.Children {
			d.deleteRecursive(dataUsageHash(child))
		}
	}
}
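// Illustrative usage sketch, not part of the original file: mod is how a
// caller can spread scanning work over cycles. The cycle values below are
// hypothetical; a given hash keeps mapping to the same cycle for as long as
// the total number of cycles is unchanged.
func exampleModUsage(h dataUsageHash) bool {
	const currentCycle, totalCycles = 3, 16
	// Roughly 1/16th of all hashes return true for any given cycle.
	return h.mod(currentCycle, totalCycles)
}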
// replaceRootChild will replace the child of root in d with the root of 'other'.
func (d *dataUsageCache) replaceRootChild(other dataUsageCache) {
	otherRoot := other.root()
	if otherRoot == nil {
		logger.LogIf(GlobalContext, errors.New("replaceRootChild: Source has no root"))
		return
	}
	thisRoot := d.root()
	if thisRoot == nil {
		logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of current not found"))
		return
	}
	thisRootHash := d.rootHash()
	otherRootHash := other.rootHash()
	if thisRootHash == otherRootHash {
		logger.LogIf(GlobalContext, errors.New("replaceRootChild: Root of child matches root of destination"))
		return
	}
	d.deleteRecursive(other.rootHash())
	d.copyWithChildren(&other, other.rootHash(), &thisRootHash)
}

// keepBuckets will keep only the buckets specified and delete all others.
func (d *dataUsageCache) keepBuckets(b []BucketInfo) {
	lu := make(map[dataUsageHash]struct{})
	for _, v := range b {
		lu[hashPath(v.Name)] = struct{}{}
	}
	d.keepRootChildren(lu)
}

// keepRootChildren will keep only the root children specified and delete all others.
func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) {
	if d.root() == nil {
		return
	}
	rh := d.rootHash()
	for k := range d.Cache {
		h := dataUsageHash(k)
		if h == rh {
			continue
		}
		if _, ok := list[h]; !ok {
			delete(d.Cache, k)
			d.deleteRecursive(h)
		}
	}
}

// dui converts the flattened version of the path to madmin.DataUsageInfo.
// As a side effect d will be flattened, use a clone if this is not ok.
func (d *dataUsageCache) dui(path string, buckets []BucketInfo) madmin.DataUsageInfo {
	e := d.find(path)
	if e == nil {
		// No entry found, return empty.
		return madmin.DataUsageInfo{}
	}
	flat := d.flatten(*e)
	return madmin.DataUsageInfo{
		LastUpdate:              d.Info.LastUpdate,
		ObjectsTotalCount:       flat.Objects,
		ObjectsTotalSize:        uint64(flat.Size),
		ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
		ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
		ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
		ReplicaSize:             flat.ReplicationStats.ReplicaSize,
		ReplicationPendingCount: flat.ReplicationStats.PendingCount,
		ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
		BucketsCount:            uint64(len(e.Children)),
		BucketsUsage:            d.bucketsUsageInfo(buckets),
	}
}

// replace will add or replace an entry in the cache.
// If a parent is specified the entry will be registered as its child if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) {
	hash := hashPath(path)
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	d.Cache[hash.Key()] = e
	if parent != "" {
		phash := hashPath(parent)
		p := d.Cache[phash.Key()]
		p.addChild(hash)
		d.Cache[phash.Key()] = p
	}
}

// replaceHashed adds or replaces an entry in the cache based on its hash.
// If a parent is specified the entry will be registered as its child if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) {
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	d.Cache[hash.Key()] = e
	if parent != nil {
		p := d.Cache[parent.Key()]
		p.addChild(hash)
		d.Cache[parent.Key()] = p
	}
}
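// Illustrative usage sketch, not part of the original file: building a tiny
// cache tree with replace and reading it back with find. The bucket and
// prefix names are hypothetical.
func exampleBuildTree() *dataUsageEntry {
	d := dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}
	d.replace("bucket", "", dataUsageEntry{})
	// Supplying a parent also registers the child in the parent entry.
	d.replace("bucket/prefix", "bucket", dataUsageEntry{Size: 1 << 20, Objects: 1})
	// find returns a pointer to a copy, or nil if the path is unknown.
	return d.find("bucket/prefix")
}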
// copyWithChildren will copy the entry with hash from src, if it exists, along with any children.
// If a parent is specified the entry will be registered as its child if not already there.
// If the parent does not exist, it will be added.
func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) {
	if d.Cache == nil {
		d.Cache = make(map[string]dataUsageEntry, 100)
	}
	e, ok := src.Cache[hash.String()]
	if !ok {
		return
	}
	d.Cache[hash.Key()] = e
	for ch := range e.Children {
		if ch == hash.Key() {
			logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
			return
		}
		d.copyWithChildren(src, dataUsageHash(ch), &hash)
	}
	if parent != nil {
		p := d.Cache[parent.Key()]
		p.addChild(hash)
		d.Cache[parent.Key()] = p
	}
}

// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
	s := fmt.Sprintf("info:%+v\n", d.Info)
	for k, v := range d.Cache {
		s += fmt.Sprintf("\t%v: %+v\n", k, v)
	}
	return strings.TrimSpace(s)
}

// String returns a human readable representation of the hash.
func (h dataUsageHash) String() string {
	return string(h)
}

// Key returns the key used for cache map lookups.
func (h dataUsageHash) Key() string {
	return string(h)
}

// flatten all children of the root into the root element and return it.
func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry {
	for id := range root.Children {
		e := d.Cache[id]
		if len(e.Children) > 0 {
			e = d.flatten(e)
		}
		root.merge(e)
	}
	root.Children = nil
	return root
}

// add a size to the histogram.
func (h *sizeHistogram) add(size int64) {
	// Fetch the histogram interval corresponding
	// to the passed object size.
	for i, interval := range ObjectsHistogramIntervals {
		if size >= interval.start && size <= interval.end {
			h[i]++
			break
		}
	}
}

// toMap returns the histogram as a map[string]uint64, keyed by interval name.
func (h *sizeHistogram) toMap() map[string]uint64 {
	res := make(map[string]uint64, dataUsageBucketLen)
	for i, count := range h {
		res[ObjectsHistogramIntervals[i].name] = count
	}
	return res
}
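// Illustrative usage sketch, not part of the original file: counting two
// object sizes and reading the histogram back as a map keyed by interval
// name. Assumes ObjectsHistogramIntervals (defined elsewhere in the
// package) has intervals covering these sizes.
func exampleHistogram() map[string]uint64 {
	var h sizeHistogram
	h.add(512)      // counted in the interval containing 512 bytes
	h.add(64 << 20) // counted in the interval containing 64 MiB
	return h.toMap()
}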
// bucketsUsageInfo returns the buckets usage info as a map, with
// the bucket name as key.
func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]madmin.BucketUsageInfo {
	var dst = make(map[string]madmin.BucketUsageInfo, len(buckets))
	for _, bucket := range buckets {
		e := d.find(bucket.Name)
		if e == nil {
			continue
		}
		flat := d.flatten(*e)
		dst[bucket.Name] = madmin.BucketUsageInfo{
			Size:                    uint64(flat.Size),
			ObjectsCount:            flat.Objects,
			ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
			ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
			ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
			ReplicationPendingCount: flat.ReplicationStats.PendingCount,
			ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
			ReplicaSize:             flat.ReplicationStats.ReplicaSize,
			ObjectSizesHistogram:    flat.ObjSizes.toMap(),
		}
	}
	return dst
}

// bucketUsageInfo returns the usage info for a single bucket.
// If not found all values returned are zero values.
func (d *dataUsageCache) bucketUsageInfo(bucket string) madmin.BucketUsageInfo {
	e := d.find(bucket)
	if e == nil {
		return madmin.BucketUsageInfo{}
	}
	flat := d.flatten(*e)
	return madmin.BucketUsageInfo{
		Size:                    uint64(flat.Size),
		ObjectsCount:            flat.Objects,
		ReplicationPendingSize:  flat.ReplicationStats.PendingSize,
		ReplicationPendingCount: flat.ReplicationStats.PendingCount,
		ReplicatedSize:          flat.ReplicationStats.ReplicatedSize,
		ReplicationFailedSize:   flat.ReplicationStats.FailedSize,
		ReplicationFailedCount:  flat.ReplicationStats.FailedCount,
		ReplicaSize:             flat.ReplicationStats.ReplicaSize,
		ObjectSizesHistogram:    flat.ObjSizes.toMap(),
	}
}

// sizeRecursive returns the path as a flattened entry.
func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
	root := d.find(path)
	if root == nil || len(root.Children) == 0 {
		return root
	}
	flat := d.flatten(*root)
	return &flat
}

// root returns the root of the cache.
func (d *dataUsageCache) root() *dataUsageEntry {
	return d.find(d.Info.Name)
}

// rootHash returns the hash of the root of the cache.
func (d *dataUsageCache) rootHash() dataUsageHash {
	return hashPath(d.Info.Name)
}

// clone returns a copy of the cache with no references to the existing.
func (d *dataUsageCache) clone() dataUsageCache {
	clone := dataUsageCache{
		Info:  d.Info,
		Cache: make(map[string]dataUsageEntry, len(d.Cache)),
	}
	for k, v := range d.Cache {
		clone.Cache[k] = v
	}
	return clone
}

// merge root of other into d.
// Children of root will be flattened before being merged.
// The last update time will be set to the latest of the two.
func (d *dataUsageCache) merge(other dataUsageCache) {
	existingRoot := d.root()
	otherRoot := other.root()
	if existingRoot == nil && otherRoot == nil {
		return
	}
	if otherRoot == nil {
		return
	}
	if existingRoot == nil {
		*d = other.clone()
		return
	}
	if other.Info.LastUpdate.After(d.Info.LastUpdate) {
		d.Info.LastUpdate = other.Info.LastUpdate
	}
	existingRoot.merge(*otherRoot)
	eHash := d.rootHash()
	for key := range otherRoot.Children {
		entry := other.Cache[key]
		flat := other.flatten(entry)
		existing := d.Cache[key]
		// If not found, merging simply adds.
		existing.merge(flat)
		d.replaceHashed(dataUsageHash(key), &eHash, existing)
	}
}
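// Illustrative usage sketch, not part of the original file: folding a
// freshly scanned cache into an existing one. clone keeps the existing
// cache intact; merge flattens the children of the other root, so detail
// below the first level of 'scanned' is collapsed into single entries.
func exampleMergeCaches(existing, scanned dataUsageCache) dataUsageCache {
	out := existing.clone()
	out.merge(scanned)
	return out
}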
type objectIO interface {
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}

// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// If the object is not found or cannot be deserialized, d is cleared and a nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		switch err.(type) {
		case ObjectNotFound:
		case BucketNotFound:
		case InsufficientReadQuorum:
		default:
			return toObjectErr(err, dataUsageBucket, name)
		}
		*d = dataUsageCache{}
		return nil
	}
	defer r.Close()
	if err := d.deserialize(r); err != nil {
		*d = dataUsageCache{}
		logger.LogOnceIf(ctx, err, err.Error())
	}
	return nil
}

// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(d.serializeTo(pw))
	}()
	defer pr.Close()

	r, err := hash.NewReader(pr, -1, "", "", -1)
	if err != nil {
		return err
	}

	_, err = store.PutObject(ctx,
		dataUsageBucket,
		name,
		NewPutObjReader(r),
		ObjectOptions{})
	if isErrBucketNotFound(err) {
		return nil
	}
	return err
}

// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	dataUsageCacheVerV4 = 4
	dataUsageCacheVerV3 = 3
	dataUsageCacheVerV2 = 2
	dataUsageCacheVerV1 = 1
)

// serializeTo writes the version byte followed by the zstd-compressed
// msgpack encoding of the cache.
func (d *dataUsageCache) serializeTo(dst io.Writer) error {
	// Add version and compress.
	_, err := dst.Write([]byte{dataUsageCacheVerV4})
	if err != nil {
		return err
	}
	enc, err := zstd.NewWriter(dst,
		zstd.WithEncoderLevel(zstd.SpeedFastest),
		zstd.WithWindowSize(1<<20),
		zstd.WithEncoderConcurrency(2))
	if err != nil {
		return err
	}
	mEnc := msgp.NewWriter(enc)
	err = d.EncodeMsg(mEnc)
	if err != nil {
		return err
	}
	err = mEnc.Flush()
	if err != nil {
		return err
	}
	return enc.Close()
}
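// Illustrative usage sketch, not part of the original file: an in-memory
// round-trip through serializeTo and deserialize. Assumes an extra "bytes"
// import; the original file streams through an io.Pipe in save instead.
func exampleRoundTrip(d *dataUsageCache) (*dataUsageCache, error) {
	var buf bytes.Buffer
	if err := d.serializeTo(&buf); err != nil {
		return nil, err
	}
	var out dataUsageCache
	if err := out.deserialize(&buf); err != nil {
		return nil, err
	}
	return &out, nil
}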
// deserialize the supplied reader into the cache, upgrading from
// older cache versions where possible.
func (d *dataUsageCache) deserialize(r io.Reader) error {
	var b [1]byte
	n, _ := r.Read(b[:])
	if n != 1 {
		return io.ErrUnexpectedEOF
	}
	switch b[0] {
	case dataUsageCacheVerV1:
		return errors.New("cache version deprecated (will autoupdate)")
	case dataUsageCacheVerV2:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()

		dold := &dataUsageCacheV2{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			d.Cache[k] = dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
			}
		}
		return nil
	case dataUsageCacheVerV3:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()
		dold := &dataUsageCacheV3{}
		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil {
			return err
		}
		d.Info = dold.Info
		d.Disks = dold.Disks
		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache))
		for k, v := range dold.Cache {
			d.Cache[k] = dataUsageEntry{
				Size:     v.Size,
				Objects:  v.Objects,
				ObjSizes: v.ObjSizes,
				Children: v.Children,
				ReplicationStats: replicationStats{
					ReplicatedSize: v.ReplicatedSize,
					ReplicaSize:    v.ReplicaSize,
					FailedSize:     v.ReplicationFailedSize,
					PendingSize:    v.ReplicationPendingSize,
				},
			}
		}
		return nil
	case dataUsageCacheVerV4:
		// Zstd compressed.
		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2))
		if err != nil {
			return err
		}
		defer dec.Close()

		return d.DecodeMsg(msgp.NewReader(dec))
	}
	return fmt.Errorf("dataUsageCache: unknown version: %d", int(b[0]))
}

// hashPathCutSet is trimmed from the start and end of paths before hashing.
var hashPathCutSet = dataUsageRoot

func init() {
	if dataUsageRoot != string(filepath.Separator) {
		hashPathCutSet = dataUsageRoot + string(filepath.Separator)
	}
}

// hashPath calculates a hash of the provided string.
func hashPath(data string) dataUsageHash {
	if data != dataUsageRoot {
		data = strings.Trim(data, hashPathCutSet)
	}
	return dataUsageHash(path.Clean(data))
}
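// Illustrative usage sketch, not part of the original file: hashPath trims
// the cut set from both ends and cleans the path, so equivalent spellings
// map to the same key. Assumes dataUsageRoot is "/" as in upstream MinIO.
func exampleHashPath() bool {
	a := hashPath("/bucket/object/")
	b := hashPath("bucket/object")
	return a.Key() == b.Key() // true under the assumption above
}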
//msgp:ignore dataUsageHashMap
type dataUsageHashMap map[string]struct{}

// DecodeMsg implements msgp.Decodable
func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0002 uint32
	zb0002, err = dc.ReadArrayHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, err = dc.ReadString()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	return
}

// EncodeMsg implements msgp.Encodable
func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteArrayHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0004 := range z {
		err = en.WriteString(zb0004)
		if err != nil {
			err = msgp.WrapError(err, zb0004)
			return
		}
	}
	return
}

// MarshalMsg implements msgp.Marshaler
func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for zb0004 := range z {
		o = msgp.AppendString(o, zb0004)
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z dataUsageHashMap) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0004 := range z {
		s += msgp.StringPrefixSize + len(zb0004)
	}
	return
}
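// Illustrative usage sketch, not part of the original file: a
// MarshalMsg/UnmarshalMsg round-trip for dataUsageHashMap, which is
// encoded as a plain msgpack array of strings.
func exampleHashMapRoundTrip() (dataUsageHashMap, error) {
	src := dataUsageHashMap{"a": {}, "b": {}}
	b, err := src.MarshalMsg(nil)
	if err != nil {
		return nil, err
	}
	var dst dataUsageHashMap
	if _, err := dst.UnmarshalMsg(b); err != nil {
		return nil, err
	}
	return dst, nil
}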