github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/docs/debugging/xl-meta/main.go

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/klauspost/compress/zip"
	"github.com/klauspost/filepathx"
	"github.com/klauspost/reedsolomon"
	"github.com/minio/cli"
	"github.com/minio/highwayhash"
	"github.com/tinylib/msgp/msgp"
)

func main() {
	app := cli.NewApp()
	app.Copyright = "MinIO, Inc."
	app.Usage = "xl.meta to JSON"
	app.HideVersion = true
	app.CustomAppHelpTemplate = `NAME:
  {{.Name}} - {{.Usage}}

USAGE:
  {{.Name}} {{if .VisibleFlags}}[FLAGS]{{end}} METAFILES...

Multiple files can be added. Files ending in '.zip' will be searched
for 'xl.meta' files. Wildcards are accepted: 'testdir/*.txt' will match
all files in testdir ending with '.txt'; directories can be wildcards
as well. 'testdir/*/*.txt' will match 'testdir/subdir/b.txt'; double stars
mean fully recursive. 'testdir/**/xl.meta' will search for all xl.meta
recursively.

FLAGS:
  {{range .VisibleFlags}}{{.}}
  {{end}}
`

	app.HideHelpCommand = true

	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Usage:  "print each file as a separate line without formatting",
			Name:   "ndjson",
			Hidden: true,
		},
		cli.BoolFlag{
			Usage: "display inline data keys and sizes",
			Name:  "data",
		},
		cli.BoolFlag{
			Usage: "export inline data",
			Name:  "export",
		},
		cli.BoolFlag{
			Usage: "combine inline data",
			Name:  "combine",
		},
	}

	app.Action = func(c *cli.Context) error {
		ndjson := c.Bool("ndjson")
		if c.Bool("data") && c.Bool("combine") {
			return errors.New("cannot combine --data and --combine")
		}
		// file / version / file
		filemap := make(map[string]map[string]string)
		// versionID ->
		combineFiles := make(map[string][]string)
		decode := func(r io.Reader, file string) ([]byte, error) {
			b, err := io.ReadAll(r)
			if err != nil {
				return nil, err
			}
			b, _, minor, err := checkXL2V1(b)
			if err != nil {
				return nil, err
			}
			filemap[file] = make(map[string]string)
			buf := bytes.NewBuffer(nil)
			var data xlMetaInlineData
			switch minor {
			case 0:
				_, err = msgp.CopyToJSON(buf, bytes.NewReader(b))
				if err != nil {
					return nil, err
				}
			case 1, 2:
				v, b, err := msgp.ReadBytesZC(b)
				if err != nil {
					return nil, err
				}
				if _, nbuf, err := msgp.ReadUint32Bytes(b); err == nil {
					// Read metadata CRC (added in v2, ignore if not found)
					b = nbuf
				}

				_, err = msgp.CopyToJSON(buf, bytes.NewReader(v))
				if err != nil {
					return nil, err
				}
				data = b
			case 3:
				v, b, err := msgp.ReadBytesZC(b)
				if err != nil {
					return nil, err
				}
				if _, nbuf, err := msgp.ReadUint32Bytes(b); err == nil {
					// Read metadata CRC (added in v2, ignore if not found)
					b = nbuf
				}

				nVers, v, err := decodeXLHeaders(v)
				if err != nil {
					return nil, err
				}
				type version struct {
					Idx      int
					Header   json.RawMessage
					Metadata json.RawMessage
				}
				versions := make([]version, nVers)
				err = decodeVersions(v, nVers, func(idx int, hdr, meta []byte) error {
					var header xlMetaV2VersionHeaderV2
					if _, err := header.UnmarshalMsg(hdr); err != nil {
						return err
					}
					b, err := header.MarshalJSON()
					if err != nil {
						return err
					}
					var buf bytes.Buffer
					if _, err := msgp.UnmarshalAsJSON(&buf, meta); err != nil {
						return err
					}
					versions[idx] = version{
						Idx:      idx,
						Header:   b,
						Metadata: buf.Bytes(),
					}
					type erasureInfo struct {
						V2Obj *struct {
							EcDist  []int
							EcIndex int
							EcM     int
							EcN     int
						}
					}
					var ei erasureInfo
					if err := json.Unmarshal(buf.Bytes(), &ei); err == nil && ei.V2Obj != nil {
						verID := uuid.UUID(header.VersionID).String()
						idx := ei.V2Obj.EcIndex
						filemap[file][verID] = fmt.Sprintf("%s/shard-%02d-of-%02d", verID, idx, ei.V2Obj.EcN+ei.V2Obj.EcM)
						filemap[file][verID+".json"] = buf.String()
					}
					return nil
				})
				if err != nil {
					return nil, err
				}
				enc := json.NewEncoder(buf)
				if err := enc.Encode(struct {
					Versions []version
				}{Versions: versions}); err != nil {
					return nil, err
				}
				data = b
			default:
				return nil, fmt.Errorf("unknown metadata version %d", minor)
			}

			if c.Bool("data") {
				b, err := data.json()
				if err != nil {
					return nil, err
				}
				buf = bytes.NewBuffer(b)
			}
			if c.Bool("export") {
				file := file
				if !c.Bool("combine") {
					file = strings.Map(func(r rune) rune {
						switch {
						case r >= 'a' && r <= 'z':
							return r
						case r >= 'A' && r <= 'Z':
							return r
						case r >= '0' && r <= '9':
							return r
						case strings.ContainsAny(string(r), "+=-_()!@."):
							return r
						default:
							return '_'
						}
					}, file)
				}
				err := data.files(func(name string, data []byte) {
					fn := fmt.Sprintf("%s-%s.data", file, name)
					if c.Bool("combine") {
						f := filemap[file][name]
						if f != "" {
							fn = f + ".data"
							os.MkdirAll(filepath.Dir(fn), os.ModePerm)
							err = os.WriteFile(fn+".json", []byte(filemap[file][name+".json"]), os.ModePerm)
							combineFiles[name] = append(combineFiles[name], fn)
							if err != nil {
								fmt.Println("ERR:", err)
							}
							_ = os.WriteFile(filepath.Dir(fn)+"/filename.txt", []byte(file), os.ModePerm)
						}
					}
					err = os.WriteFile(fn, data, os.ModePerm)
					if err != nil {
						fmt.Println(err)
					}
				})
				if err != nil {
					return nil, err
				}
			}
			if ndjson {
				return buf.Bytes(), nil
			}
			var msi map[string]interface{}
			dec := json.NewDecoder(buf)
			// Use number to preserve integers.
			dec.UseNumber()
			err = dec.Decode(&msi)
			if err != nil {
				return nil, err
			}
			b, err = json.MarshalIndent(msi, "", "  ")
			if err != nil {
				return nil, err
			}
			return b, nil
		}

		args := c.Args()
		if len(args) == 0 {
			// If no args, assume xl.meta
			args = []string{"xl.meta"}
		}
		var files []string

		for _, pattern := range args {
			if pattern == "-" {
				files = append(files, pattern)
				continue
			}
			found, err := filepathx.Glob(pattern)
			if err != nil {
				return err
			}
			if len(found) == 0 {
				return fmt.Errorf("unable to find file %v", pattern)
			}
			files = append(files, found...)
		}
		if len(files) == 0 {
			return fmt.Errorf("no files found")
		}
		if len(files) > 1 || strings.HasSuffix(files[0], ".zip") {
			ndjson = true
		}

		toPrint := make([]string, 0, 16)
		for _, file := range files {
			var r io.Reader
			var sz int64
			switch file {
			case "-":
				r = os.Stdin
			default:
				f, err := os.Open(file)
				if err != nil {
					return err
				}
				if st, err := f.Stat(); err == nil {
					sz = st.Size()
				}
				defer f.Close()
				r = f
			}
			if strings.HasSuffix(file, ".zip") {
				zr, err := zip.NewReader(r.(io.ReaderAt), sz)
				if err != nil {
					return err
				}
				for _, file := range zr.File {
					if !file.FileInfo().IsDir() && strings.HasSuffix(file.Name, "xl.meta") {
						r, err := file.Open()
						if err != nil {
							return err
						}
						// Quote string...
						b, _ := json.Marshal(file.Name)
						b2, err := decode(r, file.Name)
						if err != nil {
							return err
						}
						var tmp map[string]interface{}
						if err := json.Unmarshal(b2, &tmp); err == nil {
							if b3, err := json.Marshal(tmp); err == nil {
								b2 = b3
							}
						}
						toPrint = append(toPrint, fmt.Sprintf("\t%s: %s", string(b), string(b2)))
					}
				}
			} else {
				b0 := ""
				if ndjson {
					b, _ := json.Marshal(file)
					b0 = fmt.Sprintf("%s: ", string(b))
				}
				b, err := decode(r, file)
				if err != nil {
					return err
				}
				b = bytes.TrimSpace(b)
				if !ndjson {
					b = bytes.TrimFunc(b, func(r rune) bool {
						return r == '{' || r == '}' || r == '\n' || r == '\r'
					})
				}

				toPrint = append(toPrint, fmt.Sprintf("%s%s", b0, string(b)))
			}
		}
		sort.Strings(toPrint)
		fmt.Printf("{\n%s\n}\n", strings.Join(toPrint, ",\n"))

		if len(combineFiles) > 0 {
			for k, v := range combineFiles {
				if err := combine(v, k); err != nil {
					fmt.Println("ERROR:", err)
				}
			}
		}

		return nil
	}
	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}

var (
	// XL header specifies the format
	xlHeader = [4]byte{'X', 'L', '2', ' '}

	// Current version being written.
	xlVersionCurrent [4]byte
)

const (
	// Breaking changes.
	// Newer versions cannot be read by older software.
	// This will prevent downgrades to incompatible versions.
	xlVersionMajor = 1

	// Non-breaking changes.
	// Bumping this is informational, but should be done
	// if any change is made to the data stored; bumping this
	// will allow detecting the exact version later.
	xlVersionMinor = 1
)

func init() {
	binary.LittleEndian.PutUint16(xlVersionCurrent[0:2], xlVersionMajor)
	binary.LittleEndian.PutUint16(xlVersionCurrent[2:4], xlVersionMinor)
}

// checkXL2V1 will check if the metadata has a correct header and is a known major version.
// The remaining payload and versions are returned.
func checkXL2V1(buf []byte) (payload []byte, major, minor uint16, err error) {
	if len(buf) <= 8 {
		return payload, 0, 0, fmt.Errorf("xlMeta: no data")
	}

	if !bytes.Equal(buf[:4], xlHeader[:]) {
		return payload, 0, 0, fmt.Errorf("xlMeta: unknown XLv2 header, expected %v, got %v", xlHeader[:4], buf[:4])
	}

	// Legacy v1.0 files store the version as the ASCII string "1" padded with three spaces.
	if bytes.Equal(buf[4:8], []byte("1   ")) {
		// Set as 1,0.
		major, minor = 1, 0
	} else {
		major, minor = binary.LittleEndian.Uint16(buf[4:6]), binary.LittleEndian.Uint16(buf[6:8])
	}
	if major > xlVersionMajor {
		return buf[8:], major, minor, fmt.Errorf("xlMeta: unknown major version %d found", major)
	}

	return buf[8:], major, minor, nil
}

const xlMetaInlineDataVer = 1

type xlMetaInlineData []byte

// afterVersion returns the payload after the version, if any.
func (x xlMetaInlineData) afterVersion() []byte {
	if len(x) == 0 {
		return x
	}
	return x[1:]
}

// versionOK returns whether the version is ok.
func (x xlMetaInlineData) versionOK() bool {
	if len(x) == 0 {
		return true
	}
	return x[0] > 0 && x[0] <= xlMetaInlineDataVer
}

// json returns a JSON summary of the inline data: each key with its size in bytes
// and, when a bitrot checksum is present, whether it validates.
func (x xlMetaInlineData) json() ([]byte, error) {
	if len(x) == 0 {
		return []byte("{}"), nil
	}
	if !x.versionOK() {
		return nil, errors.New("xlMetaInlineData: unknown version")
	}
	sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
	if err != nil {
		return nil, err
	}
	res := []byte("{")

	for i := uint32(0); i < sz; i++ {
		var key, val []byte
		key, buf, err = msgp.ReadMapKeyZC(buf)
		if err != nil {
			return nil, err
		}
		if len(key) == 0 {
			return nil, fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
		}
		// Skip data...
		val, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return nil, err
		}
		if i > 0 {
			res = append(res, ',')
		}
		s := fmt.Sprintf(`"%s": {"bytes": %d`, string(key), len(val))
		// Check bitrot... We should only ever have one block...
		if len(val) >= 32 {
			want := val[:32]
			data := val[32:]
			const magicHighwayHash256Key = "\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0"

			hh, _ := highwayhash.New([]byte(magicHighwayHash256Key))
			hh.Write(data)
			got := hh.Sum(nil)
			if bytes.Equal(want, got) {
				s += ", \"bitrot_valid\": true"
			} else {
				s += ", \"bitrot_valid\": false"
			}
		}
		// Always close the per-key object, also when no checksum was present.
		s += "}"
		res = append(res, []byte(s)...)
	}
	res = append(res, '}')
	return res, nil
}

// files returns files as callback.
func (x xlMetaInlineData) files(fn func(name string, data []byte)) error {
	if len(x) == 0 {
		return nil
	}
	if !x.versionOK() {
		return errors.New("xlMetaInlineData: unknown version")
	}

	sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion())
	if err != nil {
		return err
	}

	for i := uint32(0); i < sz; i++ {
		var key, val []byte
		key, buf, err = msgp.ReadMapKeyZC(buf)
		if err != nil {
			return err
		}
		if len(key) == 0 {
			return fmt.Errorf("xlMetaInlineData: key %d is length 0", i)
		}
		// Read data...
		val, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return err
		}
		// Call back.
		fn(string(key), val)
	}
	return nil
}

const (
	xlHeaderVersion = 2
	xlMetaVersion   = 2
)

func decodeXLHeaders(buf []byte) (versions int, b []byte, err error) {
	hdrVer, buf, err := msgp.ReadUintBytes(buf)
	if err != nil {
		return 0, buf, err
	}
	metaVer, buf, err := msgp.ReadUintBytes(buf)
	if err != nil {
		return 0, buf, err
	}
	if hdrVer > xlHeaderVersion {
		return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", hdrVer)
	}
	if metaVer > xlMetaVersion {
		return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer)
	}
	versions, buf, err = msgp.ReadIntBytes(buf)
	if err != nil {
		return 0, buf, err
	}
	if versions < 0 {
		return 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions)
	}
	return versions, buf, nil
}

// decodeVersions will decode a number of versions from a buffer
// and perform a callback for each version in order, newest first.
// Any non-nil error from the callback stops decoding and is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
	var tHdr, tMeta []byte // Zero copy bytes
	for i := 0; i < versions; i++ {
		tHdr, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return err
		}
		tMeta, buf, err = msgp.ReadBytesZC(buf)
		if err != nil {
			return err
		}
		if err = fn(i, tHdr, tMeta); err != nil {
			return err
		}
	}
	return nil
}

// xlMetaV2VersionHeaderV2 mirrors the per-version header stored in xl.meta header version 2.
type xlMetaV2VersionHeaderV2 struct {
	VersionID [16]byte
	ModTime   int64
	Signature [4]byte
	Type      uint8
	Flags     uint8
}

// UnmarshalMsg implements msgp.Unmarshaler
func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0001 != 5 {
		err = msgp.ArrayError{Wanted: 5, Got: zb0001}
		return
	}
	bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:])
	if err != nil {
		err = msgp.WrapError(err, "VersionID")
		return
	}
	z.ModTime, bts, err = msgp.ReadInt64Bytes(bts)
	if err != nil {
		err = msgp.WrapError(err, "ModTime")
		return
	}
	bts, err = msgp.ReadExactBytes(bts, (z.Signature)[:])
	if err != nil {
		err = msgp.WrapError(err, "Signature")
		return
	}
	{
		var zb0002 uint8
		zb0002, bts, err = msgp.ReadUint8Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "Type")
			return
		}
		z.Type = zb0002
	}
	{
		var zb0003 uint8
		zb0003, bts, err = msgp.ReadUint8Bytes(bts)
		if err != nil {
			err = msgp.WrapError(err, "Flags")
			return
		}
		z.Flags = zb0003
	}
	o = bts
	return
}

// MarshalJSON renders the header with hex-encoded version ID and signature and a time.Time ModTime.
func (z xlMetaV2VersionHeaderV2) MarshalJSON() (o []byte, err error) {
	tmp := struct {
		VersionID string
		ModTime   time.Time
		Signature string
		Type      uint8
		Flags     uint8
	}{
		VersionID: hex.EncodeToString(z.VersionID[:]),
		ModTime:   time.Unix(0, z.ModTime),
		Signature: hex.EncodeToString(z.Signature[:]),
		Type:      z.Type,
		Flags:     z.Flags,
	}
	return json.Marshal(tmp)
}

// combine reassembles the exported inline data shards of a single version into one file,
// reconstructing missing byte ranges from parity shards when possible.
func combine(files []string, out string) error {
	sort.Strings(files)
	var size, shards, data, parity int
	mapped := make([]byte, size)
	filled := make([]byte, size)
	parityData := make(map[int]map[int][]byte)
	fmt.Printf("Attempting to combine version %q.\n", out)
	for _, file := range files {
		b, err := os.ReadFile(file)
		if err != nil {
			return err
		}
		meta, err := os.ReadFile(file + ".json")
		if err != nil {
			return err
		}
		type erasureInfo struct {
			V2Obj *struct {
				EcDist  []int
				EcIndex int
				EcM     int
				EcN     int
				Size    int
			}
		}
		var ei erasureInfo
		var idx int
		if err := json.Unmarshal(meta, &ei); err == nil && ei.V2Obj != nil {
			if size == 0 {
				size = ei.V2Obj.Size
				mapped = make([]byte, size)
				filled = make([]byte, size)
			}
			data = ei.V2Obj.EcM
			parity = ei.V2Obj.EcN
			if shards == 0 {
				shards = data + parity
			}
			idx = ei.V2Obj.EcIndex - 1
			fmt.Println("Read shard", ei.V2Obj.EcIndex, "Data shards", data, "Parity", parity, fmt.Sprintf("(%s)", file))
			if ei.V2Obj.Size != size {
				return fmt.Errorf("size mismatch. Meta size: %d", ei.V2Obj.Size)
			}
		} else {
			return err
		}
		if len(b) < 32 {
			return fmt.Errorf("file %s too short", file)
		}
		// Trim hash. Fine for inline data, since only one block.
		b = b[32:]

		set := parityData[data]
		if set == nil {
			set = make(map[int][]byte)
		}
		set[idx] = b
		parityData[data] = set

		// Combine
		start := len(b) * idx
		if start >= len(mapped) {
			continue
		}
		copy(mapped[start:], b)
		for j := range b {
			if j+start >= len(filled) {
				break
			}
			filled[j+start] = 1
		}
	}

	lastValid := 0
	missing := 0
	for i := range filled {
		if filled[i] == 1 {
			lastValid = i
		} else {
			missing++
		}
	}
	if missing > 0 && len(parityData) > 0 {
		fmt.Println("Attempting to reconstruct using parity sets:")
		for k, v := range parityData {
			if missing == 0 {
				break
			}
			fmt.Println("* Setup: Data shards:", k, "- Parity blocks:", len(v))
			rs, err := reedsolomon.New(k, shards-k)
			if err != nil {
				return err
			}
			split, err := rs.Split(mapped)
			if err != nil {
				return err
			}
			splitFilled, err := rs.Split(filled)
			if err != nil {
				return err
			}
			ok := len(splitFilled)
			for i, sh := range splitFilled {
				for _, v := range sh {
					if v == 0 {
						split[i] = nil
						ok--
						break
					}
				}
			}
			hasParity := 0
			for idx, sh := range v {
				split[idx] = sh
				if idx >= k && len(v) > 0 {
					hasParity++
				}
			}
			fmt.Printf("Have %d complete remapped data shards and %d complete parity shards. ", ok, hasParity)

			if err := rs.ReconstructData(split); err == nil {
				fmt.Println("Could reconstruct completely")
				for i, data := range split[:k] {
					start := i * len(data)
					copy(mapped[start:], data)
				}
				lastValid = size - 1
				missing = 0
			} else {
				fmt.Println("Could NOT reconstruct:", err)
			}
		}
	}
	if lastValid == 0 {
		return errors.New("no valid data found")
	}
	if missing > 0 {
		out += ".truncated"
	} else {
		out += ".complete"
	}
	fmt.Println(missing, "bytes missing. Truncating", len(filled)-lastValid-1, "from end.")
	mapped = mapped[:lastValid+1]
	err := os.WriteFile(out, mapped, os.ModePerm)
	if err != nil {
		return err
	}
	fmt.Println("Wrote output to", out)
	return nil
}
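
// The helper below is an illustrative sketch added for this write-up and is not part of
// the original tool. It shows how a shard file written by the -export flag could be
// re-checked against its embedded bitrot checksum: as the json() method above implies,
// an inline data block is stored as a 32-byte keyed HighwayHash-256 digest followed by
// the payload, and combine() relies on the same layout when it trims the first 32 bytes.
// The function name and the assumption that the argument is a -export output file are
// hypothetical; only the key and the digest-prefix layout come from the code above.
func verifyExportedShard(path string) error {
	const magicHighwayHash256Key = "\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0"
	b, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	if len(b) < 32 {
		return fmt.Errorf("%s: too short to contain a bitrot checksum", path)
	}
	// First 32 bytes are the stored digest, the rest is the shard payload.
	want, payload := b[:32], b[32:]
	hh, err := highwayhash.New([]byte(magicHighwayHash256Key))
	if err != nil {
		return err
	}
	hh.Write(payload)
	if !bytes.Equal(want, hh.Sum(nil)) {
		return fmt.Errorf("%s: bitrot checksum mismatch", path)
	}
	return nil
}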