github.com/pingcap/badger@v1.5.1-0.20230103063557-828f39b09b6d/compaction.go

/*
 * Copyright 2017 Dgraph Labs, Inc. and Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package badger

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"sync"

	"github.com/pingcap/badger/options"
	"github.com/pingcap/badger/table"
	"github.com/pingcap/badger/table/sstable"
	"github.com/pingcap/badger/y"
	"github.com/pingcap/log"
	"golang.org/x/time/rate"
)

type keyRange struct {
	left  y.Key
	right y.Key
	inf   bool
}

var infRange = keyRange{inf: true}

func (r keyRange) String() string {
	return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf)
}

func (r keyRange) equals(dst keyRange) bool {
	return r.left.Equal(dst.left) &&
		r.right.Equal(dst.right) &&
		r.inf == dst.inf
}

func (r keyRange) overlapsWith(dst keyRange) bool {
	if r.inf || dst.inf {
		return true
	}

	// If my left is greater than dst right, we have no overlap.
	if r.left.Compare(dst.right) > 0 {
		return false
	}
	// If my right is less than dst left, we have no overlap.
	if r.right.Compare(dst.left) < 0 {
		return false
	}
	// We have overlap.
	return true
}

func getKeyRange(tables []table.Table) keyRange {
	y.Assert(len(tables) > 0)
	smallest := tables[0].Smallest()
	biggest := tables[0].Biggest()
	for i := 1; i < len(tables); i++ {
		if tables[i].Smallest().Compare(smallest) < 0 {
			smallest = tables[i].Smallest()
		}
		if tables[i].Biggest().Compare(biggest) > 0 {
			biggest = tables[i].Biggest()
		}
	}
	return keyRange{
		left:  smallest,
		right: biggest,
	}
}

type levelCompactStatus struct {
	ranges    []keyRange
	deltaSize int64
}

func (lcs *levelCompactStatus) debug() string {
	var b bytes.Buffer
	for _, r := range lcs.ranges {
		b.WriteString(r.String())
	}
	return b.String()
}

func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool {
	for _, r := range lcs.ranges {
		if r.overlapsWith(dst) {
			return true
		}
	}
	return false
}

func (lcs *levelCompactStatus) remove(dst keyRange) bool {
	final := lcs.ranges[:0]
	var found bool
	for _, r := range lcs.ranges {
		if !r.equals(dst) {
			final = append(final, r)
		} else {
			found = true
		}
	}
	lcs.ranges = final
	return found
}

type compactStatus struct {
	sync.RWMutex
	levels []*levelCompactStatus
}

func (cs *compactStatus) overlapsWith(level int, this keyRange) bool {
	cs.RLock()
	defer cs.RUnlock()

	thisLevel := cs.levels[level]
	return thisLevel.overlapsWith(this)
}

func (cs *compactStatus) deltaSize(l int) int64 {
	cs.RLock()
	defer cs.RUnlock()
	return cs.levels[l].deltaSize
}

type thisAndNextLevelRLocked struct{}

// compareAndAdd checks whether this CompactDef can be run, i.e. that it does not overlap with any
// other running compaction. If it can be run, the run is recorded in the compactStatus state.
func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd *CompactDef, thisHandler *levelHandler) bool {
	cs.Lock()
	defer cs.Unlock()

	level := cd.Level

	y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
	thisLevel := cs.levels[level]
	nextLevel := cs.levels[level+1]

	if thisLevel.overlapsWith(cd.thisRange) {
		return false
	}
	if nextLevel.overlapsWith(cd.nextRange) {
		return false
	}
	// Check whether this level really needs compaction. Otherwise, we would end up
	// running parallel compactions for the same level.
	// NOTE: We can read thisHandler.totalSize directly, because we have already acquired a read
	// lock over this and the next level.
	if thisHandler.totalSize-thisLevel.deltaSize < thisHandler.maxTotalSize {
		return false
	}

	thisLevel.ranges = append(thisLevel.ranges, cd.thisRange)
	nextLevel.ranges = append(nextLevel.ranges, cd.nextRange)
	thisLevel.deltaSize += cd.topSize
	cd.markTablesCompacting()
	return true
}

func (cs *compactStatus) delete(cd *CompactDef) {
	cs.Lock()
	defer cs.Unlock()

	level := cd.Level
	y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels))
	thisLevel := cs.levels[level]
	nextLevel := cs.levels[level+1]

	thisLevel.deltaSize -= cd.topSize
	found := thisLevel.remove(cd.thisRange)
	found = nextLevel.remove(cd.nextRange) && found

	if !found {
		this := cd.thisRange
		next := cd.nextRange
		fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf)
		fmt.Printf("This Level:\n%s\n", thisLevel.debug())
		fmt.Println()
		fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf)
		fmt.Printf("Next Level:\n%s\n", nextLevel.debug())
		log.Fatal("keyRange not found")
	}
}

func (cs *compactStatus) isCompacting(level int, tables ...table.Table) bool {
	if len(tables) == 0 {
		return false
	}
	kr := keyRange{
		left:  tables[0].Smallest(),
		right: tables[len(tables)-1].Biggest(),
	}
	y.Assert(!kr.left.IsEmpty())
	y.Assert(!kr.right.IsEmpty())
	return cs.overlapsWith(level, kr)
}

type CompactDef struct {
	Level int

	Top []table.Table
	Bot []table.Table

	SkippedTbls []table.Table
	SafeTS      uint64
	Guards      []Guard
	Filter      CompactionFilter
	HasOverlap  bool
	Opt         options.TableBuilderOptions
	Dir         string
	AllocIDFunc func() uint64
	Limiter     *rate.Limiter
	InMemory    bool

	splitHints []y.Key

	thisRange keyRange
	nextRange keyRange

	topSize     int64
	topLeftIdx  int
	topRightIdx int
	botSize     int64
	botLeftIdx  int
	botRightIdx int
}

func (cd *CompactDef) String() string {
	return fmt.Sprintf("%d top:[%d:%d](%d), bot:[%d:%d](%d), skip:%d, write_amp:%.2f",
		cd.Level, cd.topLeftIdx, cd.topRightIdx, cd.topSize,
		cd.botLeftIdx, cd.botRightIdx, cd.botSize, len(cd.SkippedTbls), float64(cd.topSize+cd.botSize)/float64(cd.topSize))
}

func (cd *CompactDef) smallest() y.Key {
	if len(cd.Bot) > 0 && cd.nextRange.left.Compare(cd.thisRange.left) < 0 {
		return cd.nextRange.left
	}
	return cd.thisRange.left
}

func (cd *CompactDef) biggest() y.Key {
	if len(cd.Bot) > 0 && cd.nextRange.right.Compare(cd.thisRange.right) > 0 {
		return cd.nextRange.right
	}
	return cd.thisRange.right
}

func (cd *CompactDef) markTablesCompacting() {
	for _, tbl := range cd.Top {
		tbl.MarkCompacting(true)
	}
	for _, tbl := range cd.Bot {
		tbl.MarkCompacting(true)
	}
	for _, tbl := range cd.SkippedTbls {
		tbl.MarkCompacting(true)
	}
}

const minSkippedTableSize = 1024 * 1024

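// fillBottomTables partitions the overlapping next-level tables: a table that any Top table
// overlaps must be rewritten and goes into Bot; a non-overlapping table of at least
// minSkippedTableSize goes into SkippedTbls so it can be left untouched, while a smaller
// non-overlapping table (e.g. 512 KB) is still rewritten to keep the table count bounded.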
func (cd *CompactDef) fillBottomTables(overlappingTables []table.Table) {
	for _, t := range overlappingTables {
		// If none of the Top tables overlaps with the range of a bottom table,
		// we can skip that bottom table during compaction to reduce write amplification.
		var added bool
		for _, topTbl := range cd.Top {
			if topTbl.HasOverlap(t.Smallest(), t.Biggest(), true) {
				cd.Bot = append(cd.Bot, t)
				added = true
				break
			}
		}
		if !added {
			if t.Size() >= minSkippedTableSize {
				// We need to limit the minimum size of the tables to be skipped;
				// otherwise the number of tables in a level would keep growing
				// until we hit the "too many open files" error.
				cd.SkippedTbls = append(cd.SkippedTbls, t)
			} else {
				cd.Bot = append(cd.Bot, t)
			}
		}
	}
}

func (cd *CompactDef) fillTablesL0(cs *compactStatus, thisLevel, nextLevel *levelHandler) bool {
	cd.lockLevels(thisLevel, nextLevel)
	defer cd.unlockLevels(thisLevel, nextLevel)

	if len(thisLevel.tables) == 0 {
		return false
	}

	cd.Top = make([]table.Table, len(thisLevel.tables))
	copy(cd.Top, thisLevel.tables)
	for _, t := range cd.Top {
		cd.topSize += t.Size()
	}
	cd.topRightIdx = len(cd.Top)

	cd.thisRange = infRange

	kr := getKeyRange(cd.Top)
	left, right := nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
	overlappingTables := nextLevel.tables[left:right]
	cd.botLeftIdx = left
	cd.botRightIdx = right
	cd.fillBottomTables(overlappingTables)
	for _, t := range cd.Bot {
		cd.botSize += t.Size()
	}

	if len(overlappingTables) == 0 { // the bottom-most level
		cd.nextRange = kr
	} else {
		cd.nextRange = getKeyRange(overlappingTables)
	}

	if !cs.compareAndAdd(thisAndNextLevelRLocked{}, cd, thisLevel) {
		return false
	}

	return true
}

const maxCompactionExpandSize = 1 << 30 // 1GB

func (cd *CompactDef) fillTables(cs *compactStatus, thisLevel, nextLevel *levelHandler) bool {
	cd.lockLevels(thisLevel, nextLevel)
	defer cd.unlockLevels(thisLevel, nextLevel)

	if len(thisLevel.tables) == 0 {
		return false
	}
	this := make([]table.Table, len(thisLevel.tables))
	copy(this, thisLevel.tables)
	next := make([]table.Table, len(nextLevel.tables))
	copy(next, nextLevel.tables)

	// First pick the table with the max topSize/botSize ratio.
	var candidateRatio float64
	for i, t := range this {
		if cs.isCompacting(thisLevel.level, t) {
			continue
		}
		left, right := getTablesInRange(next, t.Smallest(), t.Biggest())
		if cs.isCompacting(nextLevel.level, next[left:right]...) {
			continue
		}
		botSize := sumTableSize(next[left:right])
		ratio := calcRatio(t.Size(), botSize)
		if ratio > candidateRatio {
			candidateRatio = ratio
			cd.topLeftIdx = i
			cd.topRightIdx = i + 1
			cd.Top = this[cd.topLeftIdx:cd.topRightIdx:cd.topRightIdx]
			cd.topSize = t.Size()
			cd.botLeftIdx = left
			cd.botRightIdx = right
			cd.botSize = botSize
		}
	}
	if len(cd.Top) == 0 {
		return false
	}
	bots := next[cd.botLeftIdx:cd.botRightIdx:cd.botRightIdx]
	// Expand to the left to include more top tables, as long as the ratio doesn't decrease and
	// the total size does not exceed maxCompactionExpandSize.
	for i := cd.topLeftIdx - 1; i >= 0; i-- {
		t := this[i]
		if cs.isCompacting(thisLevel.level, t) {
			break
		}
		left, right := getTablesInRange(next, t.Smallest(), t.Biggest())
		if right < cd.botLeftIdx {
			// A bottom table is skipped, we can compact in another run.
			break
		}
		if cs.isCompacting(nextLevel.level, next[left:cd.botLeftIdx]...) {
			break
		}
		newTopSize := t.Size() + cd.topSize
		newBotSize := sumTableSize(next[left:cd.botLeftIdx]) + cd.botSize
		newRatio := calcRatio(newTopSize, newBotSize)
		if newRatio > candidateRatio && (newTopSize+newBotSize) < maxCompactionExpandSize {
			cd.Top = append([]table.Table{t}, cd.Top...)
			cd.topLeftIdx--
			bots = append(next[left:cd.botLeftIdx:cd.botLeftIdx], bots...)
			cd.botLeftIdx = left
			cd.topSize = newTopSize
			cd.botSize = newBotSize
		} else {
			break
		}
	}
	// Expand to the right to include more top tables, as long as the ratio doesn't decrease and
	// the total size does not exceed maxCompactionExpandSize.
	for i := cd.topRightIdx; i < len(this); i++ {
		t := this[i]
		if cs.isCompacting(thisLevel.level, t) {
			break
		}
		left, right := getTablesInRange(next, t.Smallest(), t.Biggest())
		if left > cd.botRightIdx {
			// A bottom table is skipped, we can compact in another run.
			break
		}
		if cs.isCompacting(nextLevel.level, next[cd.botRightIdx:right]...) {
			break
		}
		newTopSize := t.Size() + cd.topSize
		newBotSize := sumTableSize(next[cd.botRightIdx:right]) + cd.botSize
		newRatio := calcRatio(newTopSize, newBotSize)
		if newRatio > candidateRatio && (newTopSize+newBotSize) < maxCompactionExpandSize {
			cd.Top = append(cd.Top, t)
			cd.topRightIdx++
			bots = append(bots, next[cd.botRightIdx:right]...)
			cd.botRightIdx = right
			cd.topSize = newTopSize
			cd.botSize = newBotSize
		} else {
			break
		}
	}
	cd.thisRange = keyRange{left: cd.Top[0].Smallest(), right: cd.Top[len(cd.Top)-1].Biggest()}
	if len(bots) > 0 {
		cd.nextRange = keyRange{left: bots[0].Smallest(), right: bots[len(bots)-1].Biggest()}
	} else {
		cd.nextRange = cd.thisRange
	}
	cd.fillBottomTables(bots)
	for _, t := range cd.SkippedTbls {
		cd.botSize -= t.Size()
	}
	return cs.compareAndAdd(thisAndNextLevelRLocked{}, cd, thisLevel)
}

func (cd *CompactDef) lockLevels(this, next *levelHandler) {
	this.RLock()
	next.RLock()
}

func (cd *CompactDef) unlockLevels(this, next *levelHandler) {
	next.RUnlock()
	this.RUnlock()
}

func (cd *CompactDef) moveDown() bool {
	return cd.Level > 0 && len(cd.Bot) == 0 && len(cd.SkippedTbls) == 0
}

func (cd *CompactDef) buildIterator() y.Iterator {
	// Create iterators across all the tables involved first.
	var iters []y.Iterator
	if cd.Level == 0 {
		iters = appendIteratorsReversed(iters, cd.Top, false)
	} else {
		iters = []y.Iterator{table.NewConcatIterator(cd.Top, false)}
	}

	// Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap.
	iters = append(iters, table.NewConcatIterator(cd.Bot, false))
	it := table.NewMergeIterator(iters, false)

	it.Rewind()
	return it
}

type compactor interface {
	compact(cd *CompactDef, stats *y.CompactionStats, discardStats *DiscardStats) ([]*sstable.BuildResult, error)
}

type localCompactor struct {
}

func (c *localCompactor) compact(cd *CompactDef, stats *y.CompactionStats, discardStats *DiscardStats) ([]*sstable.BuildResult, error) {
	return CompactTables(cd, stats, discardStats)
}

type remoteCompactor struct {
	remoteAddr string
	allFiles   []*os.File
	req        *CompactionReq
}

type CompactionReq struct {
	Level        int     `json:"level"`
	Overlap      bool    `json:"overlap"`
	NumTop       int     `json:"num_top"`
	FileSizes    []int64 `json:"file_sizes"`
	SafeTS       uint64  `json:"safe_ts"`
	MaxTableSize int64   `json:"max_table_size"`
}

type CompactionResp struct {
	Error     string             `json:"error"`
	FileSizes []int64            `json:"file_sizes"`
	Stats     *y.CompactionStats `json:"stats"`
	NumSkip   int64              `json:"num_skip"`
	SkipBytes int64              `json:"skip_bytes"`
}

func (rc *remoteCompactor) compact(cd *CompactDef, stats *y.CompactionStats, discardStats *DiscardStats) ([]*sstable.BuildResult, error) {
	defer rc.cleanup()
	rc.req = &CompactionReq{
		Level:        cd.Level,
		Overlap:      cd.HasOverlap,
		NumTop:       len(cd.Top),
		SafeTS:       cd.SafeTS,
		MaxTableSize: cd.Opt.MaxTableSize,
	}
	err := rc.appendFiles(cd.Top)
	if err != nil {
		return nil, err
	}
	err = rc.appendFiles(cd.Bot)
	if err != nil {
		return nil, err
	}
	conn, err := net.Dial("tcp", rc.remoteAddr)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	err = writeJSON(conn, rc.req)
	if err != nil {
		return nil, err
	}
	for _, file := range rc.allFiles {
		_, err = io.Copy(conn, file)
		if err != nil {
			return nil, err
		}
	}
	resp := new(CompactionResp)
	err = readJSON(conn, resp)
	if err != nil {
		return nil, err
	}
	if len(resp.Error) > 0 {
		return nil, fmt.Errorf("remote compaction error:%s", resp.Error)
	}
	*stats = *resp.Stats
	discardStats.numSkips = resp.NumSkip
	discardStats.skippedBytes = resp.SkipBytes
	var newFileNames []*sstable.BuildResult
	for i := 0; i < len(resp.FileSizes); i += 2 {
		fileID := cd.AllocIDFunc()
		filename := sstable.NewFilename(fileID, cd.Dir)
		err = readFile(conn, filename, resp.FileSizes[i])
		if err != nil {
			return nil, err
		}
		err = readFile(conn, sstable.IndexFilename(filename), resp.FileSizes[i+1])
		if err != nil {
			return nil, err
		}
		newFileNames = append(newFileNames, &sstable.BuildResult{FileName: filename})
	}
	return newFileNames, nil
}

func (rc *remoteCompactor) appendFiles(tbls []table.Table) error {
	for _, tbl := range tbls {
		sst := tbl.(*sstable.Table)
		fn := sst.Filename()
		err := rc.appendFile(fn)
		if err != nil {
			return err
		}
		err = rc.appendFile(sstable.IndexFilename(fn))
		if err != nil {
			return err
		}
	}
	return nil
}

func (rc *remoteCompactor) appendFile(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	rc.allFiles = append(rc.allFiles, file)
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	rc.req.FileSizes = append(rc.req.FileSizes, stat.Size())
	return nil
}

func (rc *remoteCompactor) cleanup() {
	for _, file := range rc.allFiles {
		file.Close()
	}
}

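// Remote compaction wire protocol (as implemented by remoteCompactor.compact above and
// CompactionServer.handleConn below):
//
//  1. The worker dials the server over TCP and sends a CompactionReq framed as a 4-byte
//     big-endian length followed by the JSON payload (see writeJSON/readJSON).
//  2. It then streams the raw data and index files of the Top tables followed by the Bot
//     tables; FileSizes lists their sizes in the same order, data file first and index
//     file second for each table.
//  3. The server compacts the received tables in memory and replies with a CompactionResp
//     framed the same way, followed by the bytes of the newly built tables, again sized by
//     FileSizes in (data, index) pairs.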
type CompactionServer struct {
	l net.Listener
}

func NewCompactionServer(addr string) (*CompactionServer, error) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return &CompactionServer{
		l: l,
	}, nil
}

func (s *CompactionServer) Close() {
	s.l.Close()
}

func (s *CompactionServer) Run() {
	for {
		conn, err := s.l.Accept()
		if err != nil {
			log.S().Error(err)
			return
		}
		go func() {
			err := s.handleConn(conn)
			if err != nil {
				s.sendError(conn, err)
			}
			conn.Close()
		}()
	}
}

func (c *CompactionServer) handleConn(conn net.Conn) error {
	req := new(CompactionReq)
	err := readJSON(conn, req)
	if err != nil {
		return err
	}
	var recvFiles []*sstable.BuildResult
	for i := 0; i < len(req.FileSizes); i += 2 {
		result := new(sstable.BuildResult)
		result.FileData = make([]byte, req.FileSizes[i])
		result.IndexData = make([]byte, req.FileSizes[i+1])
		_, err = io.ReadFull(conn, result.FileData)
		if err != nil {
			return err
		}
		_, err = io.ReadFull(conn, result.IndexData)
		if err != nil {
			return err
		}
		recvFiles = append(recvFiles, result)
	}
	cd := new(CompactDef)
	cd.HasOverlap = req.Overlap
	for i, result := range recvFiles {
		t, err := sstable.OpenInMemoryTable(result.FileData, result.IndexData)
		if err != nil {
			return err
		}
		if i < req.NumTop {
			cd.Top = append(cd.Top, t)
		} else {
			cd.Bot = append(cd.Bot, t)
		}
	}
	cd.Level = req.Level
	cd.SafeTS = req.SafeTS
	cd.Opt = DefaultOptions.TableBuilderOptions
	// Apply the requested table size after resetting Opt to the defaults, so it is not
	// overwritten by them.
	cd.Opt.MaxTableSize = req.MaxTableSize
	cd.Opt.CompressionPerLevel = make([]options.CompressionType, 7)
	cd.InMemory = true
	stats := new(y.CompactionStats)
	discardStats := new(DiscardStats)
	newFilenames, err := CompactTables(cd, stats, discardStats)
	if err != nil {
		return err
	}
	for _, t := range cd.Top {
		t.Close()
	}
	for _, t := range cd.Bot {
		t.Close()
	}
	resp := new(CompactionResp)
	resp.Stats = stats
	resp.NumSkip = discardStats.numSkips
	resp.SkipBytes = discardStats.skippedBytes
	c.sendResponse(conn, newFilenames, resp)
	return nil
}

func (c *CompactionServer) sendResponse(conn net.Conn, newFiles []*sstable.BuildResult, resp *CompactionResp) {
	for _, file := range newFiles {
		resp.FileSizes = append(resp.FileSizes, int64(len(file.FileData)), int64(len(file.IndexData)))
	}
	err := writeJSON(conn, resp)
	if err != nil {
		log.S().Error(err)
	}
	for _, file := range newFiles {
		_, err = conn.Write(file.FileData)
		if err != nil {
			log.S().Error(err)
			break
		}
		_, err = conn.Write(file.IndexData)
		if err != nil {
			log.S().Error(err)
		}
	}
}

func (c *CompactionServer) sendError(conn net.Conn, err error) error {
	resp := &CompactionResp{Error: err.Error()}
	return writeJSON(conn, resp)
}

func readJSON(conn net.Conn, v interface{}) error {
	metaSizeBuf := make([]byte, 4)
	_, err := io.ReadFull(conn, metaSizeBuf)
	if err != nil {
		return err
	}
	metabuf := make([]byte, binary.BigEndian.Uint32(metaSizeBuf))
	_, err = io.ReadFull(conn, metabuf)
	if err != nil {
		return err
	}
	return json.Unmarshal(metabuf, v)
}

func writeJSON(conn net.Conn, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(data)))
	_, err = conn.Write(sizeBuf)
	if err != nil {
		return err
	}
	_, err = conn.Write(data)
	return err
}

func readFile(conn net.Conn, fileName string, fileSize int64) error {
	reader := io.LimitReader(conn, fileSize)
	file, err := os.Create(fileName)
	if err != nil {
		log.S().Error(err)
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, reader)
	if err != nil {
		log.S().Error(err)
	}
	return err
}
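// The sketch below is illustrative only and not part of the badger API: it exercises the
// length-prefixed JSON framing used by readJSON/writeJSON over an in-memory net.Pipe
// connection. The function name and the request values are arbitrary.
func exampleJSONFraming() error {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()
	errCh := make(chan error, 1)
	go func() {
		// Writer side: 4-byte big-endian length, then the JSON-encoded request.
		errCh <- writeJSON(client, &CompactionReq{Level: 1, NumTop: 2})
	}()
	// Reader side: decode the same frame back into a CompactionReq.
	req := new(CompactionReq)
	if err := readJSON(server, req); err != nil {
		return err
	}
	return <-errCh
}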