github.com/yinchengtsinghua/golang-Eos-dpos-Ethereum@v0.0.0-20190121132951-92cc4225ed8e/swarm/storage/ldbstore.go (about)

//This source code was translated, analyzed and modified by Yin Cheng of Tsinghua University.
//Yin Cheng's QQ: 77025077
//Yin Cheng's WeChat: 18510341407
//Yin Cheng's QQ group: 721929980
//Yin Cheng's email: yinc13@mails.tsinghua.edu.cn
//Yin Cheng graduated from Tsinghua University and is a Microsoft MVP in the blockchain field.
//https://mvp.microsoft.com/zh-cn/PublicProfile/4033620

package storage

import (
    "archive/tar"
    "bytes"
    "context"
    "encoding/binary"
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "sort"
    "sync"

    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/log"
    "github.com/ethereum/go-ethereum/swarm/storage/mock"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/opt"
)

const (
    gcArrayFreeRatio = 0.1
    maxGCitems       = 5000 // max number of items to be gc'd per call to collectGarbage()
)

var (
    keyIndex       = byte(0)
    keyOldData     = byte(1)
    keyAccessCnt   = []byte{2}
    keyEntryCnt    = []byte{3}
    keyDataIdx     = []byte{4}
    keyData        = byte(6)
    keyDistanceCnt = byte(7)
)

type gcItem struct {
    idx    uint64
    value  uint64
    idxKey []byte
    po     uint8
}

type LDBStoreParams struct {
    *StoreParams
    Path string
    Po   func(Address) uint8
}

// NewLDBStoreParams constructs LDBStoreParams with the specified values.
func NewLDBStoreParams(storeparams *StoreParams, path string) *LDBStoreParams {
    return &LDBStoreParams{
        StoreParams: storeparams,
        Path:        path,
        Po:          func(k Address) (ret uint8) { return uint8(Proximity(storeparams.BaseKey[:], k[:])) },
    }
}

type LDBStore struct {
    db *LDBDatabase

    // this should be stored in db, accessed transactionally
    entryCnt  uint64 // number of items in the LevelDB
    accessCnt uint64 // ever-accumulating number increased every time we read/access an entry
    dataIdx   uint64 // similar to entryCnt, but we only increment it
    capacity  uint64
    bucketCnt []uint64

    hashfunc SwarmHasher
    po       func(Address) uint8

    batchC   chan bool
    batchesC chan struct{}
    batch    *leveldb.Batch
    lock     sync.RWMutex
    quit     chan struct{}

    // encodeDataFunc is used to bypass the default functionality of DbStore
    // with mock.NodeStore for testing purposes.
    encodeDataFunc func(chunk *Chunk) []byte
    // If getDataFunc is defined, it will be used for retrieving the chunk data
    // instead of the local LevelDB database.
    getDataFunc func(addr Address) (data []byte, err error)
}

// NewLDBStore opens (or creates) the LevelDB database at params.Path, restores
// the persisted entry, access and data-index counters, and starts the
// background batch writer goroutine.
func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
    s = new(LDBStore)
    s.hashfunc = params.Hash
    s.quit = make(chan struct{})

    s.batchC = make(chan bool)
    s.batchesC = make(chan struct{}, 1)
    go s.writeBatches()
    s.batch = new(leveldb.Batch)
    // associate encodeData with default functionality
    s.encodeDataFunc = encodeData

    s.db, err = NewLDBDatabase(params.Path)
    if err != nil {
        return nil, err
    }

    s.po = params.Po
    s.setCapacity(params.DbCapacity)

    s.bucketCnt = make([]uint64, 0x100)
    for i := 0; i < 0x100; i++ {
        k := make([]byte, 2)
        k[0] = keyDistanceCnt
        k[1] = uint8(i)
        cnt, _ := s.db.Get(k)
        s.bucketCnt[i] = BytesToU64(cnt)
        s.bucketCnt[i]++
    }
    data, _ := s.db.Get(keyEntryCnt)
    s.entryCnt = BytesToU64(data)
    s.entryCnt++
    data, _ = s.db.Get(keyAccessCnt)
    s.accessCnt = BytesToU64(data)
    s.accessCnt++
    data, _ = s.db.Get(keyDataIdx)
    s.dataIdx = BytesToU64(data)
    s.dataIdx++

    return s, nil
}
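// NewLDBStore restores the three global counters from their single-byte keys
// and bumps each by one. A minimal sketch (illustrative only, not part of the
// original file) of how those counters round-trip through the big-endian
// helpers defined below; counterRoundTripSketch is a hypothetical name:
func counterRoundTripSketch() {
    // encode a counter the way writeBatch persists it
    raw := U64ToBytes(42)
    // decode it the way NewLDBStore restores it on startup
    fmt.Println(BytesToU64(raw)) // 42
    // BytesToU64 returns 0 for anything shorter than 8 bytes, so a missing
    // LevelDB key safely restores a zero counter
    fmt.Println(BytesToU64(nil)) // 0
}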
// NewMockDbStore creates a new instance of DbStore with mockStore set to a
// provided value. If the mockStore argument is nil, this function behaves
// exactly as NewLDBStore.
func NewMockDbStore(params *LDBStoreParams, mockStore *mock.NodeStore) (s *LDBStore, err error) {
    s, err = NewLDBStore(params)
    if err != nil {
        return nil, err
    }

    // replace put and get with mock store functionality
    if mockStore != nil {
        s.encodeDataFunc = newMockEncodeDataFunc(mockStore)
        s.getDataFunc = newMockGetDataFunc(mockStore)
    }
    return
}

type dpaDBIndex struct {
    Idx    uint64
    Access uint64
}

func BytesToU64(data []byte) uint64 {
    if len(data) < 8 {
        return 0
    }
    return binary.BigEndian.Uint64(data)
}

func U64ToBytes(val uint64) []byte {
    data := make([]byte, 8)
    binary.BigEndian.PutUint64(data, val)
    return data
}

func (s *LDBStore) updateIndexAccess(index *dpaDBIndex) {
    index.Access = s.accessCnt
}

func getIndexKey(hash Address) []byte {
    hashSize := len(hash)
    key := make([]byte, hashSize+1)
    key[0] = keyIndex
    copy(key[1:], hash[:])
    return key
}

func getOldDataKey(idx uint64) []byte {
    key := make([]byte, 9)
    key[0] = keyOldData
    binary.BigEndian.PutUint64(key[1:9], idx)

    return key
}

func getDataKey(idx uint64, po uint8) []byte {
    key := make([]byte, 10)
    key[0] = keyData
    key[1] = po
    binary.BigEndian.PutUint64(key[2:], idx)

    return key
}

func encodeIndex(index *dpaDBIndex) []byte {
    data, _ := rlp.EncodeToBytes(index)
    return data
}

func encodeData(chunk *Chunk) []byte {
    // Always create a new underlying array for the returned byte slice.
    // The chunk.Addr array may be used in the returned slice, which may be
    // changed later in the code or by the LevelDB, resulting in an
    // unexpected error.
    return append(append([]byte{}, chunk.Addr[:]...), chunk.SData...)
}

func decodeIndex(data []byte, index *dpaDBIndex) error {
    dec := rlp.NewStream(bytes.NewReader(data), 0)
    return dec.Decode(index)
}

func decodeData(data []byte, chunk *Chunk) {
    chunk.SData = data[32:]
    chunk.Size = int64(binary.BigEndian.Uint64(data[32:40]))
}

func decodeOldData(data []byte, chunk *Chunk) {
    chunk.SData = data
    chunk.Size = int64(binary.BigEndian.Uint64(data[0:8]))
}

func (s *LDBStore) collectGarbage(ratio float32) {
    metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)

    it := s.db.NewIterator()
    defer it.Release()

    garbage := []*gcItem{}
    gcnt := 0

    for ok := it.Seek([]byte{keyIndex}); ok && (gcnt < maxGCitems) && (uint64(gcnt) < s.entryCnt); ok = it.Next() {
        itkey := it.Key()

        if (itkey == nil) || (itkey[0] != keyIndex) {
            break
        }

        // the iterator reuses its key buffer, so the key must be copied
        // before the next call to it.Next()
        key := make([]byte, len(it.Key()))
        copy(key, it.Key())

        val := it.Value()

        var index dpaDBIndex

        hash := key[1:]
        decodeIndex(val, &index)
        po := s.po(hash)

        gci := &gcItem{
            idxKey: key,
            idx:    index.Idx,
            value:  index.Access, // the smaller, the more likely to be gc'd; see the sort below
            po:     po,
        }

        garbage = append(garbage, gci)
        gcnt++
    }

    sort.Slice(garbage[:gcnt], func(i, j int) bool { return garbage[i].value < garbage[j].value })

    cutoff := int(float32(gcnt) * ratio)
    metrics.GetOrRegisterCounter("ldbstore.collectgarbage.delete", nil).Inc(int64(cutoff))

    for i := 0; i < cutoff; i++ {
        s.delete(garbage[i].idx, garbage[i].idxKey, garbage[i].po)
    }
}
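// All records share one LevelDB keyspace, distinguished by a one-byte prefix:
// index entries live under keyIndex|hash, and data entries live under
// keyData|po|idx with a big-endian index, so each proximity bin is contiguous
// and ordered by storage index (which is what SyncIterator relies on below).
// An illustrative sketch, not part of the original file; keyLayoutSketch is a
// hypothetical name:
func keyLayoutSketch() {
    addr := Address(make([]byte, 32))
    fmt.Printf("index key: %x\n", getIndexKey(addr)) // 00 + 32-byte hash
    fmt.Printf("data key:  %x\n", getDataKey(1, 3))  // 06 03 0000000000000001
}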
// Export writes all chunks from the store to a tar archive, returning the
// number of chunks written.
func (s *LDBStore) Export(out io.Writer) (int64, error) {
    tw := tar.NewWriter(out)
    defer tw.Close()

    it := s.db.NewIterator()
    defer it.Release()
    var count int64
    for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
        key := it.Key()
        if (key == nil) || (key[0] != keyIndex) {
            break
        }

        var index dpaDBIndex

        hash := key[1:]
        decodeIndex(it.Value(), &index)
        po := s.po(hash)
        datakey := getDataKey(index.Idx, po)
        log.Trace("store.export", "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po)
        data, err := s.db.Get(datakey)
        if err != nil {
            log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
            continue
        }

        hdr := &tar.Header{
            Name: hex.EncodeToString(hash),
            Mode: 0644,
            Size: int64(len(data)),
        }
        if err := tw.WriteHeader(hdr); err != nil {
            return count, err
        }
        if _, err := tw.Write(data); err != nil {
            return count, err
        }
        count++
    }

    return count, nil
}

// Import reads chunks into the store from a tar archive, returning the number
// of chunks read.
func (s *LDBStore) Import(in io.Reader) (int64, error) {
    tr := tar.NewReader(in)

    var count int64
    var wg sync.WaitGroup
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            break
        } else if err != nil {
            return count, err
        }

        if len(hdr.Name) != 64 {
            log.Warn("ignoring non-chunk file", "name", hdr.Name)
            continue
        }

        keybytes, err := hex.DecodeString(hdr.Name)
        if err != nil {
            log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
            continue
        }

        data, err := ioutil.ReadAll(tr)
        if err != nil {
            return count, err
        }
        key := Address(keybytes)
        chunk := NewChunk(key, nil)
        chunk.SData = data[32:]
        s.Put(context.TODO(), chunk)
        wg.Add(1)
        go func() {
            defer wg.Done()
            <-chunk.dbStoredC
        }()
        count++
    }
    wg.Wait()
    return count, nil
}
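// Export and Import pair up as a tar-based backup format: each archive entry
// is named with the hex chunk hash and holds the encoded chunk payload. A
// minimal round-trip sketch between two already-open stores (illustrative
// only, not part of the original file; exportImportSketch is a hypothetical
// name):
func exportImportSketch(src, dst *LDBStore) (int64, error) {
    var buf bytes.Buffer
    // stream every chunk of src into an in-memory tar archive
    if _, err := src.Export(&buf); err != nil {
        return 0, err
    }
    // replay the archive into dst; returns the number of chunks imported
    return dst.Import(&buf)
}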
// Cleanup iterates over the database, counts indexed chunks whose data cannot
// be found under any proximity order, and deletes chunks whose stored data is
// larger than the default chunk size.
func (s *LDBStore) Cleanup() {
    var errorsFound, removed, total int

    it := s.db.NewIterator()
    defer it.Release()
    for ok := it.Seek([]byte{keyIndex}); ok; ok = it.Next() {
        key := it.Key()
        if (key == nil) || (key[0] != keyIndex) {
            break
        }
        total++
        var index dpaDBIndex
        err := decodeIndex(it.Value(), &index)
        if err != nil {
            log.Warn("Cannot decode", "err", err)
            errorsFound++
            continue
        }
        hash := key[1:]
        po := s.po(hash)
        datakey := getDataKey(index.Idx, po)
        data, err := s.db.Get(datakey)
        if err != nil {
            found := false

            // the highest possible proximity is 255; iterate with an int
            // counter so the uint8 po cannot wrap around and loop forever
            for p := 1; p <= 255; p++ {
                po = uint8(p)
                datakey = getDataKey(index.Idx, po)
                data, err = s.db.Get(datakey)
                if err == nil {
                    found = true
                    break
                }
            }

            if !found {
                log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed with any po", key[:]))
                errorsFound++
                continue
            }
        }

        c := &Chunk{}
        ck := data[:32]
        decodeData(data, c)

        cs := int64(binary.LittleEndian.Uint64(c.SData[:8]))
        log.Trace("chunk", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)

        if len(c.SData) > chunk.DefaultSize+8 {
            log.Warn("chunk for cleanup", "key", fmt.Sprintf("%x", key[:]), "ck", fmt.Sprintf("%x", ck), "dkey", fmt.Sprintf("%x", datakey), "dataidx", index.Idx, "po", po, "len data", len(data), "len sdata", len(c.SData), "size", cs)
            s.delete(index.Idx, getIndexKey(key[1:]), po)
            removed++
            errorsFound++
        }
    }

    log.Warn(fmt.Sprintf("Found %v errors out of %v entries. Removed %v chunks.", errorsFound, total, removed))
}

// ReIndex migrates legacy records stored under keyOldData into the current
// po-prefixed data key format, updating the per-bin counters as it goes.
func (s *LDBStore) ReIndex() {
    it := s.db.NewIterator()
    startPosition := []byte{keyOldData}
    it.Seek(startPosition)
    var key []byte
    var errorsFound, total int
    for it.Valid() {
        key = it.Key()
        if (key == nil) || (key[0] != keyOldData) {
            break
        }
        data := it.Value()
        hasher := s.hashfunc()
        hasher.Write(data)
        hash := hasher.Sum(nil)

        newKey := make([]byte, 10)
        oldCntKey := make([]byte, 2)
        newCntKey := make([]byte, 2)
        oldCntKey[0] = keyDistanceCnt
        newCntKey[0] = keyDistanceCnt
        key[0] = keyData
        key[1] = s.po(Address(key[1:]))
        oldCntKey[1] = key[1]
        newCntKey[1] = s.po(Address(newKey[1:]))
        copy(newKey[2:], key[1:])
        newValue := append(hash, data...)

        batch := new(leveldb.Batch)
        batch.Delete(key)
        s.bucketCnt[oldCntKey[1]]--
        batch.Put(oldCntKey, U64ToBytes(s.bucketCnt[oldCntKey[1]]))
        batch.Put(newKey, newValue)
        s.bucketCnt[newCntKey[1]]++
        batch.Put(newCntKey, U64ToBytes(s.bucketCnt[newCntKey[1]]))
        s.db.Write(batch)
        it.Next()
    }
    it.Release()
    log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
}

func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
    metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)

    batch := new(leveldb.Batch)
    batch.Delete(idxKey)
    batch.Delete(getDataKey(idx, po))
    s.entryCnt--
    s.bucketCnt[po]--
    cntKey := make([]byte, 2)
    cntKey[0] = keyDistanceCnt
    cntKey[1] = po
    batch.Put(keyEntryCnt, U64ToBytes(s.entryCnt))
    batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
    s.db.Write(batch)
}

func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 {
    s.lock.RLock()
    defer s.lock.RUnlock()

    return s.bucketCnt[po]
}

func (s *LDBStore) Size() uint64 {
    s.lock.Lock()
    defer s.lock.Unlock()
    return s.entryCnt
}

func (s *LDBStore) CurrentStorageIndex() uint64 {
    s.lock.RLock()
    defer s.lock.RUnlock()
    return s.dataIdx
}

func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) {
    metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1)
    log.Trace("ldbstore.put", "key", chunk.Addr)

    ikey := getIndexKey(chunk.Addr)
    var index dpaDBIndex

    po := s.po(chunk.Addr)
    s.lock.Lock()
    defer s.lock.Unlock()

    log.Trace("ldbstore.put: s.db.Get", "key", chunk.Addr, "ikey", fmt.Sprintf("%x", ikey))
    idata, err := s.db.Get(ikey)
    if err != nil {
        s.doPut(chunk, &index, po)
        batchC := s.batchC
        go func() {
            <-batchC
            chunk.markAsStored()
        }()
    } else {
        log.Trace("ldbstore.put: chunk already exists, only update access", "key", chunk.Addr)
        decodeIndex(idata, &index)
        chunk.markAsStored()
    }
    index.Access = s.accessCnt
    s.accessCnt++
    idata = encodeIndex(&index)
    s.batch.Put(ikey, idata)
    select {
    case s.batchesC <- struct{}{}:
    default:
    }
}
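// Put is asynchronous: the chunk is added to the in-memory batch, and
// markAsStored fires only after writeBatches has committed that batch. A
// minimal store-and-wait sketch (illustrative only, not part of the original
// file; putAndWaitSketch is a hypothetical name):
func putAndWaitSketch(s *LDBStore, addr Address, data []byte) {
    chunk := NewChunk(addr, nil)
    chunk.SData = data
    s.Put(context.TODO(), chunk)
    // block until the batch containing this chunk has hit LevelDB, the same
    // pattern Import uses above
    <-chunk.dbStoredC
}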
// force putting into db, does not check or update the access index
func (s *LDBStore) doPut(chunk *Chunk, index *dpaDBIndex, po uint8) {
    data := s.encodeDataFunc(chunk)
    dkey := getDataKey(s.dataIdx, po)
    s.batch.Put(dkey, data)
    index.Idx = s.dataIdx
    s.bucketCnt[po] = s.dataIdx
    s.entryCnt++
    s.dataIdx++

    cntKey := make([]byte, 2)
    cntKey[0] = keyDistanceCnt
    cntKey[1] = po
    s.batch.Put(cntKey, U64ToBytes(s.bucketCnt[po]))
}

func (s *LDBStore) writeBatches() {
mainLoop:
    for {
        select {
        case <-s.quit:
            break mainLoop
        case <-s.batchesC:
            s.lock.Lock()
            b := s.batch
            e := s.entryCnt
            d := s.dataIdx
            a := s.accessCnt
            c := s.batchC
            s.batchC = make(chan bool)
            s.batch = new(leveldb.Batch)
            err := s.writeBatch(b, e, d, a)
            // a failed batch write is only logged, not retried
            if err != nil {
                log.Error(fmt.Sprintf("spawn batch write (%d entries): %v", b.Len(), err))
            }
            close(c)
            for e > s.capacity {
                // collect garbage in a separate goroutine so that the quit
                // channel can still be checked while a gc round is running
                done := make(chan struct{})
                go func() {
                    s.collectGarbage(gcArrayFreeRatio)
                    close(done)
                }()

                e = s.entryCnt
                select {
                case <-s.quit:
                    s.lock.Unlock()
                    break mainLoop
                case <-done:
                }
            }
            s.lock.Unlock()
        }
    }
    log.Trace(fmt.Sprintf("DbStore: quit batch write loop"))
}

// must be called non concurrently
func (s *LDBStore) writeBatch(b *leveldb.Batch, entryCnt, dataIdx, accessCnt uint64) error {
    b.Put(keyEntryCnt, U64ToBytes(entryCnt))
    b.Put(keyDataIdx, U64ToBytes(dataIdx))
    b.Put(keyAccessCnt, U64ToBytes(accessCnt))
    l := b.Len()
    if err := s.db.Write(b); err != nil {
        return fmt.Errorf("unable to write batch: %v", err)
    }
    log.Trace(fmt.Sprintf("batch write (%d entries)", l))
    return nil
}

// newMockEncodeDataFunc returns a function that stores the chunk data to a
// mock store to bypass the default encodeData functionality. The constructed
// function returns only the chunk address, as the mock store holds the data
// while DbStore still needs to create the index.
func newMockEncodeDataFunc(mockStore *mock.NodeStore) func(chunk *Chunk) []byte {
    return func(chunk *Chunk) []byte {
        if err := mockStore.Put(chunk.Addr, encodeData(chunk)); err != nil {
            log.Error(fmt.Sprintf("%T: Chunk %v put: %v", mockStore, chunk.Addr.Log(), err))
        }
        return chunk.Addr[:]
    }
}

// tryAccessIdx tries to find the index entry for ikey; if found, it updates
// the access count in the batch and returns true.
func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool {
    idata, err := s.db.Get(ikey)
    if err != nil {
        return false
    }
    decodeIndex(idata, index)
    s.batch.Put(keyAccessCnt, U64ToBytes(s.accessCnt))
    s.accessCnt++
    index.Access = s.accessCnt
    idata = encodeIndex(index)
    s.batch.Put(ikey, idata)
    select {
    case s.batchesC <- struct{}{}:
    default:
    }
    return true
}

func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) {
    metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1)
    log.Trace("ldbstore.get", "key", addr)

    s.lock.Lock()
    defer s.lock.Unlock()
    return s.get(addr)
}

func (s *LDBStore) get(addr Address) (chunk *Chunk, err error) {
    var indx dpaDBIndex

    if s.tryAccessIdx(getIndexKey(addr), &indx) {
        var data []byte
        if s.getDataFunc != nil {
            // if getDataFunc is defined, use it to retrieve the chunk data
            log.Trace("ldbstore.get retrieve with getDataFunc", "key", addr)
            data, err = s.getDataFunc(addr)
            if err != nil {
                return
            }
        } else {
            // default DbStore functionality to retrieve chunk data
            proximity := s.po(addr)
            datakey := getDataKey(indx.Idx, proximity)
            data, err = s.db.Get(datakey)
            log.Trace("ldbstore.get retrieve", "key", addr, "indexkey", indx.Idx, "datakey", fmt.Sprintf("%x", datakey), "proximity", proximity)
            if err != nil {
                log.Trace("ldbstore.get chunk found but could not be accessed", "key", addr, "err", err)
                s.delete(indx.Idx, getIndexKey(addr), s.po(addr))
                return
            }
        }

        chunk = NewChunk(addr, nil)
        chunk.markAsStored()
        decodeData(data, chunk)
    } else {
        err = ErrChunkNotFound
    }

    return
}
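// Get returns ErrChunkNotFound when no index entry exists; if the index
// exists but the data record is missing, get deletes the stale index and
// returns the underlying LevelDB error instead. A minimal lookup sketch
// (illustrative only, not part of the original file; getIfPresentSketch is a
// hypothetical name):
func getIfPresentSketch(s *LDBStore, addr Address) []byte {
    chunk, err := s.Get(context.TODO(), addr)
    if err == ErrChunkNotFound {
        return nil // not stored locally
    }
    if err != nil {
        return nil // stale index was self-healed; a real caller would propagate err
    }
    return chunk.SData
}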
// newMockGetDataFunc returns a function that reads chunk data from the mock
// database; it is used as the value of DbStore.getDataFunc to bypass the
// default functionality of DbStore with a mock store.
func newMockGetDataFunc(mockStore *mock.NodeStore) func(addr Address) (data []byte, err error) {
    return func(addr Address) (data []byte, err error) {
        data, err = mockStore.Get(addr)
        if err == mock.ErrNotFound {
            // preserve ErrChunkNotFound error
            err = ErrChunkNotFound
        }
        return data, err
    }
}

func (s *LDBStore) updateAccessCnt(addr Address) {
    s.lock.Lock()
    defer s.lock.Unlock()

    var index dpaDBIndex
    s.tryAccessIdx(getIndexKey(addr), &index) // result is discarded; only the access count is updated
}

func (s *LDBStore) setCapacity(c uint64) {
    s.lock.Lock()
    defer s.lock.Unlock()

    s.capacity = c

    if s.entryCnt > c {
        ratio := float32(1.01) - float32(c)/float32(s.entryCnt)
        if ratio < gcArrayFreeRatio {
            ratio = gcArrayFreeRatio
        }
        if ratio > 1 {
            ratio = 1
        }
        for s.entryCnt > c {
            s.collectGarbage(ratio)
        }
    }
}

func (s *LDBStore) Close() {
    close(s.quit)
    s.db.Close()
}

// SyncIterator(since, until, po, f) calls f on each hash of a bin po from
// since to until.
func (s *LDBStore) SyncIterator(since uint64, until uint64, po uint8, f func(Address, uint64) bool) error {
    metrics.GetOrRegisterCounter("ldbstore.synciterator", nil).Inc(1)

    sincekey := getDataKey(since, po)
    untilkey := getDataKey(until, po)
    it := s.db.NewIterator()
    defer it.Release()

    for ok := it.Seek(sincekey); ok; ok = it.Next() {
        metrics.GetOrRegisterCounter("ldbstore.synciterator.seek", nil).Inc(1)

        dbkey := it.Key()
        if dbkey[0] != keyData || dbkey[1] != po || bytes.Compare(untilkey, dbkey) < 0 {
            break
        }
        key := make([]byte, 32)
        val := it.Value()
        copy(key, val[:32])
        if !f(Address(key), binary.BigEndian.Uint64(dbkey[2:])) {
            break
        }
    }
    return it.Error()
}

// databaseExists reports whether a LevelDB database already exists at path.
func databaseExists(path string) bool {
    o := &opt.Options{
        ErrorIfMissing: true,
    }
    tdb, err := leveldb.OpenFile(path, o)
    if err != nil {
        return false
    }
    defer tdb.Close()
    return true
}
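// SyncIterator walks a single proximity bin in storage-index order, which is
// how the syncer streams chunks stored since a given index. A minimal
// collection sketch (illustrative only, not part of the original file;
// binAddressesSketch is a hypothetical name):
func binAddressesSketch(s *LDBStore, po uint8, since, until uint64) ([]Address, error) {
    var addrs []Address
    err := s.SyncIterator(since, until, po, func(addr Address, idx uint64) bool {
        addrs = append(addrs, addr)
        return true // keep iterating; returning false would stop early
    })
    return addrs, err
}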