github.com/unicornultrafoundation/go-u2u@v1.0.0-rc1.0.20240205080301-e74a83d3fadc/cmd/u2u/launcher/db-transform.go

package launcher

import (
	"os"
	"path"
	"strings"
	"time"

	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/unicornultrafoundation/go-helios/common/bigendian"
	"github.com/unicornultrafoundation/go-helios/u2udb"
	"github.com/unicornultrafoundation/go-helios/u2udb/batched"
	"github.com/unicornultrafoundation/go-helios/u2udb/multidb"
	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/log"
	"gopkg.in/urfave/cli.v1"

	"github.com/unicornultrafoundation/go-u2u/integration"
	"github.com/unicornultrafoundation/go-u2u/utils"
	"github.com/unicornultrafoundation/go-u2u/utils/dbutil/autocompact"
)

// dbTransform rearranges the on-disk chaindata DBs so that they match the
// routing table of the current config: it reads the existing table layout,
// skips DBs that already match, verifies that the new layout is not
// contradictory, groups the affected tables into linked components and
// transforms each component via a temporary directory.
func dbTransform(ctx *cli.Context) error {
	cfg := makeAllConfigs(ctx)

	tmpPath := path.Join(cfg.Node.DataDir, "tmp")
	integration.MakeDBDirs(tmpPath)
	_ = os.RemoveAll(tmpPath)
	defer os.RemoveAll(tmpPath)

	// get supported DB producers
	dbTypes := makeUncheckedCachedDBsProducers(path.Join(cfg.Node.DataDir, "chaindata"))

	byReq, err := readRoutes(cfg, dbTypes)
	if err != nil {
		log.Crit("Failed to read routes", "err", err)
	}
	byDB := separateIntoDBs(byReq)

	// weed out DBs which don't need transformation
	{
		for _, byReqOfDB := range byDB {
			match := true
			for _, e := range byReqOfDB {
				if e.Old != e.New {
					match = false
					break
				}
			}
			if match {
				for _, e := range byReqOfDB {
					delete(byReq, e.Req)
				}
			}
		}
	}
	if len(byReq) == 0 {
		log.Info("No DB transformation is needed")
		return nil
	}

	// check if new layout is contradictory
	for _, e0 := range byReq {
		for _, e1 := range byReq {
			if e0 == e1 {
				continue
			}
			if dbLocatorOf(e0.New) == dbLocatorOf(e1.New) && strings.HasPrefix(e0.New.Table, e1.New.Table) {
				log.Crit("New DB layout is contradictory", "db_type", e0.New.Type, "db_name", e0.New.Name,
					"req0", e0.Req, "req1", e1.Req, "table0", e0.New.Table, "table1", e1.New.Table)
			}
		}
	}

	// separate entries into native-linked components
	byComponents := make([]map[string]dbMigrationEntry, 0)
	for componentI := 0; len(byReq) > 0; componentI++ {
		var someEntry dbMigrationEntry
		for _, e := range byReq {
			someEntry = e
			break
		}

		// DFS
		component := make(map[string]dbMigrationEntry)
		stack := make(dbMigrationEntries, 0)
		for pwalk := &someEntry; pwalk != nil; pwalk = stack.Pop() {
			if _, ok := component[pwalk.Req]; ok {
				continue
			}
			component[pwalk.Req] = *pwalk
			delete(byReq, pwalk.Req)
			for _, e := range byDB[dbLocatorOf(pwalk.Old)] {
				stack = append(stack, e)
			}
			for _, e := range byDB[dbLocatorOf(pwalk.New)] {
				stack = append(stack, e)
			}
		}
		byComponents = append(byComponents, component)
	}

	tmpDbTypes := makeUncheckedCachedDBsProducers(path.Join(cfg.Node.DataDir, "tmp"))
	for _, component := range byComponents {
		err := transformComponent(cfg.Node.DataDir, dbTypes, tmpDbTypes, component)
		if err != nil {
			log.Crit("Failed to transform component", "err", err)
		}
	}
	id := bigendian.Uint64ToBytes(uint64(time.Now().UnixNano()))
	for typ, producer := range dbTypes {
		err := clearDirtyFlags(id, producer)
		if err != nil {
			log.Crit("Failed to write clean FlushID", "type", typ, "err", err)
		}
	}

	memorizeDBPreset(cfg)
	log.Info("DB transformation is complete")

	return nil
}
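
// routesConflict is an illustrative restatement of the contradiction check
// above; it is not called by dbTransform and its name is hypothetical. Two new
// routes clash when they point into the same physical DB and one table name is
// a prefix of the other, because the shorter prefix would overlap the longer
// table's keyspace (the pairwise loop above covers both orders itself).
func routesConflict(a, b multidb.Route) bool {
	return dbLocatorOf(a) == dbLocatorOf(b) &&
		(strings.HasPrefix(a.Table, b.Table) || strings.HasPrefix(b.Table, a.Table))
}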

// dbMigrationEntry ties a routing request (Req) to the route where its table is
// currently stored (Old) and the route it should be moved to (New).
type dbMigrationEntry struct {
	Req string
	Old multidb.Route
	New multidb.Route
}

// dbMigrationEntries is a LIFO stack of entries, used as the DFS worklist in dbTransform.
type dbMigrationEntries []dbMigrationEntry

// Pop removes and returns the last pushed entry, or nil if the stack is empty.
func (ee *dbMigrationEntries) Pop() *dbMigrationEntry {
	l := len(*ee)
	if l == 0 {
		return nil
	}
	res := &(*ee)[l-1]
	*ee = (*ee)[:l-1]
	return res
}
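
// drainExample is a hedged usage sketch that is not called anywhere in the
// launcher (the function name is hypothetical): it shows how the LIFO Pop above
// is meant to be consumed, popping until Pop signals an empty stack by
// returning nil.
func drainExample(stack dbMigrationEntries) []string {
	reqs := make([]string, 0, len(stack))
	for e := stack.Pop(); e != nil; e = stack.Pop() {
		reqs = append(reqs, e.Req)
	}
	return reqs
}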

var dbLocatorOf = multidb.DBLocatorOf

// readRoutes opens every existing DB of every supported type, reads its tables
// list and maps each routing request to a migration entry: the route where the
// table is stored now (Old) and the route prescribed by the configured routing
// table (New).
func readRoutes(cfg *config, dbTypes map[multidb.TypeName]u2udb.FullDBProducer) (map[string]dbMigrationEntry, error) {
	router, err := multidb.NewProducer(dbTypes, cfg.DBs.Routing.Table, integration.TablesKey)
	if err != nil {
		return nil, err
	}
	byReq := make(map[string]dbMigrationEntry)

	for typ, producer := range dbTypes {
		for _, dbName := range producer.Names() {
			db, err := producer.OpenDB(dbName)
			if err != nil {
				log.Crit("DB opening error", "name", dbName, "err", err)
			}
			defer db.Close()
			tables, err := multidb.ReadTablesList(db, integration.TablesKey)
			if err != nil {
				log.Crit("Failed to read tables list", "name", dbName, "err", err)
			}
			for _, t := range tables {
				oldRoute := multidb.Route{
					Type:  typ,
					Name:  dbName,
					Table: t.Table,
				}
				newRoute := router.RouteOf(t.Req)
				newRoute.NoDrop = false
				byReq[t.Req] = dbMigrationEntry{
					Req: t.Req,
					New: newRoute,
					Old: oldRoute,
				}
			}
		}
	}
	return byReq, nil
}
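
// exampleEntry is a hedged illustration of the values readRoutes produces; it
// is not used by the launcher, and the request, DB names and type names below
// are placeholders rather than values taken from a real routing config.
func exampleEntry() dbMigrationEntry {
	return dbMigrationEntry{
		Req: "gossip/M",
		Old: multidb.Route{Type: "leveldb-fsh", Name: "gossip", Table: "M"},
		New: multidb.Route{Type: "pebble-fsh", Name: "gossip", Table: "M"},
	}
}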

// writeCleanTableRecords writes the tables list of every destination DB so that
// it reflects the new layout; each destination DB is written only once.
func writeCleanTableRecords(dbTypes map[multidb.TypeName]u2udb.FullDBProducer, byReq map[string]dbMigrationEntry) error {
	records := make(map[multidb.DBLocator][]multidb.TableRecord, 0)
	for _, e := range byReq {
		records[dbLocatorOf(e.New)] = append(records[dbLocatorOf(e.New)], multidb.TableRecord{
			Req:   e.Req,
			Table: e.New.Table,
		})
	}
	written := make(map[multidb.DBLocator]bool)
	for _, e := range byReq {
		if written[dbLocatorOf(e.New)] {
			continue
		}
		written[dbLocatorOf(e.New)] = true

		db, err := dbTypes[e.New.Type].OpenDB(e.New.Name)
		if err != nil {
			return err
		}
		defer db.Close()
		err = multidb.WriteTablesList(db, integration.TablesKey, records[dbLocatorOf(e.New)])
		if err != nil {
			return err
		}
	}
	return nil
}

// interchangeableType reports whether two DB type names are flavors of the same
// registered base type, i.e. they differ only by the "fsh"/"flg"/"drc" mode
// suffix, so their on-disk files are compatible.
func interchangeableType(a_, b_ multidb.TypeName, types map[multidb.TypeName]u2udb.FullDBProducer) bool {
	for t_ := range types {
		a, b, t := string(a_), string(b_), string(t_)
		t = strings.TrimSuffix(t, "fsh")
		t = strings.TrimSuffix(t, "flg")
		t = strings.TrimSuffix(t, "drc")
		if strings.HasPrefix(a, t) && strings.HasPrefix(b, t) {
			return true
		}
	}
	return false
}
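
// interchangeableExample is a hedged illustration of the rule above and is not
// called by the launcher: two type names that differ only in the
// "fsh"/"flg"/"drc" suffix (the "leveldb-fsh"/"leveldb-flg" names here are
// placeholders) are interchangeable, because trimming the suffix from a
// registered type name leaves a prefix common to both.
func interchangeableExample(types map[multidb.TypeName]u2udb.FullDBProducer) bool {
	return interchangeableType("leveldb-fsh", "leveldb-flg", types)
}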

// transformComponent migrates one linked component of tables. If the whole
// component amounts to moving a single DB to a compatible type under a possibly
// different name, the DB directory is simply renamed; otherwise the tables are
// copied into temporary DBs under <datadir>/tmp, the obsolete source DBs are
// dropped and the temporary DBs are moved into <datadir>/chaindata.
func transformComponent(datadir string, dbTypes, tmpDbTypes map[multidb.TypeName]u2udb.FullDBProducer, byReq map[string]dbMigrationEntry) error {
	byDB := separateIntoDBs(byReq)
	// if it can be transformed just by DB renaming
	if len(byDB) == 2 {
		oldDB := multidb.DBLocator{}
		newDB := multidb.DBLocator{}
		ok := true
		for _, e := range byReq {
			if len(oldDB.Type) == 0 {
				oldDB = dbLocatorOf(e.Old)
				newDB = dbLocatorOf(e.New)
			}
			if !interchangeableType(oldDB.Type, newDB.Type, dbTypes) || e.Old.Table != e.New.Table || e.New.Name != newDB.Name ||
				e.Old.Name != oldDB.Name || e.Old.Type != oldDB.Type || e.New.Type != newDB.Type {
				ok = false
				break
			}
		}
		if ok {
			oldPath := path.Join(datadir, "chaindata", string(oldDB.Type), oldDB.Name)
			newPath := path.Join(datadir, "chaindata", string(newDB.Type), newDB.Name)
			log.Info("Renaming DB", "old", oldPath, "new", newPath)
			return os.Rename(oldPath, newPath)
		}
	}

	toMove := make(map[multidb.DBLocator]bool)
	{
		const batchKeys = 100000
		keys := make([][]byte, 0, batchKeys)
		values := make([][]byte, 0, batchKeys)
		for _, e := range byReq {
			err := func() error {
				oldDB, err := dbTypes[e.Old.Type].OpenDB(e.Old.Name)
				if err != nil {
					return err
				}
				oldDB = batched.Wrap(oldDB)
				defer oldDB.Close()
				oldReadableName := path.Join(string(e.Old.Type), e.Old.Name)
				newDB, err := tmpDbTypes[e.New.Type].OpenDB(e.New.Name)
				if err != nil {
					return err
				}
				toMove[dbLocatorOf(e.New)] = true

				newReadableName := path.Join("tmp", string(e.New.Type), e.New.Name)
				newDB = batched.Wrap(autocompact.Wrap2M(newDB, opt.GiB, 16*opt.GiB, true, newReadableName))
				defer newDB.Close()

				log.Info("Copying DB table", "req", e.Req, "old_db", oldReadableName, "old_table", e.Old.Table,
					"new_db", newReadableName, "new_table", e.New.Table)
				oldTable := utils.NewTableOrSelf(oldDB, []byte(e.Old.Table))
				newTable := utils.NewTableOrSelf(newDB, []byte(e.New.Table))
				it := oldTable.NewIterator(nil, nil)
				defer it.Release()

				for next := true; next; {
					for len(keys) < batchKeys {
						next = it.Next()
						if !next {
							break
						}
						keys = append(keys, common.CopyBytes(it.Key()))
						values = append(values, common.CopyBytes(it.Value()))
					}
					for i := 0; i < len(keys); i++ {
						err = newTable.Put(keys[i], values[i])
						if err != nil {
							return err
						}
					}
					keys = keys[:0]
					values = values[:0]
				}
				return nil
			}()
			if err != nil {
				return err
			}
		}
	}

	// finalize tmp DBs
	err := writeCleanTableRecords(tmpDbTypes, byReq)
	if err != nil {
		return err
	}

	// drop unused DBs
	dropped := make(map[multidb.DBLocator]bool)
	for _, e := range byReq {
		if dropped[dbLocatorOf(e.Old)] {
			continue
		}
		dropped[dbLocatorOf(e.Old)] = true
		log.Info("Dropping old DB", "db_type", e.Old.Type, "db_name", e.Old.Name)
		deletePath := path.Join(datadir, "chaindata", string(e.Old.Type), e.Old.Name)
		err := os.RemoveAll(deletePath)
		if err != nil {
			return err
		}
	}
	// move tmp DBs
	for e := range toMove {
		oldPath := path.Join(datadir, "tmp", string(e.Type), e.Name)
		newPath := path.Join(datadir, "chaindata", string(e.Type), e.Name)
		log.Info("Moving tmp DB to clean dir", "old", oldPath, "new", newPath)
		err := os.Rename(oldPath, newPath)
		if err != nil {
			return err
		}
	}
	return nil
}
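
// copySketch is a hedged, simplified restatement of the copy loop inside
// transformComponent: it streams every key/value pair from src into dst without
// the batching and autocompact wrapping used above. It is not called by the
// launcher, the name is hypothetical, and it assumes the same u2udb.Store
// iterator/Put behaviour that the code above relies on.
func copySketch(src, dst u2udb.Store) error {
	it := src.NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		// copy the slices, since an iterator may reuse its buffers
		if err := dst.Put(common.CopyBytes(it.Key()), common.CopyBytes(it.Value())); err != nil {
			return err
		}
	}
	return it.Error()
}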

// separateIntoDBs groups the migration entries by the physical DBs they touch:
// every entry is indexed both under its old DB locator and under its new one.
func separateIntoDBs(byReq map[string]dbMigrationEntry) map[multidb.DBLocator]map[string]dbMigrationEntry {
	byDB := make(map[multidb.DBLocator]map[string]dbMigrationEntry)
	for _, e := range byReq {
		if byDB[dbLocatorOf(e.Old)] == nil {
			byDB[dbLocatorOf(e.Old)] = make(map[string]dbMigrationEntry)
		}
		byDB[dbLocatorOf(e.Old)][e.Req] = e
		if byDB[dbLocatorOf(e.New)] == nil {
			byDB[dbLocatorOf(e.New)] = make(map[string]dbMigrationEntry)
		}
		byDB[dbLocatorOf(e.New)][e.Req] = e
	}
	return byDB
}
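
// touchedDBs is a hedged usage sketch (unused by the launcher, hypothetical
// name) showing what separateIntoDBs yields: one bucket per physical DB that
// the migration reads from or writes to.
func touchedDBs(byReq map[string]dbMigrationEntry) []multidb.DBLocator {
	locators := make([]multidb.DBLocator, 0)
	for locator := range separateIntoDBs(byReq) {
		locators = append(locators, locator)
	}
	return locators
}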