github.com/weaviate/weaviate@v1.24.6/adapters/repos/db/migrator.go (about)

     1  //                           _       _
     2  // __      _____  __ ___   ___  __ _| |_ ___
     3  // \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
     4  //  \ V  V /  __/ (_| |\ V /| | (_| | ||  __/
     5  //   \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
     6  //
     7  //  Copyright © 2016 - 2024 Weaviate B.V. All rights reserved.
     8  //
     9  //  CONTACT: hello@weaviate.io
    10  //
    11  
    12  package db
    13  
    14  import (
    15  	"context"
    16  	"fmt"
    17  	"time"
    18  
    19  	enterrors "github.com/weaviate/weaviate/entities/errors"
    20  
    21  	"github.com/pkg/errors"
    22  	"github.com/sirupsen/logrus"
    23  	"github.com/weaviate/weaviate/adapters/repos/db/inverted"
    24  	"github.com/weaviate/weaviate/adapters/repos/db/vector/flat"
    25  	"github.com/weaviate/weaviate/adapters/repos/db/vector/hnsw"
    26  	"github.com/weaviate/weaviate/entities/errorcompounder"
    27  	"github.com/weaviate/weaviate/entities/models"
    28  	"github.com/weaviate/weaviate/entities/schema"
    29  	"github.com/weaviate/weaviate/entities/storobj"
    30  	"github.com/weaviate/weaviate/usecases/replica"
    31  	"github.com/weaviate/weaviate/usecases/schema/migrate"
    32  	"github.com/weaviate/weaviate/usecases/sharding"
    33  )
    34  
// Migrator applies schema changes to the local database: it creates and
// drops indices, adds properties, manages tenant shards, and runs one-off
// maintenance/reindex tasks at startup.
type Migrator struct {
	db     *DB
	logger logrus.FieldLogger
}
    39  
    40  func (m *Migrator) AddClass(ctx context.Context, class *models.Class,
    41  	shardState *sharding.State,
    42  ) error {
    43  	if err := replica.ValidateConfig(class, m.db.config.Replication); err != nil {
    44  		return fmt.Errorf("replication config: %w", err)
    45  	}
    46  
    47  	idx, err := NewIndex(ctx,
    48  		IndexConfig{
    49  			ClassName:                 schema.ClassName(class.Class),
    50  			RootPath:                  m.db.config.RootPath,
    51  			ResourceUsage:             m.db.config.ResourceUsage,
    52  			QueryMaximumResults:       m.db.config.QueryMaximumResults,
    53  			QueryNestedRefLimit:       m.db.config.QueryNestedRefLimit,
    54  			MemtablesFlushDirtyAfter:  m.db.config.MemtablesFlushDirtyAfter,
    55  			MemtablesInitialSizeMB:    m.db.config.MemtablesInitialSizeMB,
    56  			MemtablesMaxSizeMB:        m.db.config.MemtablesMaxSizeMB,
    57  			MemtablesMinActiveSeconds: m.db.config.MemtablesMinActiveSeconds,
    58  			MemtablesMaxActiveSeconds: m.db.config.MemtablesMaxActiveSeconds,
    59  			TrackVectorDimensions:     m.db.config.TrackVectorDimensions,
    60  			AvoidMMap:                 m.db.config.AvoidMMap,
    61  			DisableLazyLoadShards:     m.db.config.DisableLazyLoadShards,
    62  			ReplicationFactor:         class.ReplicationConfig.Factor,
    63  		},
    64  		shardState,
    65  		// no backward-compatibility check required, since newly added classes will
    66  		// always have the field set
    67  		inverted.ConfigFromModel(class.InvertedIndexConfig),
    68  		convertToVectorIndexConfig(class.VectorIndexConfig),
    69  		convertToVectorIndexConfigs(class.VectorConfig),
    70  		m.db.schemaGetter, m.db, m.logger, m.db.nodeResolver, m.db.remoteIndex,
    71  		m.db.replicaClient, m.db.promMetrics, class, m.db.jobQueueCh, m.db.indexCheckpoints)
    72  	if err != nil {
    73  		return errors.Wrap(err, "create index")
    74  	}
    75  
    76  	err = idx.addUUIDProperty(ctx)
    77  	if err != nil {
    78  		return errors.Wrapf(err, "extend idx '%s' with uuid property", idx.ID())
    79  	}
    80  
    81  	if class.InvertedIndexConfig.IndexTimestamps {
    82  		err = idx.addTimestampProperties(ctx)
    83  		if err != nil {
    84  			return errors.Wrapf(err, "extend idx '%s' with timestamp properties", idx.ID())
    85  		}
    86  	}
    87  
    88  	if m.db.config.TrackVectorDimensions {
    89  		if err := idx.addDimensionsProperty(context.TODO()); err != nil {
    90  			return errors.Wrap(err, "init id property")
    91  		}
    92  	}
    93  
    94  	m.db.indexLock.Lock()
    95  	m.db.indices[idx.ID()] = idx
    96  	idx.notifyReady()
    97  	m.db.indexLock.Unlock()
    98  
    99  	return nil
   100  }
   101  
   102  func (m *Migrator) DropClass(ctx context.Context, className string) error {
   103  	return m.db.DeleteIndex(schema.ClassName(className))
   104  }
   105  
   106  func (m *Migrator) UpdateClass(ctx context.Context, className string, newClassName *string) error {
   107  	if newClassName != nil {
   108  		return errors.New("weaviate does not support renaming of classes")
   109  	}
   110  
   111  	return nil
   112  }
   113  
   114  func (m *Migrator) AddProperty(ctx context.Context, className string, prop *models.Property) error {
   115  	idx := m.db.GetIndex(schema.ClassName(className))
   116  	if idx == nil {
   117  		return errors.Errorf("cannot add property to a non-existing index for %s", className)
   118  	}
   119  
   120  	return idx.addProperty(ctx, prop)
   121  }
   122  
   123  // DropProperty is ignored, API compliant change
   124  func (m *Migrator) DropProperty(ctx context.Context, className string, propertyName string) error {
   125  	// ignore but don't error
   126  	return nil
   127  }
   128  
   129  func (m *Migrator) UpdateProperty(ctx context.Context, className string, propName string, newName *string) error {
   130  	if newName != nil {
   131  		return errors.New("weaviate does not support renaming of properties")
   132  	}
   133  
   134  	return nil
   135  }
   136  
   137  func (m *Migrator) GetShardsQueueSize(ctx context.Context, className, tenant string) (map[string]int64, error) {
   138  	idx := m.db.GetIndex(schema.ClassName(className))
   139  	if idx == nil {
   140  		return nil, errors.Errorf("cannot get shards status for a non-existing index for %s", className)
   141  	}
   142  
   143  	return idx.getShardsQueueSize(ctx, tenant)
   144  }
   145  
   146  func (m *Migrator) GetShardsStatus(ctx context.Context, className, tenant string) (map[string]string, error) {
   147  	idx := m.db.GetIndex(schema.ClassName(className))
   148  	if idx == nil {
   149  		return nil, errors.Errorf("cannot get shards status for a non-existing index for %s", className)
   150  	}
   151  
   152  	return idx.getShardsStatus(ctx, tenant)
   153  }
   154  
   155  func (m *Migrator) UpdateShardStatus(ctx context.Context, className, shardName, targetStatus string) error {
   156  	idx := m.db.GetIndex(schema.ClassName(className))
   157  	if idx == nil {
   158  		return errors.Errorf("cannot update shard status to a non-existing index for %s", className)
   159  	}
   160  
   161  	return idx.updateShardStatus(ctx, shardName, targetStatus)
   162  }
   163  
   164  // NewTenants creates new partitions and returns a commit func
   165  // that can be used to either commit or rollback the partitions
   166  func (m *Migrator) NewTenants(ctx context.Context, class *models.Class, creates []*migrate.CreateTenantPayload) (commit func(success bool), err error) {
   167  	idx := m.db.GetIndex(schema.ClassName(class.Class))
   168  	if idx == nil {
   169  		return nil, fmt.Errorf("cannot find index for %q", class.Class)
   170  	}
   171  
   172  	shards := make(map[string]ShardLike, len(creates))
   173  	rollback := func() {
   174  		for name, shard := range shards {
   175  			if err := shard.drop(); err != nil {
   176  				m.logger.WithField("action", "drop_shard").
   177  					WithField("class", class.Class).
   178  					Errorf("cannot drop self created shard %s: %v", name, err)
   179  			}
   180  		}
   181  	}
   182  	commit = func(success bool) {
   183  		if success {
   184  			for name, shard := range shards {
   185  				idx.shards.Store(name, shard)
   186  			}
   187  			return
   188  		}
   189  		rollback()
   190  	}
   191  	defer func() {
   192  		if err != nil {
   193  			rollback()
   194  		}
   195  	}()
   196  
   197  	for _, pl := range creates {
   198  		if shard := idx.shards.Load(pl.Name); shard != nil {
   199  			continue
   200  		}
   201  		if pl.Status != models.TenantActivityStatusHOT {
   202  			continue // skip creating inactive shards
   203  		}
   204  
   205  		shard, err := idx.initShard(ctx, pl.Name, class, m.db.promMetrics)
   206  		if err != nil {
   207  			return nil, fmt.Errorf("cannot create partition %q: %w", pl, err)
   208  		}
   209  		shards[pl.Name] = shard
   210  	}
   211  
   212  	return commit, nil
   213  }
   214  
// UpdateTenants activates or deactivates tenant partitions and returns a commit func
// that can be used to either commit or rollback the changes
func (m *Migrator) UpdateTenants(ctx context.Context, class *models.Class, updates []*migrate.UpdateTenantPayload) (commit func(success bool), err error) {
	idx := m.db.GetIndex(schema.ClassName(class.Class))
	if idx == nil {
		return nil, fmt.Errorf("cannot find index for %q", class.Class)
	}

	// requested transitions (names) and already-applied transitions (shards),
	// kept separately so commit/rollback can act on what actually happened
	shardsToHot := make([]string, 0, len(updates))
	shardsToCold := make([]string, 0, len(updates))
	shardsHotted := make(map[string]ShardLike)
	shardsColded := make(map[string]ShardLike)

	// rollbackHotted shuts down shards that were activated during this call
	rollbackHotted := func() {
		eg := enterrors.NewErrorGroupWrapper(m.logger)
		eg.SetLimit(2 * _NUMCPU)
		for name, shard := range shardsHotted {
			name, shard := name, shard
			eg.Go(func() error {
				if err := shard.Shutdown(ctx); err != nil {
					idx.logger.WithField("action", "rollback_shutdown_shard").
						WithField("shard", shard.ID()).
						Errorf("cannot shutdown self activated shard %q: %s", name, err)
				}
				return nil
			}, name, shard)
		}
		eg.Wait()
	}
	// rollbackColded restores shards that were marked deactivated (nil) back
	// into the shard map
	rollbackColded := func() {
		for name, shard := range shardsColded {
			idx.shards.CompareAndSwap(name, nil, shard)
		}
	}
	rollback := func() {
		rollbackHotted()
		rollbackColded()
	}

	// commitHotted publishes the newly activated shards
	commitHotted := func() {
		for name, shard := range shardsHotted {
			idx.shards.Store(name, shard)
		}
	}
	// commitColded removes deactivated shards from the map, then shuts them
	// down concurrently
	commitColded := func() {
		for name := range shardsColded {
			idx.shards.LoadAndDelete(name)
		}

		eg := enterrors.NewErrorGroupWrapper(m.logger)
		eg.SetLimit(_NUMCPU * 2)
		for name, shard := range shardsColded {
			name, shard := name, shard
			eg.Go(func() error {
				if err := shard.Shutdown(ctx); err != nil {
					idx.logger.WithField("action", "shutdown_shard").
						WithField("shard", shard.ID()).
						Errorf("cannot shutdown shard %q: %s", name, err)
				}
				return nil
			}, name, shard)
		}
		eg.Wait()
	}
	commit = func(success bool) {
		if !success {
			rollback()
			return
		}
		commitHotted()
		commitColded()
	}

	// applyHot initializes a shard for every tenant going HOT that isn't
	// already loaded
	applyHot := func() error {
		for _, name := range shardsToHot {
			// shard already hot
			if shard := idx.shards.Load(name); shard != nil {
				continue
			}

			shard, err := idx.initShard(ctx, name, class, m.db.promMetrics)
			if err != nil {
				return fmt.Errorf("cannot activate shard '%s': %w", name, err)
			}
			shardsHotted[name] = shard
		}
		return nil
	}
	// applyCold marks shards going COLD as deactivated (nil entry) while
	// holding the backup lock, remembering the previous shard for rollback
	applyCold := func() error {
		idx.backupMutex.RLock()
		defer idx.backupMutex.RUnlock()

		for _, name := range shardsToCold {
			shard, ok := idx.shards.Swap(name, nil) // mark as deactivated
			if !ok {                                // shard doesn't exist (already cold)
				idx.shards.LoadAndDelete(name) // rollback nil value created by swap()
				continue
			}
			if shard != nil {
				shardsColded[name] = shard
			}
		}
		return nil
	}

	for _, tu := range updates {
		switch tu.Status {
		case models.TenantActivityStatusHOT:
			shardsToHot = append(shardsToHot, tu.Name)
		case models.TenantActivityStatusCOLD:
			shardsToCold = append(shardsToCold, tu.Name)
		}
	}

	// if either apply step fails, undo all transitions made so far
	defer func() {
		if err != nil {
			rollback()
		}
	}()

	if err := applyHot(); err != nil {
		return nil, err
	}
	if err := applyCold(); err != nil {
		return nil, err
	}

	return commit, nil
}
   344  
   345  // DeleteTenants deletes tenants and returns a commit func
   346  // that can be used to either commit or rollback deletion
   347  func (m *Migrator) DeleteTenants(ctx context.Context, class *models.Class, tenants []string) (commit func(success bool), err error) {
   348  	idx := m.db.GetIndex(schema.ClassName(class.Class))
   349  	if idx == nil {
   350  		return func(bool) {}, nil
   351  	}
   352  	return idx.dropShards(tenants)
   353  }
   354  
   355  func NewMigrator(db *DB, logger logrus.FieldLogger) *Migrator {
   356  	return &Migrator{db: db, logger: logger}
   357  }
   358  
   359  func (m *Migrator) UpdateVectorIndexConfig(ctx context.Context,
   360  	className string, updated schema.VectorIndexConfig,
   361  ) error {
   362  	idx := m.db.GetIndex(schema.ClassName(className))
   363  	if idx == nil {
   364  		return errors.Errorf("cannot update vector index config of non-existing index for %s", className)
   365  	}
   366  
   367  	return idx.updateVectorIndexConfig(ctx, updated)
   368  }
   369  
   370  func (m *Migrator) UpdateVectorIndexConfigs(ctx context.Context,
   371  	className string, updated map[string]schema.VectorIndexConfig,
   372  ) error {
   373  	idx := m.db.GetIndex(schema.ClassName(className))
   374  	if idx == nil {
   375  		return errors.Errorf("cannot update vector config of non-existing index for %s", className)
   376  	}
   377  
   378  	return idx.updateVectorIndexConfigs(ctx, updated)
   379  }
   380  
   381  func (m *Migrator) ValidateVectorIndexConfigUpdate(ctx context.Context,
   382  	old, updated schema.VectorIndexConfig,
   383  ) error {
   384  	// hnsw is the only supported vector index type at the moment, so no need
   385  	// to check, we can always use that an hnsw-specific validation should be
   386  	// used for now.
   387  	switch old.IndexType() {
   388  	case "hnsw":
   389  		return hnsw.ValidateUserConfigUpdate(old, updated)
   390  	case "flat":
   391  		return flat.ValidateUserConfigUpdate(old, updated)
   392  	}
   393  	return fmt.Errorf("Invalid index type: %s", old.IndexType())
   394  }
   395  
   396  func (m *Migrator) ValidateVectorIndexConfigsUpdate(ctx context.Context,
   397  	old, updated map[string]schema.VectorIndexConfig,
   398  ) error {
   399  	for vecName := range old {
   400  		if err := m.ValidateVectorIndexConfigUpdate(ctx, old[vecName], updated[vecName]); err != nil {
   401  			return fmt.Errorf("vector %q", vecName)
   402  		}
   403  	}
   404  	return nil
   405  }
   406  
   407  func (m *Migrator) ValidateInvertedIndexConfigUpdate(ctx context.Context,
   408  	old, updated *models.InvertedIndexConfig,
   409  ) error {
   410  	return inverted.ValidateUserConfigUpdate(old, updated)
   411  }
   412  
   413  func (m *Migrator) UpdateInvertedIndexConfig(ctx context.Context, className string,
   414  	updated *models.InvertedIndexConfig,
   415  ) error {
   416  	idx := m.db.GetIndex(schema.ClassName(className))
   417  	if idx == nil {
   418  		return errors.Errorf("cannot update inverted index config of non-existing index for %s", className)
   419  	}
   420  
   421  	conf := inverted.ConfigFromModel(updated)
   422  
   423  	return idx.updateInvertedIndexConfig(ctx, conf)
   424  }
   425  
   426  func (m *Migrator) RecalculateVectorDimensions(ctx context.Context) error {
   427  	count := 0
   428  	m.logger.
   429  		WithField("action", "reindex").
   430  		Info("Reindexing dimensions, this may take a while")
   431  
   432  	// Iterate over all indexes
   433  	for _, index := range m.db.indices {
   434  		// Iterate over all shards
   435  		if err := index.IterateObjects(ctx, func(index *Index, shard ShardLike, object *storobj.Object) error {
   436  			count = count + 1
   437  			if shard.hasTargetVectors() {
   438  				for vecName, vec := range object.Vectors {
   439  					if err := shard.extendDimensionTrackerForVecLSM(len(vec), object.DocID, vecName); err != nil {
   440  						return err
   441  					}
   442  				}
   443  			} else {
   444  				if err := shard.extendDimensionTrackerLSM(len(object.Vector), object.DocID); err != nil {
   445  					return err
   446  				}
   447  			}
   448  			return nil
   449  		}); err != nil {
   450  			return err
   451  		}
   452  	}
   453  	f := func() {
   454  		for {
   455  			m.logger.
   456  				WithField("action", "reindex").
   457  				Warnf("Reindexed %v objects. Reindexing dimensions complete. Please remove environment variable REINDEX_VECTOR_DIMENSIONS_AT_STARTUP before next startup", count)
   458  			time.Sleep(5 * time.Minute)
   459  		}
   460  	}
   461  	enterrors.GoWrapper(f, m.logger)
   462  
   463  	return nil
   464  }
   465  
   466  func (m *Migrator) RecountProperties(ctx context.Context) error {
   467  	count := 0
   468  	m.logger.
   469  		WithField("action", "recount").
   470  		Info("Recounting properties, this may take a while")
   471  
   472  	m.db.indexLock.Lock()
   473  	defer m.db.indexLock.Unlock()
   474  	// Iterate over all indexes
   475  	for _, index := range m.db.indices {
   476  
   477  		// Clear the shards before counting
   478  		index.IterateShards(ctx, func(index *Index, shard ShardLike) error {
   479  			shard.GetPropertyLengthTracker().Clear()
   480  			return nil
   481  		})
   482  
   483  		// Iterate over all shards
   484  		index.IterateObjects(ctx, func(index *Index, shard ShardLike, object *storobj.Object) error {
   485  			count = count + 1
   486  			props, _, err := shard.AnalyzeObject(object)
   487  			if err != nil {
   488  				m.logger.WithField("error", err).Error("could not analyze object")
   489  				return nil
   490  			}
   491  
   492  			if err := shard.SetPropertyLengths(props); err != nil {
   493  				m.logger.WithField("error", err).Error("could not add prop lengths")
   494  				return nil
   495  			}
   496  
   497  			shard.GetPropertyLengthTracker().Flush(false)
   498  
   499  			return nil
   500  		})
   501  
   502  		// Flush the GetPropertyLengthTracker() to disk
   503  		err := index.IterateShards(ctx, func(index *Index, shard ShardLike) error {
   504  			return shard.GetPropertyLengthTracker().Flush(false)
   505  		})
   506  		if err != nil {
   507  			m.logger.WithField("error", err).Error("could not flush prop lengths")
   508  		}
   509  
   510  	}
   511  	f := func() {
   512  		for {
   513  			m.logger.
   514  				WithField("action", "recount").
   515  				Warnf("Recounted %v objects. Recounting properties complete. Please remove environment variable 	RECOUNT_PROPERTIES_AT_STARTUP before next startup", count)
   516  			time.Sleep(5 * time.Minute)
   517  		}
   518  	}
   519  	enterrors.GoWrapper(f, m.logger)
   520  
   521  	return nil
   522  }
   523  
   524  func (m *Migrator) InvertedReindex(ctx context.Context, taskNames ...string) error {
   525  	var errs errorcompounder.ErrorCompounder
   526  	errs.Add(m.doInvertedReindex(ctx, taskNames...))
   527  	errs.Add(m.doInvertedIndexMissingTextFilterable(ctx, taskNames...))
   528  	return errs.ToError()
   529  }
   530  
   531  func (m *Migrator) doInvertedReindex(ctx context.Context, taskNames ...string) error {
   532  	tasksProviders := map[string]func() ShardInvertedReindexTask{
   533  		"ShardInvertedReindexTaskSetToRoaringSet": func() ShardInvertedReindexTask {
   534  			return &ShardInvertedReindexTaskSetToRoaringSet{}
   535  		},
   536  	}
   537  
   538  	tasks := map[string]ShardInvertedReindexTask{}
   539  	for _, taskName := range taskNames {
   540  		if taskProvider, ok := tasksProviders[taskName]; ok {
   541  			tasks[taskName] = taskProvider()
   542  		}
   543  	}
   544  
   545  	if len(tasks) == 0 {
   546  		return nil
   547  	}
   548  
   549  	eg := enterrors.NewErrorGroupWrapper(m.logger)
   550  	eg.SetLimit(_NUMCPU)
   551  	for _, index := range m.db.indices {
   552  		index.ForEachShard(func(name string, shard ShardLike) error {
   553  			eg.Go(func() error {
   554  				reindexer := NewShardInvertedReindexer(shard, m.logger)
   555  				for taskName, task := range tasks {
   556  					reindexer.AddTask(task)
   557  					m.logInvertedReindexShard(shard).
   558  						WithField("task", taskName).
   559  						Info("About to start inverted reindexing, this may take a while")
   560  				}
   561  				if err := reindexer.Do(ctx); err != nil {
   562  					m.logInvertedReindexShard(shard).
   563  						WithError(err).
   564  						Error("failed reindexing")
   565  					return errors.Wrapf(err, "failed reindexing shard '%s'", shard.ID())
   566  				}
   567  				m.logInvertedReindexShard(shard).
   568  					Info("Finished inverted reindexing")
   569  				return nil
   570  			}, name)
   571  			return nil
   572  		})
   573  	}
   574  	return eg.Wait()
   575  }
   576  
   577  func (m *Migrator) doInvertedIndexMissingTextFilterable(ctx context.Context, taskNames ...string) error {
   578  	taskName := "ShardInvertedReindexTaskMissingTextFilterable"
   579  	taskFound := false
   580  	for _, name := range taskNames {
   581  		if name == taskName {
   582  			taskFound = true
   583  			break
   584  		}
   585  	}
   586  	if !taskFound {
   587  		return nil
   588  	}
   589  
   590  	task := newShardInvertedReindexTaskMissingTextFilterable(m)
   591  	if err := task.init(); err != nil {
   592  		m.logMissingFilterable().WithError(err).Error("failed init missing text filterable task")
   593  		return errors.Wrap(err, "failed init missing text filterable task")
   594  	}
   595  
   596  	if len(task.migrationState.MissingFilterableClass2Props) == 0 {
   597  		m.logMissingFilterable().Info("no classes to create filterable index, skipping")
   598  		return nil
   599  	}
   600  
   601  	m.logMissingFilterable().Info("staring missing text filterable task")
   602  
   603  	eg := enterrors.NewErrorGroupWrapper(m.logger)
   604  	eg.SetLimit(_NUMCPU * 2)
   605  	for _, index := range m.db.indices {
   606  		index := index
   607  		className := index.Config.ClassName.String()
   608  
   609  		if _, ok := task.migrationState.MissingFilterableClass2Props[className]; !ok {
   610  			continue
   611  		}
   612  
   613  		eg.Go(func() error {
   614  			errgrpShards := enterrors.NewErrorGroupWrapper(m.logger)
   615  			index.ForEachShard(func(_ string, shard ShardLike) error {
   616  				errgrpShards.Go(func() error {
   617  					m.logMissingFilterableShard(shard).
   618  						Info("starting filterable indexing on shard, this may take a while")
   619  
   620  					reindexer := NewShardInvertedReindexer(shard, m.logger)
   621  					reindexer.AddTask(task)
   622  
   623  					if err := reindexer.Do(ctx); err != nil {
   624  						m.logMissingFilterableShard(shard).
   625  							WithError(err).
   626  							Error("failed filterable indexing on shard")
   627  						return errors.Wrapf(err, "failed filterable indexing for shard '%s' of index '%s'",
   628  							shard.ID(), index.ID())
   629  					}
   630  					m.logMissingFilterableShard(shard).
   631  						Info("finished filterable indexing on shard")
   632  					return nil
   633  				}, shard.ID())
   634  				return nil
   635  			})
   636  
   637  			if err := errgrpShards.Wait(); err != nil {
   638  				m.logMissingFilterableIndex(index).
   639  					WithError(err).
   640  					Error("failed filterable indexing on index")
   641  				return errors.Wrapf(err, "failed filterable indexing of index '%s'", index.ID())
   642  			}
   643  
   644  			if err := task.updateMigrationStateAndSave(className); err != nil {
   645  				m.logMissingFilterableIndex(index).
   646  					WithError(err).
   647  					Error("failed updating migration state file")
   648  				return errors.Wrapf(err, "failed updating migration state file for class '%s'", className)
   649  			}
   650  
   651  			m.logMissingFilterableIndex(index).
   652  				Info("finished filterable indexing on index")
   653  
   654  			return nil
   655  		}, index.ID())
   656  	}
   657  
   658  	if err := eg.Wait(); err != nil {
   659  		m.logMissingFilterable().
   660  			WithError(err).
   661  			Error("failed missing text filterable task")
   662  		return errors.Wrap(err, "failed missing text filterable task")
   663  	}
   664  
   665  	m.logMissingFilterable().Info("finished missing text filterable task")
   666  	return nil
   667  }
   668  
   669  func (m *Migrator) logInvertedReindex() *logrus.Entry {
   670  	return m.logger.WithField("action", "inverted_reindex")
   671  }
   672  
   673  func (m *Migrator) logInvertedReindexShard(shard ShardLike) *logrus.Entry {
   674  	return m.logInvertedReindex().
   675  		WithField("index", shard.Index().ID()).
   676  		WithField("shard", shard.ID())
   677  }
   678  
   679  func (m *Migrator) logMissingFilterable() *logrus.Entry {
   680  	return m.logger.WithField("action", "ii_missing_text_filterable")
   681  }
   682  
   683  func (m *Migrator) logMissingFilterableIndex(index *Index) *logrus.Entry {
   684  	return m.logMissingFilterable().WithField("index", index.ID())
   685  }
   686  
   687  func (m *Migrator) logMissingFilterableShard(shard ShardLike) *logrus.Entry {
   688  	return m.logMissingFilterableIndex(shard.Index()).WithField("shard", shard.ID())
   689  }
   690  
   691  // As of v1.19 property's IndexInverted setting is replaced with IndexFilterable
   692  // and IndexSearchable
   693  // Filterable buckets use roaring set strategy and searchable ones use map strategy
   694  // (therefore are applicable just for text/text[])
   695  // Since both type of buckets can coexist for text/text[] props they need to be
   696  // distinguished by their name: searchable bucket has "searchable" suffix.
   697  // Up until v1.19 default text/text[]/string/string[] (string/string[] deprecated since v1.19)
   698  // strategy for buckets was map, migrating from pre v1.19 to v1.19 needs to properly
   699  // handle existing text/text[] buckets of map strategy having filterable bucket name.
   700  //
   701  // Enabled InvertedIndex translates in v1.19 to both InvertedFilterable and InvertedSearchable
   702  // enabled, but since only searchable bucket exist (with filterable name), it has to be renamed
   703  // to searchable bucket.
   704  // Though IndexFilterable setting is enabled filterable index does not exists,
   705  // therefore shards are switched into fallback mode, to use searchable buckets instead of
   706  // filterable ones whenever filtered are expected.
   707  // Fallback mode effectively sets IndexFilterable to false, although it stays enabled according
   708  // to schema.
   709  //
   710  // If filterable indexes will be created (that is up to user to decide whether missing indexes
   711  // should be created later on), shards will not be working in fallback mode, and actual filterable index
   712  // will be used when needed.
   713  func (m *Migrator) AdjustFilterablePropSettings(ctx context.Context) error {
   714  	f2sm := newFilterableToSearchableMigrator(m)
   715  	if err := f2sm.migrate(ctx); err != nil {
   716  		return err
   717  	}
   718  	return f2sm.switchShardsToFallbackMode(ctx)
   719  }