github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/table.go

// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql

import (
	"context"
	"fmt"
	"strings"

	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/jobs"
	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkv"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/errors"
)

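// getVirtualTabler returns the VirtualTabler used to look up virtual schema
// objects, taken from the planner's extended eval context.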
func (p *planner) getVirtualTabler() VirtualTabler {
	return p.extendedEvalCtx.VirtualSchemas
}

// createDropDatabaseJob queues a job for dropping a database.
func (p *planner) createDropDatabaseJob(
	ctx context.Context,
	databaseID sqlbase.ID,
	droppedDetails []jobspb.DroppedTableDetails,
	jobDesc string,
) error {
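	// Refuse schema changes requested on behalf of a migration while the
	// schema change job cluster version is not yet active, i.e. while the
	// cluster may still be in a mixed-version state.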
	if !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.VersionSchemaChangeJob) {
		if descs.MigrationSchemaChangeRequiredFromContext(ctx) {
			return descs.ErrSchemaChangeDisallowedInMixedState
		}
	}
	// TODO (lucy): This should probably be deleting the queued jobs for all the
	// tables being dropped, so that we don't have duplicate schema changers.
	descriptorIDs := make([]sqlbase.ID, 0, len(droppedDetails))
	for _, d := range droppedDetails {
		descriptorIDs = append(descriptorIDs, d.ID)
	}
	jobRecord := jobs.Record{
		Description:   jobDesc,
		Username:      p.User(),
		DescriptorIDs: descriptorIDs,
		Details: jobspb.SchemaChangeDetails{
			DroppedTables:     droppedDetails,
			DroppedDatabaseID: databaseID,
			FormatVersion:     jobspb.JobResumerFormatVersion,
		},
		Progress: jobspb.SchemaChangeProgress{},
	}
	_, err := p.extendedEvalCtx.QueueJob(jobRecord)
	return err
}

// createOrUpdateSchemaChangeJob queues a new job for the schema change if there
// is no existing schema change job for the table, or updates the existing job
// if there is one.
func (p *planner) createOrUpdateSchemaChangeJob(
	ctx context.Context,
	tableDesc *sqlbase.MutableTableDescriptor,
	jobDesc string,
	mutationID sqlbase.MutationID,
) error {
	if !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.VersionSchemaChangeJob) {
		if descs.MigrationSchemaChangeRequiredFromContext(ctx) {
			return descs.ErrSchemaChangeDisallowedInMixedState
		}
	}
	var job *jobs.Job
	// Iterate through the queued jobs to find an existing schema change job for
	// this table, if it exists.
	// TODO (lucy): Looking up each job to determine this is not ideal. Maybe
	// we need some additional state in extraTxnState to help with lookups.
	for _, jobID := range *p.extendedEvalCtx.Jobs {
		j, err := p.ExecCfg().JobRegistry.LoadJobWithTxn(ctx, jobID, p.txn)
		if err != nil {
			return err
		}
		schemaDetails, ok := j.Details().(jobspb.SchemaChangeDetails)
		if !ok {
			continue
		}
		if schemaDetails.TableID == tableDesc.ID {
			job = j
			break
		}
	}

	var spanList []jobspb.ResumeSpanList
	jobExists := job != nil
	if jobExists {
		spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList
	}
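	// Add a resume span entry, initialized to the table's primary index span,
	// for each mutation that isn't already covered by the existing span list.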
	span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec)
	for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ {
		spanList = append(spanList,
			jobspb.ResumeSpanList{
				ResumeSpans: []roachpb.Span{span},
			},
		)
	}

	if !jobExists {
		// Queue a new job.
		jobRecord := jobs.Record{
			Description:   jobDesc,
			Username:      p.User(),
			DescriptorIDs: sqlbase.IDs{tableDesc.GetID()},
			Details: jobspb.SchemaChangeDetails{
				TableID:        tableDesc.ID,
				MutationID:     mutationID,
				ResumeSpanList: spanList,
				FormatVersion:  jobspb.JobResumerFormatVersion,
			},
			Progress: jobspb.SchemaChangeProgress{},
		}
		newJob, err := p.extendedEvalCtx.QueueJob(jobRecord)
		if err != nil {
			return err
		}
		// Only add a MutationJob if there's an associated mutation.
		// TODO (lucy): get rid of this when we get rid of MutationJobs.
		if mutationID != sqlbase.InvalidMutationID {
			tableDesc.MutationJobs = append(tableDesc.MutationJobs, sqlbase.TableDescriptor_MutationJob{
				MutationID: mutationID, JobID: *newJob.ID()})
		}
		log.Infof(ctx, "queued new schema change job %d for table %d, mutation %d",
			*newJob.ID(), tableDesc.ID, mutationID)
	} else {
		// Update the existing job.
		oldDetails := job.Details().(jobspb.SchemaChangeDetails)
		newDetails := jobspb.SchemaChangeDetails{
			TableID:        tableDesc.ID,
			MutationID:     oldDetails.MutationID,
			ResumeSpanList: spanList,
			FormatVersion:  jobspb.JobResumerFormatVersion,
		}
		if oldDetails.MutationID != sqlbase.InvalidMutationID {
			// The previous queued schema change job was associated with a mutation,
			// which must have the same mutation ID as this schema change, so just
			// check for consistency.
			if mutationID != sqlbase.InvalidMutationID && mutationID != oldDetails.MutationID {
				return errors.AssertionFailedf(
					"attempted to update job for mutation %d, but job already exists with mutation %d",
					mutationID, oldDetails.MutationID)
			}
		} else {
			// The previous queued schema change job didn't have a mutation.
			if mutationID != sqlbase.InvalidMutationID {
				newDetails.MutationID = mutationID
				// Also add a MutationJob on the table descriptor.
				// TODO (lucy): get rid of this when we get rid of MutationJobs.
				tableDesc.MutationJobs = append(tableDesc.MutationJobs, sqlbase.TableDescriptor_MutationJob{
					MutationID: mutationID, JobID: *job.ID()})
			}
		}
		if err := job.WithTxn(p.txn).SetDetails(ctx, newDetails); err != nil {
			return err
		}
		if jobDesc != "" {
			if err := job.WithTxn(p.txn).SetDescription(
				ctx,
				func(ctx context.Context, description string) (string, error) {
					return strings.Join([]string{description, jobDesc}, ";"), nil
				},
			); err != nil {
				return err
			}
		}
		log.Infof(ctx, "job %d: updated with schema change for table %d, mutation %d",
			*job.ID(), tableDesc.ID, mutationID)
	}
	return nil
}

// writeSchemaChange effectively writes a table descriptor to the
// database within the current planner transaction, and queues up
// a schema changer for future processing.
// TODO (lucy): The way job descriptions are handled needs improvement.
// Currently, whenever we update a job, the provided job description string, if
// non-empty, is appended to the end of the existing description, regardless of
// whether the particular schema change written in this method call came from a
// separate statement in the same transaction, or from updating a dependent
// table descriptor during a schema change to another table, or from a step in a
// larger schema change to the same table.
func (p *planner) writeSchemaChange(
	ctx context.Context,
	tableDesc *sqlbase.MutableTableDescriptor,
	mutationID sqlbase.MutationID,
	jobDesc string,
) error {
	if !p.EvalContext().TxnImplicit {
		telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
	}
	if tableDesc.Dropped() {
		// We don't allow schema changes on a dropped table.
		return fmt.Errorf("table %q is being dropped", tableDesc.Name)
	}
	if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, mutationID); err != nil {
		return err
	}
	return p.writeTableDesc(ctx, tableDesc)
}

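// writeSchemaChangeToBatch writes the table descriptor to the given batch
// within the current planner transaction. Unlike writeSchemaChange, it does
// not queue or update a schema change job.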
func (p *planner) writeSchemaChangeToBatch(
	ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor, b *kv.Batch,
) error {
	if !p.EvalContext().TxnImplicit {
		telemetry.Inc(sqltelemetry.SchemaChangeInExplicitTxnCounter)
	}
	if tableDesc.Dropped() {
		// We don't allow schema changes on a dropped table.
		return fmt.Errorf("table %q is being dropped", tableDesc.Name)
	}
	return p.writeTableDescToBatch(ctx, tableDesc, b)
}

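// writeDropTable writes a table descriptor that is being dropped, and, if
// queueJob is set, queues (or updates) the schema change job that will carry
// out the drop.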
func (p *planner) writeDropTable(
	ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor, queueJob bool, jobDesc string,
) error {
	if queueJob {
		if err := p.createOrUpdateSchemaChangeJob(ctx, tableDesc, jobDesc, sqlbase.InvalidMutationID); err != nil {
			return err
		}
	}
	return p.writeTableDesc(ctx, tableDesc)
}

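// writeTableDesc writes the table descriptor in its own batch, run inside the
// planner's transaction.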
func (p *planner) writeTableDesc(
	ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor,
) error {
	b := p.txn.NewBatch()
	if err := p.writeTableDescToBatch(ctx, tableDesc, b); err != nil {
		return err
	}
	return p.txn.Run(ctx, b)
}

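// writeTableDescToBatch validates the table descriptor, records it as an
// uncommitted table for this transaction, and adds the descriptor write to
// the given batch. Virtual table descriptors cannot be written.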
func (p *planner) writeTableDescToBatch(
	ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor, b *kv.Batch,
) error {
	if tableDesc.IsVirtualTable() {
		return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc)
	}

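	// If the table was created in this transaction, run its pending schema
	// changes directly in this transaction instead of deferring them to the
	// schema changer.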
	if tableDesc.IsNewTable() {
		if err := runSchemaChangesInTxn(
			ctx, p, tableDesc, p.ExtendedEvalContext().Tracing.KVTracingEnabled(),
		); err != nil {
			return err
		}
	} else {
		// Only increment the table descriptor version once in this transaction.
		if err := tableDesc.MaybeIncrementVersion(ctx, p.txn, p.execCfg.Settings); err != nil {
			return err
		}
	}

	if err := tableDesc.ValidateTable(); err != nil {
		return errors.AssertionFailedf("table descriptor is not valid: %s\n%v", err, tableDesc)
	}

	if err := p.Tables().AddUncommittedTable(*tableDesc); err != nil {
		return err
	}

	return catalogkv.WriteDescToBatch(
		ctx,
		p.extendedEvalCtx.Tracing.KVTracingEnabled(),
		p.ExecCfg().Settings,
		b,
		p.ExecCfg().Codec,
		tableDesc.GetID(),
		tableDesc.TableDesc(),
	)
}