go.ligato.io/vpp-agent/v3@v3.5.0/plugins/kvscheduler/txn_process.go

// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kvscheduler

import (
	"context"
	"runtime/trace"
	"time"

	"google.golang.org/protobuf/proto"

	"go.ligato.io/cn-infra/v2/logging"

	kvs "go.ligato.io/vpp-agent/v3/plugins/kvscheduler/api"
	"go.ligato.io/vpp-agent/v3/plugins/kvscheduler/internal/graph"
	"go.ligato.io/vpp-agent/v3/plugins/kvscheduler/internal/utils"
	"go.ligato.io/vpp-agent/v3/proto/ligato/kvscheduler"
)
// transaction represents a kvscheduler transaction that is being queued/processed.
// Once finalized, it is recorded as an instance of RecordedTxn and this data
// is discarded.
type transaction struct {
	ctx     context.Context
	seqNum  uint64
	txnType kvs.TxnType
	values  []kvForTxn
	nb      *nbTxn    // defined for NB transactions
	retry   *retryTxn // defined for retry of failed operations
	created time.Time
}
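
// Exactly one of the nb/retry fields is set, depending on txnType; SB
// notifications set neither. For orientation, a minimal sketch of how an NB
// flavor could look (illustrative key/value; real instances are built by the
// scheduler's enqueue paths, not here):
//
//	txn := &transaction{
//		txnType: kvs.NBTransaction,
//		values:  []kvForTxn{{key: "example/key", value: val, origin: kvs.FromNB}},
//		nb:      &nbTxn{isBlocking: true, resultChan: make(chan txnResult, 1)},
//		created: time.Now(),
//	}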

// kvForTxn represents a new value for a given key to be applied in a transaction.
type kvForTxn struct {
	key      string
	value    proto.Message
	metadata kvs.Metadata
	origin   kvs.ValueOrigin
	isRevert bool
}

// nbTxn encapsulates data for an NB transaction.
type nbTxn struct {
	resyncType     kvs.ResyncType
	verboseRefresh bool
	isBlocking     bool

	retryEnabled bool
	retryArgs    *kvs.RetryOpt

	revertOnFailure bool
	withSimulation  bool
	description     string
	resultChan      chan txnResult
}
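
// Note on resultChan: postProcessTransaction (below) delivers the txnResult
// with a non-blocking send (select with default), so the channel is expected
// to be buffered, e.g. (a sketch and an assumption; the channel is actually
// created wherever the blocking commit is issued):
//
//	resultChan := make(chan txnResult, 1) // capacity 1 => the single result cannot be dropped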

// retryTxn encapsulates data for a retry of failed operations.
type retryTxn struct {
	retryTxnMeta
	keys map[string]uint64 // key -> value revision (last update) when the retry was enqueued
}

// retryTxnMeta contains metadata for a retry transaction.
type retryTxnMeta struct {
	txnSeqNum uint64
	delay     time.Duration
	attempt   int
}
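
// retryTxnMeta is deliberately a plain comparable struct: scheduleRetries
// (below) uses it as a map key, so failed keys sharing the same origin
// transaction, delay and attempt number collapse into a single retry
// transaction. E.g. (sketch with an illustrative key):
//
//	meta := retryTxnMeta{txnSeqNum: 7, delay: time.Second, attempt: 1}
//	retryTxns[meta].keys["example/interface/loop1"] = 7 // value revision at enqueue time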

// txnResult represents the result of a transaction.
type txnResult struct {
	err       error
	txnSeqNum uint64
}

// consumeTransactions pulls the oldest queued transaction and starts processing it.
func (s *Scheduler) consumeTransactions() {
	defer s.wg.Done()
	for {
		txn, canceled := s.dequeueTxn()
		if canceled {
			return
		}
		reportQueueWait(txn.txnType, time.Since(txn.created).Seconds())
		s.processTransaction(txn)
		reportTxnProcessed(txn.txnType, time.Since(txn.created).Seconds())
	}
}

// processTransaction processes a transaction in 8 steps:
//	1. Pre-processing: transaction parameters are initialized, retry operations
//	   are filtered from the obsolete ones and for resync the graph is refreshed
//	2. Ordering: pre-order operations using a heuristic to get the shortest graph
//	   walk on average
//	3. Simulation: simulating the transaction without actually executing any of the
//	   Create/Delete/Update operations in order to obtain the "execution plan"
//	4. Pre-recording: logging transaction arguments + plan before execution to
//	   persist some information in case there is a crash during execution
//	5. Execution: executing the transaction, collecting errors
//	6. Recording: recording the finalized transaction (log + in-memory)
//	7. Post-processing: scheduling retries for failed operations, propagating value
//	   state updates to the subscribers and returning error/nil to the caller
//	   of a blocking commit
//	8. Update of transaction statistics
func (s *Scheduler) processTransaction(txn *transaction) {
	s.txnLock.Lock()
	defer s.txnLock.Unlock()
	defer trackTransactionMethod("processTransaction")()

	startTime := time.Now()

	// 1. Pre-processing:
	skipExec, skipSimulation, record := s.preProcessTransaction(txn)

	// 2. Ordering:
	if !skipExec {
		txn.values = s.orderValuesByOp(txn.values)
	}

	// 3. Simulation:
	var simulatedOps kvs.RecordedTxnOps
	if !skipSimulation {
		graphW := s.graph.Write(false, record)
		simulatedOps = s.executeTransaction(txn, graphW, true)
		if len(simulatedOps) == 0 {
			// nothing to execute
			graphW.Save()
			skipExec = true
		}
		graphW.Release()
	}

	// 4. Pre-recording:
	preTxnRecord := s.preRecordTransaction(txn, simulatedOps, skipSimulation)

	// 5. Execution:
	var executedOps kvs.RecordedTxnOps
	if !skipExec {
		graphW := s.graph.Write(true, record)
		executedOps = s.executeTransaction(txn, graphW, false)
		graphW.Release()
	}

	stopTime := time.Now()

	// 6. Recording:
	s.recordTransaction(txn, preTxnRecord, executedOps, startTime, stopTime)

	// 7. Post-processing:
	s.postProcessTransaction(txn, executedOps)

	// 8. Statistics:
	updateTransactionStats(executedOps)
}
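
// Note: steps 3 and 5 above share one code path - executeTransaction - and,
// judging by the calls above, differ only in the last argument (true =
// simulation, no Create/Delete/Update operations are actually executed) and
// in whether the resulting graph changes are kept:
//
//	simulatedOps = s.executeTransaction(txn, graphW, true)  // plan only
//	executedOps  = s.executeTransaction(txn, graphW, false) // real execution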

// preProcessTransaction initializes transaction parameters, filters out obsolete
// retry operations and refreshes the graph for resync.
func (s *Scheduler) preProcessTransaction(txn *transaction) (skipExec, skipSimulation, record bool) {
	defer trace.StartRegion(txn.ctx, "preProcessTransaction").End()
	defer trackTransactionMethod("preProcessTransaction")()

	// allocate a new transaction sequence number
	txn.seqNum = s.txnSeqNumber
	s.txnSeqNumber++

	switch txn.txnType {
	case kvs.SBNotification:
		skipExec = s.preProcessNotification(txn)
		skipSimulation = !s.config.EnableTxnSimulation
		record = true
	case kvs.NBTransaction:
		skipExec = s.preProcessNBTransaction(txn)
		skipSimulation = skipExec || !txn.nb.withSimulation
		record = txn.nb.resyncType != kvs.DownstreamResync
	case kvs.RetryFailedOps:
		skipExec = s.preProcessRetryTxn(txn)
		skipSimulation = skipExec
		record = true
	}

	return
}

// preProcessNotification filters out invalid SB notifications.
func (s *Scheduler) preProcessNotification(txn *transaction) (skip bool) {
	graphR := s.graph.Read()
	defer graphR.Release()

	kv := txn.values[0]
	skip = s.filterNotification(graphR, kv.key, kv.value, txn.seqNum)
	return
}

// preProcessNBTransaction refreshes the graph for resync.
func (s *Scheduler) preProcessNBTransaction(txn *transaction) (skip bool) {
	if txn.nb.resyncType == kvs.NotResync {
		// nothing to do in the pre-processing stage
		return false
	}

	// for resync, refresh the graph + collect deletes
	graphW := s.graph.Write(true, false)
	defer graphW.Release()
	s.resyncCount++

	if txn.nb.resyncType == kvs.DownstreamResync {
		// for downstream resync it is assumed that the scheduler is in sync with NB
		currentNodes := graphW.GetNodes(nil, nbBaseValsSelectors()...)
		for _, node := range currentNodes {
			lastUpdate := getNodeLastUpdate(node)
			txn.values = append(txn.values,
				kvForTxn{
					key:      node.GetKey(),
					value:    lastUpdate.value,
					origin:   kvs.FromNB,
					isRevert: lastUpdate.revert,
				})
		}
	}

	// build the set of keys currently in NB
	nbKeys := utils.NewMapBasedKeySet()
	for _, kv := range txn.values {
		nbKeys.Add(kv.key)
	}

	// unless this is only an UpstreamResync, refresh the graph with the current
	// state of SB
	if txn.nb.resyncType != kvs.UpstreamResync {
		s.refreshGraph(graphW, nil, &resyncData{
			first:  s.resyncCount == 1,
			values: txn.values,
		}, txn.nb.verboseRefresh)
	}

	// collect deletes for obsolete values
	currentNodes := graphW.GetNodes(nil, nbBaseValsSelectors()...)
	for _, node := range currentNodes {
		if nbKeys.Has(node.GetKey()) {
			continue
		}
		txn.values = append(txn.values,
			kvForTxn{
				key:    node.GetKey(),
				value:  nil, // remove
				origin: kvs.FromNB,
			})
	}

	// update (record) SB values
	sbNodes := graphW.GetNodes(nil, sbBaseValsSelectors()...)
	for _, node := range sbNodes {
		if nbKeys.Has(node.GetKey()) {
			continue
		}
		txn.values = append(txn.values,
			kvForTxn{
				key:    node.GetKey(),
				value:  node.GetValue(),
				origin: kvs.FromSB,
			})
	}

	skip = len(txn.values) == 0
	return
}
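
// Worked example (illustrative keys): suppose NB supplies values for {A, B},
// the refreshed graph contains NB-origin base nodes {B, C} and an SB-origin
// node S. After pre-processing, txn.values holds:
//
//	A -> new value          // from NB, to be created
//	B -> new value          // from NB, to be updated
//	C -> nil                // obsolete NB value, scheduled for removal
//	S -> current SB value   // re-recorded with origin kvs.FromSB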

// preProcessRetryTxn filters out obsolete retry operations.
func (s *Scheduler) preProcessRetryTxn(txn *transaction) (skip bool) {
	graphR := s.graph.Read()
	defer graphR.Release()

	for key, retryRev := range txn.retry.keys {
		node := graphR.GetNode(key)
		if node == nil {
			continue
		}
		lastUpdate := getNodeLastUpdate(node)
		if lastUpdate == nil || lastUpdate.txnSeqNum > retryRev {
			// obsolete retry, the value has been updated since the failure
			continue
		}
		txn.values = append(txn.values,
			kvForTxn{
				key:      key,
				value:    lastUpdate.value,
				origin:   kvs.FromNB,
				isRevert: lastUpdate.revert,
			})
	}
	skip = len(txn.values) == 0
	return
}
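
// Example of the obsolescence check: a retry enqueued after txn #5 failed
// stores revision 5 for the key; if txn #7 has updated the value in the
// meantime, lastUpdate.txnSeqNum (7) > retryRev (5) and the retry for that
// key is silently dropped.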

// postProcessTransaction schedules retries for failed operations, propagates
// value state updates to the subscribers and delivers error/nil to the caller
// of a blocking commit.
func (s *Scheduler) postProcessTransaction(txn *transaction, executed kvs.RecordedTxnOps) {
	defer trace.StartRegion(txn.ctx, "postProcessTransaction").End()
	defer trackTransactionMethod("postProcessTransaction")()

	// collect new failures (combining derived with base)
	toRetry := utils.NewSliceBasedKeySet()
	toRefresh := utils.NewSliceBasedKeySet()

	var afterErrRefresh bool
	var kvErrors []kvs.KeyWithError
	graphR := s.graph.Read()
	for _, op := range executed {
		node := graphR.GetNode(op.Key)
		if node == nil {
			continue
		}
		state := getNodeState(node)
		baseKey := getNodeBaseKey(node)
		if state == kvscheduler.ValueState_UNIMPLEMENTED {
			continue
		}
		if state == kvscheduler.ValueState_FAILED {
			toRefresh.Add(baseKey)
			afterErrRefresh = true
		}
		if state == kvscheduler.ValueState_RETRYING {
			toRefresh.Add(baseKey)
			toRetry.Add(baseKey)
			afterErrRefresh = true
		}
		if s.verifyMode {
			toRefresh.Add(baseKey)
		}
	}
	graphR.Release()

	// refresh base values which themselves are in a failed state or have derived failed values
	// - in verifyMode all updated values are refreshed
	if toRefresh.Length() > 0 {
		// changes brought by a refresh triggered solely for the verification are
		// not saved into the graph
		graphW := s.graph.Write(afterErrRefresh, false)
		s.refreshGraph(graphW, toRefresh, nil, afterErrRefresh)
		s.scheduleRetries(txn, graphW, toRetry)

		// if enabled, verify transaction effects
		if s.verifyMode {
			kvErrors = s.verifyTransaction(graphW, executed)
		}
		graphW.Release()
	}

	// collect state updates
	var stateUpdates []*kvscheduler.BaseValueStatus
	removed := utils.NewSliceBasedKeySet()
	graphR = s.graph.Read()
	for _, key := range s.updatedStates.Iterate() {
		node := graphR.GetNode(key)
		status := getValueStatus(node, key)
		if status.Value.State == kvscheduler.ValueState_REMOVED {
			removed.Add(key)
		}
		stateUpdates = append(stateUpdates, status)
	}
	graphR.Release()
	// clear the set of updated states
	s.updatedStates = utils.NewSliceBasedKeySet()

	// build the transaction error
	var txnErr error
	for _, txnOp := range executed {
		if txnOp.NewErr == nil {
			continue
		}
		kvErrors = append(kvErrors,
			kvs.KeyWithError{
				Key:          txnOp.Key,
				TxnOperation: txnOp.Operation,
				Error:        txnOp.NewErr,
			})
	}
	if len(kvErrors) > 0 {
		txnErr = kvs.NewTransactionError(nil, kvErrors)
	}
	if txn.txnType == kvs.NBTransaction && txn.nb.isBlocking {
		// for a blocking txn, deliver the result (error or nil) to the resultChan
		select {
		case txn.nb.resultChan <- txnResult{txnSeqNum: txn.seqNum, err: txnErr}:
		default:
			s.Log.WithField("txnSeq", txn.seqNum).
				Warn("Failed to deliver transaction result to the caller")
		}
	} else {
		// for asynchronous events, just log the transaction error
		if txnErr == nil {
			s.Log.Infof("Transaction %d successful!", txn.seqNum)
		} else {
			s.Log.Error(txnErr.Error())
		}
	}

	// send value status updates to the watchers
	for _, watcher := range s.valStateWatchers {
		for _, stateUpdate := range stateUpdates {
			if watcher.selector == nil || watcher.selector(stateUpdate.Value.Key) {
				select {
				case watcher.channel <- stateUpdate:
				default:
					s.Log.WithField("txnSeq", txn.seqNum).
						Warn("Failed to deliver value status update to a watcher")
				}
			}
		}
	}

	// delete removed values from the graph after the notifications have been sent
	if removed.Length() > 0 {
		graphW := s.graph.Write(true, true)
		for _, key := range removed.Iterate() {
			graphW.DeleteNode(key)
		}
		graphW.Release()
	}
}
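
// Ordering rationale for the steps above: value status notifications are sent
// before the REMOVED nodes are deleted from the graph, so watchers can still
// observe ValueState_REMOVED for those keys. Also note that both delivery
// points (resultChan and watcher channels) use non-blocking sends - a slow
// consumer loses the update (with a warning) rather than stalling the
// scheduler's transaction loop.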

// scheduleRetries schedules a series of retry transactions for failed values.
func (s *Scheduler) scheduleRetries(txn *transaction, graphR graph.ReadAccess, toRetry utils.KeySet) {
	// split values based on the retry metadata
	retryTxns := make(map[retryTxnMeta]*retryTxn)
	for _, retryKey := range toRetry.Iterate() {
		node := graphR.GetNode(retryKey)
		lastUpdate := getNodeLastUpdate(node)
		// is this a repeated retry, i.e. did a previous retry of this key fail?
		var alreadyRetried bool
		if txn.txnType == kvs.RetryFailedOps {
			_, alreadyRetried = txn.retry.keys[retryKey]
		}
		// determine how long to delay the retry
		delay := lastUpdate.retryArgs.Period
		if alreadyRetried && lastUpdate.retryArgs.ExpBackoff {
			delay = txn.retry.delay * 2
		}
		// determine which attempt this is
		attempt := 1
		if alreadyRetried {
			attempt = txn.retry.attempt + 1
		}
		// determine which transaction this retry is for
		seqNum := txn.seqNum
		if alreadyRetried {
			seqNum = txn.retry.txnSeqNum
		}
		// add the key into the set to retry within a single transaction
		retryMeta := retryTxnMeta{
			txnSeqNum: seqNum,
			delay:     delay,
			attempt:   attempt,
		}
		if _, has := retryTxns[retryMeta]; !has {
			retryTxns[retryMeta] = &retryTxn{
				retryTxnMeta: retryMeta,
				keys:         make(map[string]uint64),
			}
		}
		retryTxns[retryMeta].keys[retryKey] = lastUpdate.txnSeqNum
	}

	// schedule a series of retry transactions for failed values
	for _, retryTxn := range retryTxns {
		s.enqueueRetry(retryTxn)
	}
}
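
// Back-off progression for a key with retryArgs.Period = 1s and
// ExpBackoff = true (sketch):
//
//	attempt 1 (first failure in txn #N): delay = Period = 1s
//	attempt 2 (the retry failed too):    delay = previous delay * 2 = 2s
//	attempt 3:                           delay = 4s
//
// All attempts keep txnSeqNum = N, so they remain attributable to the
// original transaction.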

// verifyTransaction verifies whether the effect of the transaction is as expected.
func (s *Scheduler) verifyTransaction(graphR graph.ReadAccess, executed kvs.RecordedTxnOps) (kvErrors []kvs.KeyWithError) {
	for _, op := range executed {
		key := op.Key
		node := graphR.GetNode(key)
		if node == nil {
			continue
		}
		state := getNodeState(node)
		if state == kvscheduler.ValueState_RETRYING || state == kvscheduler.ValueState_FAILED {
			// effects of failed operations are uncertain and therefore cannot be verified
			continue
		}

		expValue := getNodeLastAppliedValue(node)
		lastOp := getNodeLastOperation(node)

		expToNotExist := expValue == nil || state == kvscheduler.ValueState_PENDING || state == kvscheduler.ValueState_INVALID
		if expToNotExist && isNodeAvailable(node) {
			kvErrors = append(kvErrors, kvs.KeyWithError{
				Key:          key,
				Error:        kvs.NewVerificationError(key, kvs.ExpectedToNotExist),
				TxnOperation: lastOp,
			})
			continue
		}
		if expValue == nil {
			// properly removed
			continue
		}
		if !expToNotExist && !isNodeAvailable(node) {
			kvErrors = append(kvErrors, kvs.KeyWithError{
				Key:          key,
				Error:        kvs.NewVerificationError(key, kvs.ExpectedToExist),
				TxnOperation: lastOp,
			})
			continue
		}
		descriptor := s.registry.GetDescriptorForKey(key)
		handler := newDescriptorHandler(descriptor)
		equivalent := handler.equivalentValues(key, node.GetValue(), expValue)
		if !equivalent {
			kvErrors = append(kvErrors, kvs.KeyWithError{
				Key:          key,
				Error:        kvs.NewVerificationError(key, kvs.NotEquivalent),
				TxnOperation: lastOp,
			})
			s.Log.WithFields(
				logging.Fields{
					"applied":   expValue,
					"refreshed": node.GetValue(),
				}).Warn("Detected non-equivalent applied vs. refreshed values")
		}
	}
	return
}
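
// The three verification outcomes above map to errors as follows:
//
//	expected absent, found present  -> kvs.ExpectedToNotExist
//	expected present, found absent  -> kvs.ExpectedToExist
//	present, but not equivalent to the last applied value -> kvs.NotEquivalent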

// filterNotification checks whether the received notification should be filtered
// out or applied normally.
func (s *Scheduler) filterNotification(graphR graph.ReadAccess, key string, value proto.Message, txnSeqNum uint64) bool {
	descriptor := s.registry.GetDescriptorForKey(key)
	if descriptor == nil {
		s.Log.WithFields(logging.Fields{
			"txnSeqNum": txnSeqNum,
			"key":       key,
		}).Debug("Ignoring unimplemented notification")
		return true
	}
	node := graphR.GetNode(key)
	if node != nil {
		if getNodeOrigin(node) == kvs.FromNB {
			s.Log.WithFields(logging.Fields{
				"txnSeqNum": txnSeqNum,
				"key":       key,
			}).Debug("Ignoring notification for a NB-managed value")
			return true
		}
	}
	return false
}