github.com/braveheart12/just@v0.8.7/ledger/pulsemanager/pulsemanager.go

     1  /*
     2   *    Copyright 2019 Insolar Technologies
     3   *
     4   *    Licensed under the Apache License, Version 2.0 (the "License");
     5   *    you may not use this file except in compliance with the License.
     6   *    You may obtain a copy of the License at
     7   *
     8   *        http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   *    Unless required by applicable law or agreed to in writing, software
    11   *    distributed under the License is distributed on an "AS IS" BASIS,
    12   *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   *    See the License for the specific language governing permissions and
    14   *    limitations under the License.
    15   */
    16  
    17  package pulsemanager
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/rand"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/insolar/insolar"
    27  	"github.com/insolar/insolar/ledger/storage/nodes"
    28  	"github.com/pkg/errors"
    29  	"go.opencensus.io/stats"
    30  	"go.opencensus.io/trace"
    31  	"golang.org/x/sync/errgroup"
    32  
    33  	"github.com/insolar/insolar/configuration"
    34  	"github.com/insolar/insolar/core"
    35  	"github.com/insolar/insolar/core/message"
    36  	"github.com/insolar/insolar/core/reply"
    37  	"github.com/insolar/insolar/instrumentation/inslogger"
    38  	"github.com/insolar/insolar/instrumentation/instracer"
    39  	"github.com/insolar/insolar/ledger/artifactmanager"
    40  	"github.com/insolar/insolar/ledger/heavyclient"
    41  	"github.com/insolar/insolar/ledger/recentstorage"
    42  	"github.com/insolar/insolar/ledger/storage"
    43  	"github.com/insolar/insolar/ledger/storage/index"
    44  	"github.com/insolar/insolar/ledger/storage/jet"
    45  )
    46  
    47  //go:generate minimock -i github.com/insolar/insolar/ledger/pulsemanager.ActiveListSwapper -o ../../testutils -s _mock.go
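         // ActiveListSwapper moves the node list synced during the previous pulse into the active list when a new pulse is set.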
    48  type ActiveListSwapper interface {
    49  	MoveSyncToActive(ctx context.Context) error
    50  }
    51  
    52  // PulseManager implements core.PulseManager.
    53  type PulseManager struct {
    54  	LR                         core.LogicRunner                `inject:""`
    55  	Bus                        core.MessageBus                 `inject:""`
    56  	NodeNet                    core.NodeNetwork                `inject:""`
    57  	JetCoordinator             core.JetCoordinator             `inject:""`
    58  	GIL                        core.GlobalInsolarLock          `inject:""`
    59  	CryptographyService        core.CryptographyService        `inject:""`
    60  	PlatformCryptographyScheme core.PlatformCryptographyScheme `inject:""`
    61  	RecentStorageProvider      recentstorage.Provider          `inject:""`
    62  	ActiveListSwapper          ActiveListSwapper               `inject:""`
    63  	PulseStorage               pulseStoragePm                  `inject:""`
    64  	HotDataWaiter              artifactmanager.HotDataWaiter   `inject:""`
    65  	JetStorage                 storage.JetStorage              `inject:""`
    66  	DropStorage                storage.DropStorage             `inject:""`
    67  	ObjectStorage              storage.ObjectStorage           `inject:""`
    68  	NodeSetter                 nodes.Setter                    `inject:""`
    69  	Nodes                      nodes.Accessor                  `inject:""`
    70  	PulseTracker               storage.PulseTracker            `inject:""`
    71  	ReplicaStorage             storage.ReplicaStorage          `inject:""`
    72  	DBContext                  storage.DBContext               `inject:""`
    73  	StorageCleaner             storage.Cleaner                 `inject:""`
    74  
    75  	// TODO: move clients pool to component - @nordicdyno - 18.Dec.2018
    76  	syncClientsPool *heavyclient.Pool
    77  
    78  	currentPulse core.Pulse
    79  
    80  	// setLock protects calls to Set and the stopped flag.
    81  	setLock sync.RWMutex
    82  	// stopped indicates that the pulse manager has been stopped
    83  	stopped bool
    84  
    85  	// stores pulse manager options
    86  	options pmOptions
    87  }
    88  
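         // jetInfo describes a jet handled at the end of a pulse: its ID, whether this node
         // remains its executor on the next pulse (mineNext), and its children if the jet was split.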
    89  type jetInfo struct {
    90  	id       core.RecordID
    91  	mineNext bool
    92  	left     *jetInfo
    93  	right    *jetInfo
    94  }
    95  
    96  // TODO: @andreyromancev. 15.01.19. Just store ledger configuration in PM. This is not required.
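         // pmOptions holds pulse manager settings derived from the ledger configuration.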
    97  type pmOptions struct {
    98  	enableSync            bool
    99  	splitThreshold        uint64
   100  	dropHistorySize       int
   101  	storeLightPulses      int
   102  	heavySyncMessageLimit int
   103  	lightChainLimit       int
   104  }
   105  
   106  // NewPulseManager creates PulseManager instance.
   107  func NewPulseManager(conf configuration.Ledger) *PulseManager {
   108  	pmconf := conf.PulseManager
   109  
   110  	pm := &PulseManager{
   111  		currentPulse: *core.GenesisPulse,
   112  		options: pmOptions{
   113  			enableSync:            pmconf.HeavySyncEnabled,
   114  			splitThreshold:        pmconf.SplitThreshold,
   115  			dropHistorySize:       conf.JetSizesHistoryDepth,
   116  			storeLightPulses:      conf.LightChainLimit,
   117  			heavySyncMessageLimit: pmconf.HeavySyncMessageLimit,
   118  			lightChainLimit:       conf.LightChainLimit,
   119  		},
   120  	}
   121  	return pm
   122  }
   123  
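         // processEndPulse finalizes the ending pulse for every jet this node executed: it creates and
         // stores a drop per jet and sends hot data (recent indexes and pending requests) to the jets
         // whose next executor is another node.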
   124  func (m *PulseManager) processEndPulse(
   125  	ctx context.Context,
   126  	jets []jetInfo,
   127  	prevPulseNumber core.PulseNumber,
   128  	currentPulse, newPulse core.Pulse,
   129  ) error {
   130  	var g errgroup.Group
   131  	ctx, span := instracer.StartSpan(ctx, "pulse.process_end")
   132  	defer span.End()
   133  
   134  	logger := inslogger.FromContext(ctx)
   135  	for _, i := range jets {
   136  		info := i
   137  
   138  		g.Go(func() error {
   139  			drop, dropSerialized, _, err := m.createDrop(ctx, info.id, prevPulseNumber, currentPulse.PulseNumber)
   140  			if err != nil {
   141  				return errors.Wrapf(err, "create drop on pulse %v failed", currentPulse.PulseNumber)
   142  			}
   143  
   144  			sender := func(msg message.HotData, jetID core.RecordID) {
   145  				ctx, span := instracer.StartSpan(ctx, "pulse.send_hot")
   146  				defer span.End()
   147  				msg.Jet = *core.NewRecordRef(core.DomainID, jetID)
   148  				genericRep, err := m.Bus.Send(ctx, &msg, nil)
   149  				if err != nil {
   150  					logger.WithField("err", err).Error("failed to send hot data")
   151  					return
   152  				}
   153  				if _, ok := genericRep.(*reply.OK); !ok {
   154  					logger.WithField(
   155  						"err",
   156  						fmt.Sprintf("unexpected reply: %T", genericRep),
   157  					).Error("failed to send hot data")
   158  					return
   159  				}
   160  			}
   161  
   162  			if info.left == nil && info.right == nil {
   163  				msg, err := m.getExecutorHotData(
   164  					ctx, info.id, newPulse.PulseNumber, drop, dropSerialized,
   165  				)
   166  				if err != nil {
    167  				return errors.Wrapf(err, "getExecutorHotData failed for jet id %v", info.id)
   168  				}
   169  				// No split happened.
   170  				if !info.mineNext {
   171  					go sender(*msg, info.id)
   172  				}
   173  			} else {
   174  				msg, err := m.getExecutorHotData(
   175  					ctx, info.id, newPulse.PulseNumber, drop, dropSerialized,
   176  				)
   177  				if err != nil {
    178  				return errors.Wrapf(err, "getExecutorHotData failed for jet id %v", info.id)
   179  				}
   180  				// Split happened.
   181  				if !info.left.mineNext {
   182  					go sender(*msg, info.left.id)
   183  				}
   184  				if !info.right.mineNext {
   185  					go sender(*msg, info.right.id)
   186  				}
   187  			}
   188  
   189  			m.RecentStorageProvider.RemovePendingStorage(ctx, info.id)
   190  
    191  			// FIXME: @andreyromancev. 09.01.2019. Temporarily disabled validation. Uncomment when jet split works properly.
   192  			// dropErr := m.processDrop(ctx, jetID, currentPulse, dropSerialized, messages)
   193  			// if dropErr != nil {
   194  			// 	return errors.Wrap(dropErr, "processDrop failed")
   195  			// }
   196  
   197  			return nil
   198  		})
   199  	}
   200  	err := g.Wait()
   201  	if err != nil {
   202  		return errors.Wrap(err, "got error on jets sync")
   203  	}
   204  
   205  	return nil
   206  }
   207  
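         // createDrop builds the drop for the given jet and pulse, chaining it to the previous drop
         // (falling back to the parent jet, or to an empty drop if none is found), stores it and
         // records its signed drop size.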
   208  func (m *PulseManager) createDrop(
   209  	ctx context.Context,
   210  	jetID core.RecordID,
   211  	prevPulse, currentPulse core.PulseNumber,
   212  ) (
   213  	drop *jet.JetDrop,
   214  	dropSerialized []byte,
   215  	messages [][]byte,
   216  	err error,
   217  ) {
   218  	var prevDrop *jet.JetDrop
   219  	prevDrop, err = m.DropStorage.GetDrop(ctx, jetID, prevPulse)
   220  	if err == core.ErrNotFound {
   221  		prevDrop, err = m.DropStorage.GetDrop(ctx, jet.Parent(jetID), prevPulse)
   222  		if err == core.ErrNotFound {
   223  			inslogger.FromContext(ctx).WithFields(map[string]interface{}{
   224  				"pulse": prevPulse,
   225  				"jet":   jetID.DebugString(),
   226  			}).Error("failed to find drop")
   227  			prevDrop = &jet.JetDrop{Pulse: prevPulse}
   228  			err = m.DropStorage.SetDrop(ctx, jetID, prevDrop)
   229  			if err != nil {
   230  				return nil, nil, nil, errors.Wrap(err, "failed to create empty drop")
   231  			}
   232  		} else if err != nil {
   233  			return nil, nil, nil, errors.Wrap(err, "[ createDrop ] failed to find parent")
   234  		}
   235  	} else if err != nil {
   236  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't GetDrop")
   237  	}
   238  
   239  	drop, messages, dropSize, err := m.DropStorage.CreateDrop(ctx, jetID, currentPulse, prevDrop.Hash)
   240  	if err != nil {
   241  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't CreateDrop")
   242  	}
   243  	err = m.DropStorage.SetDrop(ctx, jetID, drop)
   244  	if err != nil {
   245  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't SetDrop")
   246  	}
   247  
   248  	dropSerialized, err = jet.Encode(drop)
   249  	if err != nil {
   250  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't Encode")
   251  	}
   252  
   253  	dropSizeData := &jet.DropSize{
   254  		JetID:    jetID,
   255  		PulseNo:  currentPulse,
   256  		DropSize: dropSize,
   257  	}
   258  	hasher := m.PlatformCryptographyScheme.IntegrityHasher()
   259  	_, err = dropSizeData.WriteHashData(hasher)
   260  	if err != nil {
   261  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't WriteHashData")
   262  	}
    263  	signature, err := m.CryptographyService.Sign(hasher.Sum(nil))
    264  	if err != nil {
    265  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't Sign")
    266  	}
    267  	dropSizeData.Signature = signature.Bytes()
   269  
   270  	err = m.DropStorage.AddDropSize(ctx, dropSizeData)
   271  	if err != nil {
   272  		return nil, nil, nil, errors.Wrap(err, "[ createDrop ] Can't AddDropSize")
   273  	}
   274  
   275  	return
   276  }
   277  
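         // getExecutorHotData collects recent object indexes and pending requests for the jet and packs
         // them, together with the drop and the drop size history, into a HotData message for the next executor.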
   278  func (m *PulseManager) getExecutorHotData(
   279  	ctx context.Context,
   280  	jetID core.RecordID,
   281  	pulse core.PulseNumber,
   282  	drop *jet.JetDrop,
   283  	dropSerialized []byte,
   284  ) (*message.HotData, error) {
   285  	ctx, span := instracer.StartSpan(ctx, "pulse.prepare_hot_data")
   286  	defer span.End()
   287  
   288  	logger := inslogger.FromContext(ctx)
   289  	indexStorage := m.RecentStorageProvider.GetIndexStorage(ctx, jetID)
   290  	pendingStorage := m.RecentStorageProvider.GetPendingStorage(ctx, jetID)
   291  	recentObjectsIds := indexStorage.GetObjects()
   292  
   293  	recentObjects := map[core.RecordID]message.HotIndex{}
   294  	pendingRequests := map[core.RecordID]recentstorage.PendingObjectContext{}
   295  
   296  	for id, ttl := range recentObjectsIds {
   297  		lifeline, err := m.ObjectStorage.GetObjectIndex(ctx, jetID, &id, false)
   298  		if err != nil {
   299  			logger.Error(err)
   300  			continue
   301  		}
   302  		encoded, err := index.EncodeObjectLifeline(lifeline)
   303  		if err != nil {
   304  			logger.Error(err)
   305  			continue
   306  		}
   307  		recentObjects[id] = message.HotIndex{
   308  			TTL:   ttl,
   309  			Index: encoded,
   310  		}
   311  	}
   312  
   313  	requestCount := 0
   314  	for objID, objContext := range pendingStorage.GetRequests() {
   315  		if len(objContext.Requests) > 0 {
   316  			pendingRequests[objID] = objContext
   317  			requestCount += len(objContext.Requests)
   318  		}
   319  	}
   320  
   321  	stats.Record(
   322  		ctx,
   323  		statHotObjectsSent.M(int64(len(recentObjects))),
   324  		statPendingSent.M(int64(requestCount)),
   325  	)
   326  
   327  	dropSizeHistory, err := m.DropStorage.GetDropSizeHistory(ctx, jetID)
   328  	if err != nil {
    329  		return nil, errors.Wrap(err, "[ getExecutorHotData ] Can't GetDropSizeHistory")
   330  	}
   331  
   332  	msg := &message.HotData{
   333  		Drop:               *drop,
   334  		DropJet:            jetID,
   335  		PulseNumber:        pulse,
   336  		RecentObjects:      recentObjects,
   337  		PendingRequests:    pendingRequests,
   338  		JetDropSizeHistory: dropSizeHistory,
   339  	}
   340  	return msg, nil
   341  }
   342  
   343  // TODO: @andreyromancev. 12.01.19. Remove when dynamic split is working.
   344  var splitCount = 5
   345  
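         // processJets clones the jet tree for the new pulse and, for each leaf jet this node executed,
         // decides whether the jet is split (currently one random leaf per pulse, limited by splitCount)
         // and whether this node remains the executor on the next pulse.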
   346  func (m *PulseManager) processJets(ctx context.Context, currentPulse, newPulse core.PulseNumber) ([]jetInfo, error) {
   347  	ctx, span := instracer.StartSpan(ctx, "jets.process")
   348  	defer span.End()
   349  
   350  	tree := m.JetStorage.CloneJetTree(ctx, currentPulse, newPulse)
   351  
   352  	if m.NodeNet.GetOrigin().Role() != core.StaticRoleLightMaterial {
   353  		return nil, nil
   354  	}
   355  
   356  	var results []jetInfo
   357  	jetIDs := tree.LeafIDs()
   358  	me := m.JetCoordinator.Me()
   359  	logger := inslogger.FromContext(ctx).WithFields(map[string]interface{}{
   360  		"current_pulse": currentPulse,
   361  		"new_pulse":     newPulse,
   362  	})
   363  	indexToSplit := rand.Intn(len(jetIDs))
   364  	for i, jetID := range jetIDs {
   365  		wasExecutor := false
   366  		executor, err := m.JetCoordinator.LightExecutorForJet(ctx, jetID, currentPulse)
   367  		if err != nil && err != core.ErrNoNodes {
   368  			return nil, err
   369  		}
   370  		if err == nil {
   371  			wasExecutor = *executor == me
   372  		}
   373  
   374  		logger = logger.WithField("jetid", jetID.DebugString())
   375  		inslogger.SetLogger(ctx, logger)
   376  		logger.WithField("i_was_executor", wasExecutor).Debug("process jet")
   377  		if !wasExecutor {
   378  			continue
   379  		}
   380  
   381  		info := jetInfo{id: jetID}
   382  		if indexToSplit == i && splitCount > 0 {
   383  			splitCount--
   384  
   385  			leftJetID, rightJetID, err := m.JetStorage.SplitJetTree(
   386  				ctx,
   387  				newPulse,
   388  				jetID,
   389  			)
   390  			if err != nil {
   391  				return nil, errors.Wrap(err, "failed to split jet tree")
   392  			}
   393  			err = m.JetStorage.AddJets(ctx, *leftJetID, *rightJetID)
   394  			if err != nil {
   395  				return nil, errors.Wrap(err, "failed to add jets")
   396  			}
    397  			// Mark the new leaves as actual because we are the last executor for the parent jet.
   398  			m.JetStorage.UpdateJetTree(ctx, newPulse, true, *leftJetID, *rightJetID)
   399  
   400  			info.left = &jetInfo{id: *leftJetID}
   401  			info.right = &jetInfo{id: *rightJetID}
   402  			nextLeftExecutor, err := m.JetCoordinator.LightExecutorForJet(ctx, *leftJetID, newPulse)
   403  			if err != nil {
   404  				return nil, err
   405  			}
   406  			if *nextLeftExecutor == me {
   407  				info.left.mineNext = true
   408  				err := m.rewriteHotData(ctx, jetID, *leftJetID)
   409  				if err != nil {
   410  					return nil, err
   411  				}
   412  			}
   413  			nextRightExecutor, err := m.JetCoordinator.LightExecutorForJet(ctx, *rightJetID, newPulse)
   414  			if err != nil {
   415  				return nil, err
   416  			}
   417  			if *nextRightExecutor == me {
   418  				info.right.mineNext = true
   419  				err := m.rewriteHotData(ctx, jetID, *rightJetID)
   420  				if err != nil {
   421  					return nil, err
   422  				}
   423  			}
   424  
   425  			logger.WithFields(map[string]interface{}{
   426  				"left_child":  leftJetID.DebugString(),
   427  				"right_child": rightJetID.DebugString(),
   428  			}).Info("jet split performed")
   429  		} else {
    430  			// Mark the jet as actual because we are the last executor for it.
   431  			m.JetStorage.UpdateJetTree(ctx, newPulse, true, jetID)
   432  			nextExecutor, err := m.JetCoordinator.LightExecutorForJet(ctx, jetID, newPulse)
   433  			if err != nil {
   434  				return nil, err
   435  			}
   436  			if *nextExecutor == me {
   437  				info.mineNext = true
   438  			}
   439  		}
   440  		results = append(results, info)
   441  	}
   442  
   443  	return results, nil
   444  }
   445  
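         // rewriteHotData copies recent object indexes from a parent jet to a freshly split child and
         // clones the recent and pending storages, so the child starts with the parent's hot data.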
   446  func (m *PulseManager) rewriteHotData(ctx context.Context, fromJetID, toJetID core.RecordID) error {
   447  	indexStorage := m.RecentStorageProvider.GetIndexStorage(ctx, fromJetID)
   448  
   449  	logger := inslogger.FromContext(ctx).WithFields(map[string]interface{}{
   450  		"from_jet": fromJetID.DebugString(),
   451  		"to_jet":   toJetID.DebugString(),
   452  	})
   453  	for id := range indexStorage.GetObjects() {
   454  		idx, err := m.ObjectStorage.GetObjectIndex(ctx, fromJetID, &id, false)
   455  		if err != nil {
   456  			if err == core.ErrNotFound {
   457  				logger.WithField("id", id.DebugString()).Error("rewrite index not found")
   458  				continue
   459  			}
   460  			return errors.Wrap(err, "failed to rewrite index")
   461  		}
   462  		err = m.ObjectStorage.SetObjectIndex(ctx, toJetID, &id, idx)
   463  		if err != nil {
   464  			return errors.Wrap(err, "failed to rewrite index")
   465  		}
   466  	}
   467  
   468  	m.RecentStorageProvider.CloneIndexStorage(ctx, fromJetID, toJetID)
   469  	m.RecentStorageProvider.ClonePendingStorage(ctx, fromJetID, toJetID)
   470  
   471  	return nil
   472  }
   473  
    474  // Set sets a new pulse and closes the current jet drop.
   475  func (m *PulseManager) Set(ctx context.Context, newPulse core.Pulse, persist bool) error {
   476  	m.setLock.Lock()
   477  	defer m.setLock.Unlock()
   478  	if m.stopped {
   479  		return errors.New("can't call Set method on PulseManager after stop")
   480  	}
   481  
   482  	ctx, span := instracer.StartSpan(
   483  		ctx, "pulse.process", trace.WithSampler(trace.AlwaysSample()),
   484  	)
   485  	span.AddAttributes(
   486  		trace.Int64Attribute("pulse.PulseNumber", int64(newPulse.PulseNumber)),
   487  	)
   488  	defer span.End()
   489  
   490  	jets, jetIndexesRemoved, oldPulse, prevPN, err := m.setUnderGilSection(ctx, newPulse, persist)
   491  	if err != nil {
   492  		return err
   493  	}
   494  
   495  	if !persist {
   496  		return nil
   497  	}
   498  
    499  	// Run this only on the light material executor.
    500  	// TODO: do as much as possible asynchronously.
   502  	if m.NodeNet.GetOrigin().Role() == core.StaticRoleLightMaterial && oldPulse != nil && prevPN != nil {
   503  		err = m.processEndPulse(ctx, jets, *prevPN, *oldPulse, newPulse)
   504  		if err != nil {
   505  			return err
   506  		}
   507  		m.postProcessJets(ctx, newPulse, jets)
   508  		m.addSync(ctx, jets, oldPulse.PulseNumber)
   509  		go m.cleanLightData(ctx, newPulse, jetIndexesRemoved)
   510  	}
   511  
   512  	err = m.Bus.OnPulse(ctx, newPulse)
   513  	if err != nil {
    514  		inslogger.FromContext(ctx).Error(errors.Wrap(err, "MessageBus OnPulse() returned an error"))
   515  	}
   516  
    517  	if m.NodeNet.GetOrigin().Role() == core.StaticRoleVirtual {
    518  		if err := m.LR.OnPulse(ctx, newPulse); err != nil {
    519  			return err
    520  		}
    521  	}
   523  
   524  	return nil
   525  }
   526  
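         // setUnderGilSection performs the part of Set that must run under the global lock: it swaps the
         // current pulse and the active node list, persists the new pulse and node list if requested, and
         // prepares jet information for end-of-pulse processing on light material nodes.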
   527  func (m *PulseManager) setUnderGilSection(
   528  	ctx context.Context, newPulse core.Pulse, persist bool,
   529  ) (
   530  	[]jetInfo, map[core.RecordID][]core.RecordID, *core.Pulse, *core.PulseNumber, error,
   531  ) {
   532  	var (
   533  		oldPulse *core.Pulse
   534  		prevPN   *core.PulseNumber
   535  	)
   536  
   537  	m.GIL.Acquire(ctx)
   538  	ctx, span := instracer.StartSpan(ctx, "pulse.gil_locked")
   539  	defer span.End()
   540  	defer m.GIL.Release(ctx)
   541  
   542  	m.PulseStorage.Lock()
   543  	// FIXME: @andreyromancev. 17.12.18. return core.Pulse here.
   544  	storagePulse, err := m.PulseTracker.GetLatestPulse(ctx)
   545  	if err != nil && err != core.ErrNotFound {
   546  		m.PulseStorage.Unlock()
    547  		return nil, nil, nil, nil, errors.Wrap(err, "call of GetLatestPulse failed")
   548  	}
   549  
   550  	if err != core.ErrNotFound {
   551  		oldPulse = &storagePulse.Pulse
   552  		prevPN = storagePulse.Prev
   553  		ctx, _ = inslogger.WithField(ctx, "current_pulse", fmt.Sprintf("%d", oldPulse.PulseNumber))
   554  	}
   555  
   556  	logger := inslogger.FromContext(ctx)
   557  	logger.WithFields(map[string]interface{}{
   558  		"new_pulse": newPulse.PulseNumber,
   559  		"persist":   persist,
   560  	}).Debugf("received pulse")
   561  
   562  	// swap pulse
   563  	m.currentPulse = newPulse
   564  
   565  	// swap active nodes
    566  	err = m.ActiveListSwapper.MoveSyncToActive(ctx)
    567  	if err != nil {
         		m.PulseStorage.Unlock()
    568  		return nil, nil, nil, nil, errors.Wrap(err, "failed to apply new active node list")
    569  	}
   570  	if persist {
   571  		if err := m.PulseTracker.AddPulse(ctx, newPulse); err != nil {
   572  			m.PulseStorage.Unlock()
   573  			return nil, nil, nil, nil, errors.Wrap(err, "call of AddPulse failed")
   574  		}
   575  		fromNetwork := m.NodeNet.GetWorkingNodes()
   576  		toSet := make([]insolar.Node, 0, len(fromNetwork))
   577  		for _, node := range fromNetwork {
   578  			toSet = append(toSet, insolar.Node{ID: node.ID(), Role: node.Role()})
   579  		}
   580  		err = m.NodeSetter.Set(newPulse.PulseNumber, toSet)
   581  		if err != nil {
   582  			m.PulseStorage.Unlock()
    583  			return nil, nil, nil, nil, errors.Wrap(err, "call of NodeSetter.Set failed")
   584  		}
   585  	}
   586  
   587  	m.PulseStorage.Set(&newPulse)
   588  	m.PulseStorage.Unlock()
   589  
   590  	if m.NodeNet.GetOrigin().Role() == core.StaticRoleHeavyMaterial {
   591  		return nil, nil, nil, nil, nil
   592  	}
   593  
   594  	var jets []jetInfo
   595  	if persist && oldPulse != nil {
   596  		jets, err = m.processJets(ctx, oldPulse.PulseNumber, newPulse.PulseNumber)
    597  		// We have just joined the network.
   598  		if err == core.ErrNoNodes {
   599  			return jets, map[core.RecordID][]core.RecordID{}, oldPulse, prevPN, nil
   600  		}
   601  		if err != nil {
   602  			return nil, nil, nil, nil, errors.Wrap(err, "failed to process jets")
   603  		}
   604  	}
   605  
   606  	removed := map[core.RecordID][]core.RecordID{}
   607  	if oldPulse != nil && prevPN != nil {
   608  		removed = m.RecentStorageProvider.DecreaseIndexesTTL(ctx)
   609  		if m.NodeNet.GetOrigin().Role() == core.StaticRoleLightMaterial {
   610  			m.prepareArtifactManagerMessageHandlerForNextPulse(ctx, newPulse, jets)
   611  		}
   612  	}
   613  
   614  	if persist && oldPulse != nil {
   615  		nodes, err := m.Nodes.All(oldPulse.PulseNumber)
   616  		if err != nil {
   617  			return nil, nil, nil, nil, err
   618  		}
   619  		// No active nodes for pulse. It means there was no processing (network start).
   620  		if len(nodes) == 0 {
   621  			// Activate zero jet for jet tree and unlock jet waiter.
   622  			zeroJet := *jet.NewID(0, nil)
   623  			m.JetStorage.UpdateJetTree(ctx, newPulse.PulseNumber, true, zeroJet)
   624  			err := m.HotDataWaiter.Unlock(ctx, zeroJet)
   625  			if err != nil {
   626  				if err == artifactmanager.ErrWaiterNotLocked {
   627  					inslogger.FromContext(ctx).Error(err)
   628  				} else {
   629  					return nil, nil, nil, nil, errors.Wrap(err, "failed to unlock zero jet")
   630  				}
   631  			}
   632  		}
   633  	}
   634  
   635  	return jets, removed, oldPulse, prevPN, nil
   636  }
   637  
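         // addSync schedules the given jets for replication to heavy material nodes for the given pulse.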
   638  func (m *PulseManager) addSync(ctx context.Context, jets []jetInfo, pulse core.PulseNumber) {
   639  	ctx, span := instracer.StartSpan(ctx, "pulse.add_sync")
   640  	defer span.End()
   641  
   642  	if !m.options.enableSync || m.NodeNet.GetOrigin().Role() != core.StaticRoleLightMaterial {
   643  		return
   644  	}
   645  
   646  	for _, jInfo := range jets {
   647  		m.syncClientsPool.AddPulsesToSyncClient(ctx, jInfo.id, true, pulse)
   648  	}
   649  }
   650  
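         // postProcessJets removes pending storage for jets this node will not execute on the next pulse.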
   651  func (m *PulseManager) postProcessJets(ctx context.Context, newPulse core.Pulse, jets []jetInfo) {
   652  	ctx, span := instracer.StartSpan(ctx, "jets.post_process")
   653  	defer span.End()
   654  
   655  	for _, jetInfo := range jets {
   656  		if !jetInfo.mineNext {
   657  			m.RecentStorageProvider.RemovePendingStorage(ctx, jetInfo.id)
   658  		}
   659  	}
   660  }
   661  
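         // cleanLightData removes data that falls outside the light chain limit: it runs heavy-client
         // cleanup up to the target pulse and then deletes the jet tree, node list and pulse record
         // preceding it.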
   662  func (m *PulseManager) cleanLightData(ctx context.Context, newPulse core.Pulse, jetIndexesRemoved map[core.RecordID][]core.RecordID) {
   663  	startSync := time.Now()
   664  	inslog := inslogger.FromContext(ctx)
   665  	ctx, span := instracer.StartSpan(ctx, "pulse.clean")
   666  	defer func() {
   667  		latency := time.Since(startSync)
   668  		stats.Record(ctx, statCleanLatencyTotal.M(latency.Nanoseconds()/1e6))
   669  		span.End()
    670  		inslog.Infof("cleanLightData total time spent=%v", latency)
   671  	}()
   672  
   673  	delta := m.options.storeLightPulses
   674  
   675  	p, err := m.PulseTracker.GetNthPrevPulse(ctx, uint(delta), newPulse.PulseNumber)
   676  	if err != nil {
   677  		inslogger.FromContext(ctx).Errorf("Can't get %dth previous pulse: %s", delta, err)
   678  		return
   679  	}
   680  
   681  	pn := p.Pulse.PulseNumber
   682  	err = m.syncClientsPool.LightCleanup(ctx, pn, m.RecentStorageProvider, jetIndexesRemoved)
   683  	if err != nil {
   684  		inslogger.FromContext(ctx).Errorf(
    685  			"Error on light cleanup up to pulse = %v, singleflight err = %v", pn, err)
   686  	}
   687  
   688  	p, err = m.PulseTracker.GetPreviousPulse(ctx, pn)
   689  	if err != nil {
   690  		inslogger.FromContext(ctx).Errorf("Can't get previous pulse: %s", err)
   691  		return
   692  	}
   693  	m.JetStorage.DeleteJetTree(ctx, p.Pulse.PulseNumber)
   694  	m.NodeSetter.Delete(p.Pulse.PulseNumber)
   695  	err = m.PulseTracker.DeletePulse(ctx, p.Pulse.PulseNumber)
   696  	if err != nil {
   697  		inslogger.FromContext(ctx).Errorf("Can't clean pulse-tracker from pulse: %s", err)
   698  	}
   699  }
   700  
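         // prepareArtifactManagerMessageHandlerForNextPulse times out pending hot-data waiters and
         // immediately unlocks the waiter for jets (or split children) this node keeps executing,
         // since it will not send hot data to itself.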
   701  func (m *PulseManager) prepareArtifactManagerMessageHandlerForNextPulse(ctx context.Context, newPulse core.Pulse, jets []jetInfo) {
   702  	ctx, span := instracer.StartSpan(ctx, "early.close")
   703  	defer span.End()
   704  
   705  	m.HotDataWaiter.ThrowTimeout(ctx)
   706  
   707  	logger := inslogger.FromContext(ctx)
   708  	for _, jetInfo := range jets {
   709  		if jetInfo.left == nil && jetInfo.right == nil {
   710  			// No split happened.
   711  			if jetInfo.mineNext {
   712  				err := m.HotDataWaiter.Unlock(ctx, jetInfo.id)
   713  				if err != nil {
   714  					logger.Error(err)
   715  				}
   716  			}
   717  		} else {
   718  			// Split happened.
   719  			if jetInfo.left.mineNext {
   720  				err := m.HotDataWaiter.Unlock(ctx, jetInfo.left.id)
   721  				if err != nil {
   722  					logger.Error(err)
   723  				}
   724  			}
   725  			if jetInfo.right.mineNext {
   726  				err := m.HotDataWaiter.Unlock(ctx, jetInfo.right.id)
   727  				if err != nil {
   728  					logger.Error(err)
   729  				}
   730  			}
   731  		}
   732  	}
   733  }
   734  
    735  // Start starts the pulse manager and spawns the replication goroutine under the hood.
   736  func (m *PulseManager) Start(ctx context.Context) error {
   737  	err := m.restoreLatestPulse(ctx)
   738  	if err != nil {
   739  		return err
   740  	}
   741  
   742  	origin := m.NodeNet.GetOrigin()
   743  	err = m.NodeSetter.Set(core.FirstPulseNumber, []insolar.Node{{ID: origin.ID(), Role: origin.Role()}})
   744  	if err != nil && err != storage.ErrOverride {
   745  		return err
   746  	}
   747  
   748  	if m.options.enableSync && m.NodeNet.GetOrigin().Role() == core.StaticRoleLightMaterial {
   749  		heavySyncPool := heavyclient.NewPool(
   750  			m.Bus,
   751  			m.PulseStorage,
   752  			m.PulseTracker,
   753  			m.ReplicaStorage,
   754  			m.StorageCleaner,
   755  			m.DBContext,
   756  			heavyclient.Options{
   757  				SyncMessageLimit: m.options.heavySyncMessageLimit,
   758  				PulsesDeltaLimit: m.options.lightChainLimit,
   759  			},
   760  		)
   761  		m.syncClientsPool = heavySyncPool
   762  
   763  		err := m.initJetSyncState(ctx)
   764  		if err != nil {
   765  			return err
   766  		}
   767  	}
   768  
   769  	return m.restoreGenesisRecentObjects(ctx)
   770  }
   771  
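         // restoreLatestPulse loads the latest stored pulse into the pulse storage on heavy material nodes.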
   772  func (m *PulseManager) restoreLatestPulse(ctx context.Context) error {
   773  	if m.NodeNet.GetOrigin().Role() != core.StaticRoleHeavyMaterial {
   774  		return nil
   775  	}
   776  	pulse, err := m.PulseTracker.GetLatestPulse(ctx)
   777  	if err != nil {
   778  		return err
   779  	}
   780  	m.PulseStorage.Lock()
   781  	m.PulseStorage.Set(&pulse.Pulse)
   782  	m.PulseStorage.Unlock()
   783  
   784  	return nil
   785  }
   786  
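         // restoreGenesisRecentObjects re-registers genesis (first pulse) object indexes of the zero jet
         // in recent storage on non-heavy nodes.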
   787  func (m *PulseManager) restoreGenesisRecentObjects(ctx context.Context) error {
   788  	if m.NodeNet.GetOrigin().Role() == core.StaticRoleHeavyMaterial {
   789  		return nil
   790  	}
   791  
   792  	jetID := *jet.NewID(0, nil)
   793  	recent := m.RecentStorageProvider.GetIndexStorage(ctx, jetID)
   794  
   795  	return m.ObjectStorage.IterateIndexIDs(ctx, jetID, func(id core.RecordID) error {
   796  		if id.Pulse() == core.FirstPulseNumber {
   797  			recent.AddObject(ctx, id)
   798  		}
   799  		return nil
   800  	})
   801  }
   802  
    803  // Stop stops the PulseManager and waits until the replication goroutine is done.
   804  func (m *PulseManager) Stop(ctx context.Context) error {
    805  	// There must not be any Set calls after Stop has been called.
   806  	m.setLock.Lock()
   807  	m.stopped = true
   808  	m.setLock.Unlock()
   809  
   810  	if m.options.enableSync && m.NodeNet.GetOrigin().Role() == core.StaticRoleLightMaterial {
    811  		inslogger.FromContext(ctx).Info("waiting for the heavy replication clients to finish...")
   812  		m.syncClientsPool.Stop(ctx)
   813  	}
   814  	return nil
   815  }