github.com/braveheart12/insolar-09-08-19@v0.8.7/ledger/heavyclient/heavy_pool.go

/*
 *    Copyright 2019 Insolar Technologies
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 */

package heavyclient

import (
	"context"
	"sync"
	"time"

	"github.com/insolar/insolar/core"
	"github.com/insolar/insolar/instrumentation/inslogger"
	"github.com/insolar/insolar/ledger/recentstorage"
	"github.com/insolar/insolar/ledger/storage"
	"github.com/insolar/insolar/ledger/storage/jet"
	"go.opencensus.io/stats"
	"golang.org/x/sync/singleflight"
)

// Pool manages state of heavy sync clients (one client per jet id).
type Pool struct {
	bus            core.MessageBus
	pulseStorage   core.PulseStorage
	pulseTracker   storage.PulseTracker
	replicaStorage storage.ReplicaStorage
	cleaner        storage.Cleaner
	db             storage.DBContext

	clientDefaults Options

	sync.Mutex
	clients map[core.RecordID]*JetClient

	cleanupGroup singleflight.Group
}

// NewPool is a constructor of a new pool.
func NewPool(
	bus core.MessageBus,
	pulseStorage core.PulseStorage,
	tracker storage.PulseTracker,
	replicaStorage storage.ReplicaStorage,
	cleaner storage.Cleaner,
	db storage.DBContext,
	clientDefaults Options,
) *Pool {
	return &Pool{
		bus:            bus,
		pulseStorage:   pulseStorage,
		pulseTracker:   tracker,
		replicaStorage: replicaStorage,
		clientDefaults: clientDefaults,
		cleaner:        cleaner,
		db:             db,
		clients:        map[core.RecordID]*JetClient{},
	}
}

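// A minimal construction sketch, assuming the message bus, pulse and replica
// storages, cleaner and DB context are supplied by the component wiring
// (these identifiers and the Options value are illustrative only, not
// prescribed by this package):
//
//	pool := NewPool(bus, pulseStorage, pulseTracker, replicaStorage, cleaner, db, Options{})
//	// ... feed pulses to per-jet clients via AddPulsesToSyncClient ...
//	pool.Stop(ctx)
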
// Stop sends stop signals to all managed heavy clients and waits until all of them have stopped.
func (scp *Pool) Stop(ctx context.Context) {
	scp.Lock()
	defer scp.Unlock()

	var wg sync.WaitGroup
	wg.Add(len(scp.clients))
	for _, c := range scp.clients {
		c := c
		go func() {
			c.Stop(ctx)
			wg.Done()
		}()
	}
	wg.Wait()
}

// AddPulsesToSyncClient adds pulse numbers to the end of the jet's heavy client queue.
//
// Bool flag 'shouldrun' controls whether the heavy client should be started (if it is not running already).
func (scp *Pool) AddPulsesToSyncClient(
	ctx context.Context,
	jetID core.RecordID,
	shouldrun bool,
	pns ...core.PulseNumber,
) *JetClient {
	scp.Lock()
	client, ok := scp.clients[jetID]
	if !ok {
		client = NewJetClient(
			scp.replicaStorage,
			scp.bus,
			scp.pulseStorage,
			scp.pulseTracker,
			scp.cleaner,
			scp.db,
			jetID,
			scp.clientDefaults,
		)

		scp.clients[jetID] = client
	}
	scp.Unlock()

	client.addPulses(ctx, pns)

	if shouldrun {
		client.runOnce(ctx)
		if len(client.signal) == 0 {
			// send a signal that we have a new pulse
			client.signal <- struct{}{}
		}
	}
	return client
}

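// A hedged usage sketch: on a pulse switch a caller could enqueue the finished
// pulse for every known jet and wake the clients up (jetIDs and prevPulseNumber
// are assumptions here, not values provided by this package):
//
//	for _, jetID := range jetIDs {
//		pool.AddPulsesToSyncClient(ctx, jetID, true, prevPulseNumber)
//	}
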
// AllClients returns a slice with all clients in the Pool.
func (scp *Pool) AllClients(ctx context.Context) []*JetClient {
	scp.Lock()
	defer scp.Unlock()
	clients := make([]*JetClient, 0, len(scp.clients))
	for _, c := range scp.clients {
		clients = append(clients, c)
	}
	return clients
}

// LightCleanup starts async cleanup on all heavy synchronization clients (per-jet cleanup).
//
// It waits until all cleanup jobs are done and measures the time spent.
//
// Under the hood it uses singleflight on the jet prefix to avoid clashing on the same key space.
func (scp *Pool) LightCleanup(
	ctx context.Context,
	untilPN core.PulseNumber,
	rsp recentstorage.Provider,
	jetIndexesRemoved map[core.RecordID][]core.RecordID,
) error {
	inslog := inslogger.FromContext(ctx)
	start := time.Now()
	defer func() {
		latency := time.Since(start)
		inslog.Infof("cleanLightData db clean phase time spend=%v", latency)
		stats.Record(ctx, statCleanLatencyDB.M(latency.Nanoseconds()/1e6))
	}()

	func() {
		startCleanup := time.Now()
		defer func() {
			latency := time.Since(startCleanup)
			inslog.Infof("cleanLightData db clean phase job time spend=%v", latency)
		}()

		// This is how we can get all jets served by the light node during its storage lifetime.
		// jets, err := scp.db.GetAllSyncClientJets(ctx)

		allClients := scp.AllClients(ctx)
		var wg sync.WaitGroup

		// semaphore limiting the number of concurrent per-jet cleanup goroutines
		cleanupConcurrency := 8
		sem := make(chan struct{}, cleanupConcurrency)

		// jet prefixes already scheduled; records cleanup runs once per prefix
		jetPrefixSeen := map[string]struct{}{}

		for _, c := range allClients {
			jetID := c.jetID
			_, jetPrefix := jet.Jet(jetID)
			prefixKey := string(jetPrefix)

			_, skipRecordsCleanup := jetPrefixSeen[prefixKey]
			jetPrefixSeen[prefixKey] = struct{}{}

			// TODO: fill candidates here
			candidates := jetIndexesRemoved[jetID]

			if (len(candidates) == 0) && skipRecordsCleanup {
				continue
			}

			wg.Add(1)
			sem <- struct{}{}
			go func() {
				defer func() {
					wg.Done()
					<-sem
				}()
				_, _, _ = scp.cleanupGroup.Do(string(jetPrefix), func() (interface{}, error) {

					inslogger.FromContext(ctx).Debugf("Start light cleanup, pulse < %v, jet = %v",
						untilPN, jetID.DebugString())

					if len(candidates) > 0 {
						jetRecentStore := rsp.GetIndexStorage(ctx, jetID)
						idxsRmStat, err := scp.cleaner.CleanJetIndexes(ctx, jetID, jetRecentStore, candidates)
						if err != nil {
							inslogger.FromContext(ctx).Errorf("Error on indexes cleanup (pulse < %v, jet = %v): %v",
								untilPN, jetID.DebugString(), err)
						}
						inslogger.FromContext(ctx).Infof(
							"Indexes light cleanup stat=%#v (pulse < %v, jet = %v)", idxsRmStat, untilPN, jetID.DebugString())
					}

					if skipRecordsCleanup {
						return nil, nil
					}

					recsRmStat, err := scp.cleaner.CleanJetRecordsUntilPulse(ctx, jetID, untilPN)
					if err != nil {
						inslogger.FromContext(ctx).Errorf("Error on light cleanup (pulse < %v, jet = %v): %v",
							untilPN, jetID.DebugString(), err)
						return nil, nil
					}
					inslogger.FromContext(ctx).Infof(
						"Records light cleanup, records stat=%#v (pulse < %v, jet = %v)", recsRmStat, untilPN, jetID.DebugString())
					return nil, nil
				})
			}()
		}
		wg.Wait()
	}()
	return nil
}
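
// LightCleanup above deduplicates work with golang.org/x/sync/singleflight:
// Do calls that are in flight at the same time for the same key share a single
// execution of the wrapped function, and every caller receives that result.
// A standalone sketch of the behaviour (the key and return value are
// illustrative placeholders, not tied to jet prefixes):
//
//	var group singleflight.Group
//	for i := 0; i < 3; i++ {
//		go func() {
//			// overlapping callers with key "prefix-01" share one execution
//			_, _, shared := group.Do("prefix-01", func() (interface{}, error) {
//				return "cleaned", nil
//			})
//			_ = shared // true for callers that reused another call's execution
//		}()
//	}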