github.com/amazechain/amc@v0.1.3/internal/download/fetchers.go

// Copyright 2022 The AmazeChain Authors
// This file is part of the AmazeChain library.
//
// The AmazeChain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The AmazeChain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the AmazeChain library. If not, see <http://www.gnu.org/licenses/>.

package download

import (
	"math/rand"
	"time"

	"github.com/amazechain/amc/api/protocol/sync_proto"
	"github.com/amazechain/amc/common/message"
	"github.com/amazechain/amc/log"
	"github.com/amazechain/amc/utils"
	"github.com/holiman/uint256"
	"google.golang.org/protobuf/proto"
)

// fetchHeaders downloads block headers in the range (from, latest], splitting the
// work into tasks of at most maxHeaderFetch headers and handing them out to peers.
func (d *Downloader) fetchHeaders(from uint256.Int, latest uint256.Int) error {

	// 1. create tasks, one per maxHeaderFetch-sized slice of the range
	begin := new(uint256.Int).AddUint64(&from, 1)
	difference := new(uint256.Int).Sub(&latest, begin)
	tasks := new(uint256.Int).Add(new(uint256.Int).Div(difference, uint256.NewInt(maxHeaderFetch)), uint256.NewInt(1))

	log.Infof("Starting header downloads from: %v latest: %v difference: %v tasks: %v", begin.Uint64(), latest.Uint64(), difference.Uint64(), tasks.Uint64())
	defer log.Infof("Header download finished")

	d.headerTaskLock.Lock()
	for i := 1; i <= int(tasks.Uint64()); i++ {
		taskID := rand.Uint64()
		d.headerTasks = append(d.headerTasks, Task{
			taskID:     taskID,
			IndexBegin: *begin,
			//IndexEnd:   *types.Int256Min(uint256.NewInt(0).Sub(uint256.NewInt(0).Add(&from, uint256.NewInt(maxHeaderFetch)), uint256.NewInt(1)), &latest),
			IsSync: false,
		})
		begin = begin.Add(begin, uint256.NewInt(maxHeaderFetch))
	}
	d.headerTaskLock.Unlock()

	tick := time.NewTicker(syncPeerIntervalRequest)
	defer tick.Stop()

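	// dispatch loop: hand pending tasks to peers, requeue timed-out requests,
	// and exit once both the pending queue and the in-flight map are empty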
	for {
		log.Tracef("header tasks count is %v, header processing tasks count is %v", len(d.headerTasks), len(d.headerProcessingTasks))

		if len(d.headerTasks) == 0 && len(d.headerProcessingTasks) == 0 { // break if there are no tasks
			break
		}

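		// choose up to syncPeerCount peers to serve requests this round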
		peerSet := d.peersInfo.findPeers(&latest, syncPeerCount)
		d.headerTaskLock.Lock()
		if len(d.headerTasks) > 0 {
			for _, p := range peerSet {
				// always take the first pending task
				randIndex := 0
				randTask := d.headerTasks[randIndex]

				// clamp the request size so the last task does not overshoot the latest block
				var fetchCount uint64
				if latest.Uint64()-randTask.IndexBegin.Uint64() >= maxHeaderFetch-1 {
					fetchCount = maxHeaderFetch
				} else {
					fetchCount = latest.Uint64() - randTask.IndexBegin.Uint64() + 1
				}

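				// ask this peer for fetchCount headers starting at the task's begin index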
				msg := &sync_proto.SyncTask{
					Id:       randTask.taskID,
					SyncType: sync_proto.SyncType_HeaderReq,
					Payload: &sync_proto.SyncTask_SyncHeaderRequest{
						SyncHeaderRequest: &sync_proto.SyncHeaderRequest{
							Number: utils.ConvertUint256IntToH256(&randTask.IndexBegin),
							Amount: utils.ConvertUint256IntToH256(uint256.NewInt(fetchCount)),
						},
					},
				}
				payload, _ := proto.Marshal(msg)
				err := p.WriteMsg(message.MsgDownloader, payload)

				if err == nil {
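					// request sent: move the task from the pending queue to the in-flight map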
					randTask.TimeBegin = time.Now()
					d.headerTasks = append(d.headerTasks[:randIndex], d.headerTasks[randIndex+1:]...)
					d.headerProcessingTasks[randTask.taskID] = randTask
					if len(d.headerTasks) == 0 {
						break
					}
				} else {
					log.Errorf("failed to send sync request message to peer %v: %v", p.ID(), err)
				}
			}
		}

		// If it times out, put the task back
		if len(d.headerProcessingTasks) > 0 {
			for taskID, task := range d.headerProcessingTasks {
				if time.Since(task.TimeBegin) > syncTimeOutPerRequest {
					delete(d.headerProcessingTasks, taskID)
					task.TimeBegin = time.Now()
					d.headerTasks = append(d.headerTasks, task)
				}
			}
		}
		d.headerTaskLock.Unlock()

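		// wait for the next request interval, or stop if the downloader is cancelled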
		select {
		case <-d.ctx.Done():
			return ErrCanceled
		case <-tick.C:
			tick.Reset(syncPeerIntervalRequest)
			continue
		}
	}

	return nil
}

// fetchBodies downloads block bodies for the tasks queued in bodyTaskPool,
// dispatching requests to peers until the pool and the in-flight map are empty.
func (d *Downloader) fetchBodies(latest uint256.Int) error {

	defer log.Info("Bodies download finished")

	tick := time.NewTicker(syncPeerIntervalRequest)
	defer tick.Stop()
	startProcess := false

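	// dispatch loop: hand body tasks to peers until every queued task has been processed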
	for {
		peerSet := d.peersInfo.findPeers(&latest, syncPeerCount)

		log.Tracef("downloader body task count is %d, processing task count is %d", len(d.bodyTaskPool), len(d.bodyProcessingTasks))
		// return once processing has started and there are no pending or in-flight tasks left
		if startProcess && len(d.bodyTaskPool) == 0 && len(d.bodyProcessingTasks) == 0 {
			return nil
		}

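		// mark (once per downloader) that body processing has started; the
		// empty-pool check above only returns after this has run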
		d.once.Do(func() {
			log.Info("Starting body downloads")
			startProcess = true
		})

		d.bodyTaskPoolLock.Lock()
		if len(d.bodyTaskPool) > 0 {
			for _, p := range peerSet {
				// first task
				randIndex := 0
				randTask := d.bodyTaskPool[randIndex]

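				// ask this peer for the bodies of the block numbers carried by this task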
				msg := &sync_proto.SyncTask{
					Id:       randTask.taskID,
					SyncType: sync_proto.SyncType_BodyReq,
					Payload: &sync_proto.SyncTask_SyncBlockRequest{
						SyncBlockRequest: &sync_proto.SyncBlockRequest{
							Number: utils.Uint256sToH256(randTask.number), //todo task pool
						},
					},
				}
				payload, _ := proto.Marshal(msg)
				err := p.WriteMsg(message.MsgDownloader, payload)
				if err == nil {
					d.bodyTaskPool = append(d.bodyTaskPool[:randIndex], d.bodyTaskPool[randIndex+1:]...)
					d.bodyProcessingTasks[randTask.taskID] = randTask
					// stop once every pending task has been handed out
					if len(d.bodyTaskPool) == 0 {
						break
					}
				} else {
					log.Errorf("failed to send sync request message to peer %v: %v", p.ID(), err)
				}
			}
		}
		d.bodyTaskPoolLock.Unlock()

		select {
		case <-d.ctx.Done():
			return ErrCanceled
		case <-tick.C:
			tick.Reset(syncPeerIntervalRequest)
			continue
		}
	}
}