github.com/fozzysec/SiaPrime@v0.0.0-20190612043147-66c8e8d11fe3/modules/miningpool/miningpool.go

// Package pool is an implementation of the pool module, and is responsible for
// creating a mining pool, accepting incoming potential block solutions and
// rewarding the submitters proportionally for their shares.
package pool

import (
    "bytes"
    "encoding/binary"
    "encoding/hex"
    "errors"
    "fmt"
    "math/rand"
    "net"
    "path/filepath"
    "sync"
    "sync/atomic"
    "time"

    "SiaPrime/build"
    "SiaPrime/config"
    "SiaPrime/crypto"
    "SiaPrime/modules"
    "SiaPrime/persist"
    "SiaPrime/types"
    "gitlab.com/NebulousLabs/threadgroup"

    // blank import to load the sql driver for mysql
    //_ "github.com/go-sql-driver/mysql"
    "github.com/go-redis/redis"
)

var (
    // persistMetadata is the header that gets written to the persist file, and is
    // used to recognize other persist files.
    persistMetadata = persist.Metadata{
        Header:  "Sia Pool",
        Version: "0.0.1",
    }

    // errPoolClosed gets returned when a call is rejected due to the pool
    // having been closed.
    errPoolClosed = errors.New("call is disabled because the pool is closed")

    // Nil dependency errors.
    errNilCS    = errors.New("pool cannot use a nil consensus state")
    errNilTpool = errors.New("pool cannot use a nil transaction pool")
    errNilGW    = errors.New("pool cannot use a nil gateway")
    //	errNilWallet = errors.New("pool cannot use a nil wallet")

    // Required settings to run the pool.
    errNoAddressSet = errors.New("pool operator's address must be set")

    running  bool  // indicates whether the mining pool is actually running
    hashRate int64 // indicates hashes per second

    // HeaderMemory is the number of previous calls to 'header'
    // that are remembered. Additionally, 'header' will only poll for a
    // new block every 'HeaderMemory / BlockMemory' times it is
    // called. This reduces the amount of memory used, but comes at the cost of
    // not always having the most recent transactions.
    HeaderMemory = build.Select(build.Var{
        Standard: 10000,
        Dev:      500,
        Testing:  50,
    }).(int)

    // BlockMemory is the maximum number of blocks the miner will store.
    // Blocks take up to 2 megabytes of memory, which is why this number is
    // limited.
    BlockMemory = build.Select(build.Var{
        Standard: 50,
        Dev:      10,
        Testing:  5,
    }).(int)

    // MaxSourceBlockAge is the maximum amount of time that is allowed to
    // elapse between generating source blocks.
    MaxSourceBlockAge = build.Select(build.Var{
        Standard: 30 * time.Second,
        Dev:      5 * time.Second,
        Testing:  1 * time.Second,
    }).(time.Duration)

    // ShiftDuration is how often we commit mining data to persistent
    // storage when a block hasn't been found.
    ShiftDuration = build.Select(build.Var{
        Standard: 20 * time.Second,
        Dev:      20 * time.Second,
        Testing:  1 * time.Second,
    }).(time.Duration)
)

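// The build.Select pattern above picks one of the three values according to
// the current release tag (Standard, Dev, or Testing) and returns it as an
// interface{}, which is why each selection ends with a type assertion. A
// minimal sketch of how a new tunable would follow the same pattern (the
// variable name here is hypothetical and not part of this module):
//
//    // TargetShiftsPerBlock is illustrative only.
//    TargetShiftsPerBlock = build.Select(build.Var{
//        Standard: 30,
//        Dev:      5,
//        Testing:  2,
//    }).(int)
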
// splitSet defines a transaction set that can be added component-wise to a
// block. It's split because it doesn't necessarily represent the full set
// provided by the transaction pool. Splits can be sorted so that the largest
// and most valuable sets can be selected when picking transactions.
type splitSet struct {
    averageFee   types.Currency
    size         uint64
    transactions []types.Transaction
}

type splitSetID int

// A Pool contains all the fields necessary for storing status for clients and
// performing the evaluation and rewarding of submitted shares.
type Pool struct {
    // BlockManager variables. Because blocks are large, one block is used to
    // make many headers which can be used by miners. Headers include an
    // arbitrary data transaction (appended to the block) to make the merkle
    // roots unique (preventing miners from doing redundant work). Every N
    // requests or M seconds, a new block is used to create headers.
    //
    // Only 'BlockMemory' blocks are kept in memory at a time, which
    // keeps ram usage reasonable. Miners may request many headers in parallel,
    // and thus may be working on different blocks. When they submit the solved
    // header to the block manager, the rest of the block needs to be found in
    // a lookup.
    blockMem        map[types.BlockHeader]*types.Block             // Mappings from headers to the blocks they are derived from.
    blockTxns       *txnList                                       // List of transactions that are supposed to be solved in the next block.
    arbDataMem      map[types.BlockHeader][crypto.EntropySize]byte // Mappings from the headers to their unique arb data.
    headerMem       []types.BlockHeader                            // A circular list of headers that have been given out from the api recently.
    sourceBlock     types.Block                                    // The block from which new headers for mining are created.
    sourceBlockTime time.Time                                      // When the current source block was created (different from 'recent block').
    memProgress     int                                            // The index of the most recent header used in headerMem.

    // Transaction pool variables.
    fullSets        map[modules.TransactionSetID][]int
    blockMapHeap    *mapHeap
    overflowMapHeap *mapHeap
    setCounter      int
    splitSets       map[splitSetID]*splitSet

    // Dependencies.
    cs     modules.ConsensusSet
    tpool  modules.TransactionPool
    wallet modules.Wallet
    gw     modules.Gateway
    dependencies
    modules.StorageManager

    // Pool ACID fields - these fields need to be updated in serial, ACID
    // transactions.
    announceConfirmed bool
    secretKey         crypto.SecretKey

    // Pool transient fields - these fields are either determined at startup or
    // otherwise are not critical to always be correct.
    workingStatus        modules.PoolWorkingStatus
    connectabilityStatus modules.PoolConnectabilityStatus

    // Utilities.
    //sqldb          *sql.DB
    redisdb        map[string]*redis.ClusterClient
    listener       net.Listener
    log            *persist.Logger
    dblog          *persist.Logger
    mu             sync.RWMutex // deadlock.RWMutex reported spurious deadlocks and aborted the daemon when many miners subscribed in a short time
    blockFoundMu   sync.Mutex
    dbConnectionMu sync.RWMutex
    persistDir     string
    port           string
    tg             threadgroup.ThreadGroup
    persist        persistence
    dispatcher     *Dispatcher
    stratumID      uint64
    shiftID        uint64
    shiftChan      chan bool
    shiftTimestamp time.Time
    clients        map[string]*Client // Client name to client pointer mapping.

    clientSetupMutex map[string]*sync.Mutex
    runningMutex     sync.RWMutex
    running          bool
}

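// To make the BlockManager comment above concrete: when a miner returns a
// solved header, the pool reassembles the full block from its maps. A minimal
// sketch of that lookup, assuming the caller holds p.mu (the method name is
// hypothetical; the real submission path lives in the block manager code):
//
//    func (p *Pool) blockForHeader(bh types.BlockHeader) (*types.Block, bool) {
//        b, ok := p.blockMem[bh] // header -> source block it was derived from
//        if !ok {
//            return nil, false   // header too old; fell out of BlockMemory
//        }
//        _ = p.arbDataMem[bh]    // per-header arb data that made the merkle root unique
//        return b, true
//    }
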
// startupRescan will rescan the blockchain in the event that the pool
// persistence layer has become desynchronized from the consensus persistence
// layer. This might happen if a user replaces any of the folders with backups
// or deletes any of the folders.
func (p *Pool) startupRescan() error {
    // Reset all of the variables that have relevance to the consensus set. The
    // operations are wrapped by an anonymous function so that the locking can
    // be handled using a defer statement.
    err := func() error {
        //p.log.Debugf("Waiting to lock pool\n")
        p.mu.Lock()
        defer func() {
            //p.log.Debugf("Unlocking pool\n")
            p.mu.Unlock()
        }()

        p.log.Println("Performing a pool rescan.")
        p.persist.SetRecentChange(modules.ConsensusChangeBeginning)
        p.persist.SetBlockHeight(0)
        p.persist.SetTarget(types.Target{})
        return p.saveSync()
    }()
    if err != nil {
        return err
    }

    // Subscribe to the consensus set. This is a blocking call that will not
    // return until the pool has fully caught up to the current block.
    err = p.cs.ConsensusSetSubscribe(p, modules.ConsensusChangeBeginning, p.tg.StopChan())
    if err != nil {
        return err
    }
    p.tg.OnStop(func() error {
        p.cs.Unsubscribe(p)
        return nil
    })
    return nil
}

func (p *Pool) monitorShifts() {
    p.shiftChan = make(chan bool, 1)
    err := p.tg.Add()
    if err != nil {
        return
    }
    defer p.tg.Done()
    for {
        select {
        case <-p.shiftChan:
        case <-time.After(ShiftDuration):
        case <-p.tg.StopChan():
            return
        }
        //p.log.Debugf("Shift change - end of shift %d\n", p.shiftID)
        atomic.AddUint64(&p.shiftID, 1)
        p.dispatcher.mu.RLock()
        // TODO: switch to batched insert
        for _, h := range p.dispatcher.handlers {
            //h.mu.RLock()
            s := h.GetSession().Shift()
            //h.mu.RUnlock()
            go func(savingShift *Shift) {
                if savingShift != nil {
                    savingShift.SaveShift()
                }
            }(s)
            sh := p.newShift(h.GetSession().GetCurrentWorker())
            h.GetSession().addShift(sh)
        }
        p.dispatcher.mu.RUnlock()
    }
}

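// Each pass through the loop above performs one shift rotation: bump the
// global shift counter, detach every session's current shift, persist the
// detached shift on a background goroutine, and install a fresh shift so new
// shares accrue to the new period. A condensed sketch of that rotation for a
// single session (illustrative only; `session` is a hypothetical variable,
// the method names match the loop above):
//
//    atomic.AddUint64(&p.shiftID, 1) // new accounting period begins
//    if old := session.Shift(); old != nil {
//        go old.SaveShift()          // persist without blocking the rotation
//    }
//    session.addShift(p.newShift(session.GetCurrentWorker()))
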
func (p *Pool) startServer() {
    p.log.Printf("Waiting for consensus synchronization...\n")
    // If we've received the stop message before this can even be spun up, just
    // exit.
    select {
    case <-p.tg.StopChan():
        return
    default:
    }
    if err := p.tg.Add(); err != nil {
        return
    }

    defer p.tg.Done()
    for {
        if p.cs.Synced() {
            // If we're immediately synced upon subscription AND we never got ProcessConsensusChange
            // calls (this happens when we start the server and our most recent change was the latest
            // block in a synced chain), we will need to look at the top of the chain to set up our
            // source block. So let's just always do that upon first sync.
            finalBlock := p.cs.CurrentBlock()
            parentID := finalBlock.ID()
            p.mu.Lock()
            p.newSourceBlock()
            p.sourceBlock.ParentID = parentID
            p.sourceBlock.Timestamp, _ = p.cs.MinimumValidChildTimestamp(parentID)
            p.mu.Unlock()

            p.log.Printf("Starting Stratum Server\n")

            port := fmt.Sprintf("%d", p.InternalSettings().PoolNetworkPort)
            go p.dispatcher.ListenHandlers(port)
            p.tg.OnStop(func() error {
                // The listener may not have been opened yet.
                if p.dispatcher.ln != nil {
                    p.dispatcher.ln.Close()
                }
                return nil
            })
            return
        }
        time.Sleep(100 * time.Millisecond)
    }
}

// newPool returns an initialized Pool, taking a set of dependencies as input.
// By making the dependencies an argument of the 'new' call, the pool can be
// mocked such that the dependencies can return unexpected errors or unique
// behaviors during testing, enabling easier testing of the failure modes of
// the Pool.
func newPool(dependencies dependencies, cs modules.ConsensusSet, tpool modules.TransactionPool, gw modules.Gateway, wallet modules.Wallet, persistDir string, initConfig config.MiningPoolConfig) (*Pool, error) {
    // Check that all the dependencies were provided.
    if cs == nil {
        return nil, errNilCS
    }
    if tpool == nil {
        return nil, errNilTpool
    }
    if gw == nil {
        return nil, errNilGW
    }

    // Create the pool object.
    p := &Pool{
        cs:           cs,
        tpool:        tpool,
        gw:           gw,
        wallet:       wallet,
        dependencies: dependencies,

        blockMem:   make(map[types.BlockHeader]*types.Block),
        blockTxns:  newTxnList(),
        arbDataMem: make(map[types.BlockHeader][crypto.EntropySize]byte),
        headerMem:  make([]types.BlockHeader, HeaderMemory),

        fullSets:  make(map[modules.TransactionSetID][]int),
        splitSets: make(map[splitSetID]*splitSet),
        blockMapHeap: &mapHeap{
            selectID: make(map[splitSetID]*mapElement),
            data:     nil,
            minHeap:  true,
        },
        overflowMapHeap: &mapHeap{
            selectID: make(map[splitSetID]*mapElement),
            data:     nil,
            minHeap:  false,
        },

        persistDir:       persistDir,
        //stratumID:      rand.Uint64(),
        clients:          make(map[string]*Client),
        redisdb:          make(map[string]*redis.ClusterClient),
        clientSetupMutex: make(map[string]*sync.Mutex),
    }
    var err error
    atomic.StoreUint64(&p.stratumID, rand.Uint64())

    // Create the persist directory if it does not yet exist.
    err = dependencies.mkdirAll(p.persistDir, 0700)
    if err != nil {
        return nil, err
    }

    // Initialize the logger, and set up the stop call that will close the
    // logger.
    // fmt.Println("log path:", filepath.Join(p.persistDir, logFile))
    p.log, err = dependencies.newLogger(filepath.Join(p.persistDir, logFile))
    if err != nil {
        return nil, err
    }

    p.dblog, err = dependencies.newLogger(filepath.Join(p.persistDir, dblogFile))
    if err != nil {
        return nil, err
    }

    p.tg.AfterStop(func() error {
        err = p.log.Close()
        if err != nil {
            // State of the logger is uncertain, a Println will have to
            // suffice.
            fmt.Println("Error when closing the logger:", err)
        }
        return err
    })

    // Load the prior persistence structures, and configure the pool to save
    // before shutting down.
    err = p.load()
    if err != nil {
        return nil, err
    }
    p.setPoolSettings(initConfig)

    p.tg.AfterStop(func() error {
        p.mu.Lock()
        err = p.saveSync()
        p.mu.Unlock()
        if err != nil {
            fmt.Println("Could not save pool upon shutdown:", err)
        }
        return err
    })

    p.newDbConnection()

    // Clean old worker records for this stratum server just in case we didn't
    // shut down cleanly.
    err = p.DeleteAllWorkerRecords()
    if err != nil {
        return nil, errors.New("Failed to clean database: " + err.Error())
    }

    p.tg.OnStop(func() error {
        p.DeleteAllWorkerRecords()
        p.closeAllDB()
        return nil
    })

    // Grab our consensus set data.
    err = p.cs.ConsensusSetSubscribe(p, p.persist.RecentChange, p.tg.StopChan())
    if err == modules.ErrInvalidConsensusChangeID {
        // Perform a rescan of the consensus set if the change id is not found.
        // The id will only be not found if there has been desynchronization
        // between the miner and the consensus package.
        err = p.startupRescan()
        if err != nil {
            return nil, errors.New("mining pool startup failed - rescanning failed: " + err.Error())
        }
    } else if err != nil {
        return nil, errors.New("mining pool subscription failed: " + err.Error())
    }

    // Spin up a goroutine to handle shift changes.
    go p.monitorShifts()

    p.tg.OnStop(func() error {
        p.cs.Unsubscribe(p)
        return nil
    })

    p.tpool.TransactionPoolSubscribe(p)
    p.tg.OnStop(func() error {
        p.tpool.Unsubscribe(p)
        return nil
    })

    p.runningMutex.Lock()
    p.dispatcher = &Dispatcher{handlers: make(map[string]*Handler), mu: sync.RWMutex{}, p: p}
    p.dispatcher.log, _ = dependencies.newLogger(filepath.Join(p.persistDir, "stratum.log"))
    p.running = true
    p.runningMutex.Unlock()

    go p.startServer()

    return p, nil
}

// New returns an initialized Pool.
func New(cs modules.ConsensusSet, tpool modules.TransactionPool, gw modules.Gateway, wallet modules.Wallet, persistDir string, initConfig config.MiningPoolConfig) (*Pool, error) {
    return newPool(productionDependencies{}, cs, tpool, gw, wallet, persistDir, initConfig)
}

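// A minimal construction sketch for callers of New. The surrounding variables
// (cs, tpool, gw, wallet, cfg, siaDir) are hypothetical and would normally be
// built by the daemon's module loader:
//
//    pool, err := pool.New(cs, tpool, gw, wallet, filepath.Join(siaDir, "miningpool"), cfg)
//    if err != nil {
//        log.Fatal("unable to create the mining pool module: ", err)
//    }
//    defer pool.Close()
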
// Close shuts down the pool.
func (p *Pool) Close() error {
    p.log.Println("Closing pool")
    //defer func() {}()
    return p.tg.Stop()
}

// SetInternalSettings updates the pool's internal PoolInternalSettings object.
func (p *Pool) SetInternalSettings(settings modules.PoolInternalSettings) error {
    p.mu.Lock()
    defer p.mu.Unlock()

    err := p.tg.Add()
    if err != nil {
        return err
    }
    defer p.tg.Done()

    // The pool should not be open for business if it does not have an
    // unlock hash.
    err = p.checkAddress()
    if err != nil {
        return errors.New("internal settings not updated, no operator wallet set: " + err.Error())
    }

    p.persist.SetSettings(settings)
    p.persist.SetRevisionNumber(p.persist.GetRevisionNumber() + 1)

    err = p.saveSync()
    if err != nil {
        return errors.New("internal settings updated, but failed saving to disk: " + err.Error())
    }
    return nil
}

// InternalSettings returns the settings of a pool.
func (p *Pool) InternalSettings() modules.PoolInternalSettings {
    return p.persist.GetSettings()
}

// checkAddress checks that the pool has an operator address set, returning
// errNoAddressSet if the pool wallet is still the zero-value unlock hash.
func (p *Pool) checkAddress() error {
    if p.InternalSettings().PoolWallet == (types.UnlockHash{}) {
        return errNoAddressSet
    }
    return nil
}

// Client returns the client with the specified name that has been stored in
// memory.
func (p *Pool) Client(name string) *Client {
    p.mu.RLock()
    defer p.mu.RUnlock()

    return p.clients[name]
}

// AddClient stores the client with the specified name into memory.
func (p *Pool) AddClient(c *Client) {
    p.mu.Lock()
    defer p.mu.Unlock()

    p.clients[c.Name()] = c
}

// newStratumID returns a closure that generates unique IDs for use within the
// Stratum protocol. Each call to the returned function atomically increments
// the pool's stratum ID counter and returns the new value.
func (p *Pool) newStratumID() (f func() uint64) {
    f = func() uint64 {
        var i uint64
        for {
            i = atomic.LoadUint64(&p.stratumID)
            if atomic.CompareAndSwapUint64(&p.stratumID, i, i+1) {
                return i + 1
            }
        }
    }
    return
}

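// The load/compare-and-swap loop above is a lock-free increment: if another
// goroutine bumps p.stratumID between the Load and the CAS, the CAS fails and
// the loop retries against the fresh value, so no two callers ever receive the
// same ID. A short usage sketch (illustrative only):
//
//    next := p.newStratumID()
//    a := next() // e.g. 42
//    b := next() // 43; never equal to a, even across goroutines
//
// The same effect could be had with atomic.AddUint64(&p.stratumID, 1), which
// is what the retry loop reduces to.
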
func (p *Pool) coinB1() types.Transaction {
    s := fmt.Sprintf("\000     Software: siad-miningpool-module v%d.%02d\nPool name: \"%s\"     \000", MajorVersion, MinorVersion, p.InternalSettings().PoolName)
    //s := fmt.Sprintf("\000     Software: siad-miningpool-module v%d.%02d\nPool name: \"%s\"     \000", MajorVersion, MinorVersion, PoolName)
    if ((len(modules.PrefixNonSia[:]) + len(s)) % 2) != 0 {
        // odd length, add extra null
        s = s + "\000"
    }
    cb := make([]byte, len(modules.PrefixNonSia[:])+len(s)) // represents the bytes appended later
    n := copy(cb, modules.PrefixNonSia[:])
    copy(cb[n:], s)
    return types.Transaction{
        ArbitraryData: [][]byte{cb},
    }
}

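// The evenness check above pads the tag so that the prefix plus the formatted
// string is an even number of bytes. A worked example with hypothetical
// lengths: if len(modules.PrefixNonSia[:]) were 16 and the formatted string
// came out to 77 bytes, (16+77)%2 == 1, so one "\000" is appended and the
// arbitrary-data payload becomes 16+78 = 94 bytes; cb is then sized to exactly
// that sum before the prefix and string are copied in.
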
func (p *Pool) coinB1Txn() string {
    coinbaseTxn := p.coinB1()
    buf := new(bytes.Buffer)
    coinbaseTxn.MarshalSiaNoSignatures(buf)
    b := buf.Bytes()
    // extranonce1 and extranonce2 are 4 bytes each, and they will be part of the
    // arbitrary transaction via the arbitrary data field. When the arbitrary data
    // field is marshalled, the length of the arbitrary data must be specified. Thus
    // we leave 8 bytes for the necessary 2 extranonce fields. The position here is
    // determined by the format specified in the MarshalSiaNoSignatures function.
    binary.LittleEndian.PutUint64(b[72:87], binary.LittleEndian.Uint64(b[72:87])+8)
    return hex.EncodeToString(b)
}

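// In other words, the little-endian uint64 starting at offset 72 of the
// marshalled transaction holds the length of the arbitrary-data entry, and the
// pool advertises a length 8 bytes larger than what it marshalled so miners
// can append extranonce1 and extranonce2 (4 bytes each). A worked example with
// a hypothetical on-wire value: if the marshalled arbitrary data is 94 bytes,
// the field is rewritten to 102, and the miner-supplied 8 extranonce bytes
// complete the advertised length. PutUint64 only writes the first 8 bytes of
// the slice it is given, so the rewrite is equivalent to:
//
//    length := binary.LittleEndian.Uint64(b[72:80])    // e.g. 94
//    binary.LittleEndian.PutUint64(b[72:80], length+8) // now 102
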
func (p *Pool) coinB2() string {
    return "0000000000000000"
}

// NumConnections returns the number of TCP connections from clients the pool
// currently has open.
func (p *Pool) NumConnections() int {
    p.runningMutex.RLock()
    defer p.runningMutex.RUnlock()
    if p.running {
        return p.dispatcher.NumConnections()
    }
    return 0
}

// NumConnectionsOpened returns the total number of TCP connections from
// clients the pool has opened since startup.
func (p *Pool) NumConnectionsOpened() uint64 {
    p.runningMutex.RLock()
    defer p.runningMutex.RUnlock()
    if p.running {
        return p.dispatcher.NumConnectionsOpened()
    }
    return 0
}