github.com/jimmyx0x/go-ethereum@v1.10.28/les/vflux/client/serverpool.go

// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package client

import (
	"errors"
	"math/rand"
	"reflect"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/les/utils"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	minTimeout          = time.Millisecond * 500 // minimum request timeout suggested by the server pool
	timeoutRefresh      = time.Second * 5        // recalculate timeout if older than this
	dialCost            = 10000                  // cost of a TCP dial (used for known node selection weight calculation)
	dialWaitStep        = 1.5                    // exponential multiplier of redial wait time when no value was provided by the server
	queryCost           = 500                    // cost of a UDP pre-negotiation query
	queryWaitStep       = 1.02                   // exponential multiplier of redial wait time when no value was provided by the server
	waitThreshold       = time.Hour * 2000       // drop node if waiting time is over the threshold
	nodeWeightMul       = 1000000                // multiplier constant for node weight calculation
	nodeWeightThreshold = 100                    // minimum weight for keeping a node in the known (valuable) set
	minRedialWait       = 10                     // minimum redial wait time in seconds
	preNegLimit         = 5                      // maximum number of simultaneous pre-negotiation queries
	warnQueryFails      = 20                     // number of consecutive UDP query failures before we print a warning
	maxQueryFails       = 100                    // number of consecutive UDP query failures at which the chance of skipping a query reaches 50%
)

// ServerPool provides a node iterator for dial candidates. The output is a mix of newly discovered
// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.
type ServerPool struct {
	clock    mclock.Clock
	unixTime func() int64
	db       ethdb.KeyValueStore

	ns                  *nodestate.NodeStateMachine
	vt                  *ValueTracker
	mixer               *enode.FairMix
	mixSources          []enode.Iterator
	dialIterator        enode.Iterator
	validSchemes        enr.IdentityScheme
	trustedURLs         []string
	fillSet             *FillSet
	started, queryFails uint32

	timeoutLock      sync.RWMutex
	timeout          time.Duration
	timeWeights      ResponseTimeWeights
	timeoutRefreshed mclock.AbsTime

	suggestedTimeoutGauge, totalValueGauge metrics.Gauge
	sessionValueMeter                      metrics.Meter
}

// nodeHistory keeps track of dial costs which determine node weight together with the
// service value calculated by ValueTracker.
type nodeHistory struct {
	dialCost                       utils.ExpiredValue
	redialWaitStart, redialWaitEnd int64 // unix time (seconds)
}

type nodeHistoryEnc struct {
	DialCost                       utils.ExpiredValue
	RedialWaitStart, RedialWaitEnd uint64
}

// QueryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.
// It returns 1 if the remote node has confirmed that connection is possible, 0 if not
// possible and -1 if no response arrived (timeout).
type QueryFunc func(*enode.Node) int

var (
	clientSetup       = &nodestate.Setup{Version: 2}
	sfHasValue        = clientSetup.NewPersistentFlag("hasValue")
	sfQuery           = clientSetup.NewFlag("query")
	sfCanDial         = clientSetup.NewFlag("canDial")
	sfDialing         = clientSetup.NewFlag("dialed")
	sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout")
	sfConnected       = clientSetup.NewFlag("connected")
	sfRedialWait      = clientSetup.NewFlag("redialWait")
	sfAlwaysConnect   = clientSetup.NewFlag("alwaysConnect")
	sfDialProcess     = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait)

	sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}),
		func(field interface{}) ([]byte, error) {
			if n, ok := field.(nodeHistory); ok {
				ne := nodeHistoryEnc{
					DialCost:        n.dialCost,
					RedialWaitStart: uint64(n.redialWaitStart),
					RedialWaitEnd:   uint64(n.redialWaitEnd),
				}
				enc, err := rlp.EncodeToBytes(&ne)
				return enc, err
			}
			return nil, errors.New("invalid field type")
		},
		func(enc []byte) (interface{}, error) {
			var ne nodeHistoryEnc
			err := rlp.DecodeBytes(enc, &ne)
			n := nodeHistory{
				dialCost:        ne.DialCost,
				redialWaitStart: int64(ne.RedialWaitStart),
				redialWaitEnd:   int64(ne.RedialWaitEnd),
			}
			return n, err
		},
	)
	sfiNodeWeight     = clientSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
	sfiConnectedStats = clientSetup.NewField("connectedStats", reflect.TypeOf(ResponseTimeStats{}))
	sfiLocalAddress   = clientSetup.NewPersistentField("localAddress", reflect.TypeOf(&enr.Record{}),
		func(field interface{}) ([]byte, error) {
			if enr, ok := field.(*enr.Record); ok {
				enc, err := rlp.EncodeToBytes(enr)
				return enc, err
			}
			return nil, errors.New("invalid field type")
		},
		func(enc []byte) (interface{}, error) {
			var enr enr.Record
			if err := rlp.DecodeBytes(enc, &enr); err != nil {
				return nil, err
			}
			return &enr, nil
		},
	)
)

// NewServerPool creates a new server pool
func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duration, query QueryFunc, clock mclock.Clock, trustedURLs []string, requestList []RequestInfo) (*ServerPool, enode.Iterator) {
	s := &ServerPool{
		db:           db,
		clock:        clock,
		unixTime:     func() int64 { return time.Now().Unix() },
		validSchemes: enode.ValidSchemes,
		trustedURLs:  trustedURLs,
		vt:           NewValueTracker(db, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
		ns:           nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, clientSetup),
	}
	s.recalTimeout()
	s.mixer = enode.NewFairMix(mixTimeout)
	knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight)
	alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil)
	s.mixSources = append(s.mixSources, knownSelector)
	s.mixSources = append(s.mixSources, alwaysConnect)
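	// s.mixer fairly interleaves the weighted random selection of known nodes, the
	// alwaysConnect (trusted) nodes and any discovery sources attached later through
	// AddSource when producing dial candidates.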

	s.dialIterator = s.mixer
	if query != nil {
		s.dialIterator = s.addPreNegFilter(s.dialIterator, query)
	}

	s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {
			// dial timeout, no connection
			s.setRedialWait(n, dialCost, dialWaitStep)
			s.ns.SetStateSub(n, nodestate.Flags{}, sfDialing, 0)
		}
	})

	return s, &serverPoolIterator{
		dialIterator: s.dialIterator,
		nextFn: func(node *enode.Node) {
			s.ns.Operation(func() {
				s.ns.SetStateSub(node, sfDialing, sfCanDial, 0)
				s.ns.SetStateSub(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)
			})
		},
		nodeFn: s.DialNode,
	}
}

type serverPoolIterator struct {
	dialIterator enode.Iterator
	nextFn       func(*enode.Node)
	nodeFn       func(*enode.Node) *enode.Node
}

// Next implements enode.Iterator
func (s *serverPoolIterator) Next() bool {
	if s.dialIterator.Next() {
		s.nextFn(s.dialIterator.Node())
		return true
	}
	return false
}

// Node implements enode.Iterator
func (s *serverPoolIterator) Node() *enode.Node {
	return s.nodeFn(s.dialIterator.Node())
}

// Close implements enode.Iterator
func (s *serverPoolIterator) Close() {
	s.dialIterator.Close()
}

// AddMetrics adds metrics to the server pool. Should be called before Start().
func (s *ServerPool) AddMetrics(
	suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge metrics.Gauge,
	sessionValueMeter, serverDialedMeter metrics.Meter) {
	s.suggestedTimeoutGauge = suggestedTimeoutGauge
	s.totalValueGauge = totalValueGauge
	s.sessionValueMeter = sessionValueMeter
	if serverSelectableGauge != nil {
		s.ns.AddLogMetrics(sfHasValue, sfDialProcess, "selectable", nil, nil, serverSelectableGauge)
	}
	if serverDialedMeter != nil {
		s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil)
	}
	if serverConnectedGauge != nil {
		s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge)
	}
}

// AddSource adds a node discovery source to the server pool (should be called before start)
func (s *ServerPool) AddSource(source enode.Iterator) {
	if source != nil {
		s.mixSources = append(s.mixSources, source)
	}
}

// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.
// Nodes that are filtered out and do not appear on the output iterator are put back
// into redialWait state.
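// The sfQuery flag marks nodes with a pre-negotiation query in progress; the FillSet
// target ensures that at most preNegLimit queries run simultaneously.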
func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator {
	s.fillSet = NewFillSet(s.ns, input, sfQuery)
	s.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.Equals(sfQuery) {
			if newState.HasAll(sfQuery) {
				// remove the query flag if the node is already elsewhere in the dial process
				s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			}
			return
		}
		fails := atomic.LoadUint32(&s.queryFails)
		failMax := fails
		if failMax > maxQueryFails {
			failMax = maxQueryFails
		}
		if rand.Intn(maxQueryFails*2) < int(failMax) {
			// skip pre-negotiation with increasing chance, max 50%;
			// this ensures that the client can operate even if UDP is not working at all
			s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
			// set canDial before resetting the query flag so that FillSet will not read more
			// candidates unnecessarily
			s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			return
		}
		go func() {
			q := query(n)
			if q == -1 {
				atomic.AddUint32(&s.queryFails, 1)
				fails++
				if fails%warnQueryFails == 0 {
					// warn if a large number of consecutive queries have failed
					log.Warn("UDP connection queries failed", "count", fails)
				}
			} else {
				atomic.StoreUint32(&s.queryFails, 0)
			}
			s.ns.Operation(func() {
				// we are no longer running in the operation that the callback belongs to;
				// start a new one because setRedialWait must run inside an operation
				if q == 1 {
					s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
				} else {
					s.setRedialWait(n, queryCost, queryWaitStep)
				}
				s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			})
		}()
	})
	return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
		if waiting {
			s.fillSet.SetTarget(preNegLimit)
		} else {
			s.fillSet.SetTarget(0)
		}
	})
}

// Start starts the server pool. Note that NodeStateMachine should be started first.
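// On startup the mix sources are attached to the mixer, trusted URLs are flagged
// alwaysConnect and any redialWait period persisted in the database is restored,
// clamped to the last suggested wait in case the system clock was adjusted.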
func (s *ServerPool) Start() {
	s.ns.Start()
	for _, iter := range s.mixSources {
		// add sources to mixer at startup because the mixer instantly tries to read them
		// which should only happen after NodeStateMachine has been started
		s.mixer.AddSource(iter)
	}
	for _, url := range s.trustedURLs {
		if node, err := enode.Parse(s.validSchemes, url); err == nil {
			s.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)
		} else {
			log.Error("Invalid trusted server URL", "url", url, "error", err)
		}
	}
	unixTime := s.unixTime()
	s.ns.Operation(func() {
		s.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			s.calculateWeight(node)
			if n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {
				wait := n.redialWaitEnd - unixTime
				lastWait := n.redialWaitEnd - n.redialWaitStart
				if wait > lastWait {
					// if the time until expiration is larger than the last suggested
					// waiting time then the system clock was probably adjusted
					wait = lastWait
				}
				s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)
			}
		})
	})
	atomic.StoreUint32(&s.started, 1)
}

// Stop stops the server pool
func (s *ServerPool) Stop() {
	if s.fillSet != nil {
		s.fillSet.Close()
	}
	s.ns.Operation(func() {
		s.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {
			// recalculate weight of connected nodes in order to update hasValue flag if necessary
			s.calculateWeight(n)
		})
	})
	s.ns.Stop()
	s.vt.Stop()
}

// RegisterNode implements serverPeerSubscriber
func (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) {
	if atomic.LoadUint32(&s.started) == 0 {
		return nil, errors.New("server pool not started yet")
	}
	nvt := s.vt.Register(node.ID())
	s.ns.Operation(func() {
		s.ns.SetStateSub(node, sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)
		s.ns.SetFieldSub(node, sfiConnectedStats, nvt.RtStats())
		if node.IP().IsLoopback() {
			s.ns.SetFieldSub(node, sfiLocalAddress, node.Record())
		}
	})
	return nvt, nil
}

// UnregisterNode implements serverPeerSubscriber
func (s *ServerPool) UnregisterNode(node *enode.Node) {
	s.ns.Operation(func() {
		s.setRedialWait(node, dialCost, dialWaitStep)
		s.ns.SetStateSub(node, nodestate.Flags{}, sfConnected, 0)
		s.ns.SetFieldSub(node, sfiConnectedStats, nil)
	})
	s.vt.Unregister(node.ID())
}

// recalTimeout calculates the current recommended timeout. This value is used by
// the client as a "soft timeout" value. It also affects the service value calculation
// of individual nodes.
func (s *ServerPool) recalTimeout() {
	// Use cached result if possible, avoid recalculating too frequently.
	s.timeoutLock.RLock()
	refreshed := s.timeoutRefreshed
	s.timeoutLock.RUnlock()
	now := s.clock.Now()
	if refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {
		return
	}
	// Cached result is stale, recalculate a new one.
	rts := s.vt.RtStats()

	// Add a fake statistic here. It is an easy way to initialize with some
	// conservative values when the database is new. As soon as we have a
	// considerable amount of real stats this small value won't matter.
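	// (The call below adds a statistic weight of 10 at a 2-second response time; the
	// recommended timeout then becomes max(minTimeout, Timeout(0.1), 2*Timeout(0.5)),
	// i.e. at least 500ms, the 10% failure rate timeout or twice the median,
	// whichever is largest.)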
	rts.Add(time.Second*2, 10, s.vt.StatsExpFactor())

	// Use either 10% failure rate timeout or twice the median response time
	// as the recommended timeout.
	timeout := minTimeout
	if t := rts.Timeout(0.1); t > timeout {
		timeout = t
	}
	if t := rts.Timeout(0.5) * 2; t > timeout {
		timeout = t
	}
	s.timeoutLock.Lock()
	if s.timeout != timeout {
		s.timeout = timeout
		s.timeWeights = TimeoutWeights(s.timeout)

		if s.suggestedTimeoutGauge != nil {
			s.suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
		}
		if s.totalValueGauge != nil {
			s.totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
		}
	}
	s.timeoutRefreshed = now
	s.timeoutLock.Unlock()
}

// GetTimeout returns the recommended request timeout.
func (s *ServerPool) GetTimeout() time.Duration {
	s.recalTimeout()
	s.timeoutLock.RLock()
	defer s.timeoutLock.RUnlock()
	return s.timeout
}

// getTimeoutAndWeight returns the recommended request timeout as well as the
// response time weight which is necessary to calculate service value.
func (s *ServerPool) getTimeoutAndWeight() (time.Duration, ResponseTimeWeights) {
	s.recalTimeout()
	s.timeoutLock.RLock()
	defer s.timeoutLock.RUnlock()
	return s.timeout, s.timeWeights
}

// addDialCost adds the given amount of dial cost to the node history and returns the current
// amount of total dial cost
func (s *ServerPool) addDialCost(n *nodeHistory, amount int64) uint64 {
	logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())
	if amount > 0 {
		n.dialCost.Add(amount, logOffset)
	}
	totalDialCost := n.dialCost.Value(logOffset)
	if totalDialCost < dialCost {
		totalDialCost = dialCost
	}
	return totalDialCost
}

// serviceValue returns the service value accumulated in this session and in total
func (s *ServerPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {
	nvt := s.vt.GetNode(node.ID())
	if nvt == nil {
		return 0, 0
	}
	currentStats := nvt.RtStats()
	_, timeWeights := s.getTimeoutAndWeight()
	expFactor := s.vt.StatsExpFactor()

	totalValue = currentStats.Value(timeWeights, expFactor)
	if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(ResponseTimeStats); ok {
		diff := currentStats
		diff.SubStats(&connStats)
		sessionValue = diff.Value(timeWeights, expFactor)
		if s.sessionValueMeter != nil {
			s.sessionValueMeter.Mark(int64(sessionValue))
		}
	}
	return
}

// updateWeight calculates the node weight and updates the nodeWeight field and the
// hasValue flag. It also saves the node state if necessary.
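// Illustration: weight = totalValue * nodeWeightMul / totalDialCost, so a node with a
// total service value of 5 and the minimum dial cost of 10000 gets a weight of
// 5*1000000/10000 = 500, which is above nodeWeightThreshold (100) and therefore keeps
// the hasValue flag.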
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {
	weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))
	if weight >= nodeWeightThreshold {
		s.ns.SetStateSub(node, sfHasValue, nodestate.Flags{}, 0)
		s.ns.SetFieldSub(node, sfiNodeWeight, weight)
	} else {
		s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
		s.ns.SetFieldSub(node, sfiNodeWeight, nil)
		s.ns.SetFieldSub(node, sfiNodeHistory, nil)
		s.ns.SetFieldSub(node, sfiLocalAddress, nil)
	}
	s.ns.Persist(node) // saved if node history or hasValue changed
}

// setRedialWait calculates and sets the redialWait timeout based on the service value
// and dial cost accumulated during the last session/attempt and in total.
// The waiting time is raised exponentially if no service value has been received in order
// to prevent dialing an unresponsive node frequently for a very long time just because it
// was useful in the past. It can still be dialed occasionally, and once it provides
// a significant amount of service value again its waiting time is quickly reduced or reset
// to the minimum.
// Note: node weight is also recalculated and updated by this function.
// Note 2: this function should run inside a NodeStateMachine operation
func (s *ServerPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {
	n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
	sessionValue, totalValue := s.serviceValue(node)
	totalDialCost := s.addDialCost(&n, addDialCost)

	// if the current dial session has yielded at least the average value/dial cost ratio
	// then the waiting time should be reset to the minimum. If the session value
	// is below average but still positive then the timeout is limited to the ratio of
	// average / current service value multiplied by the minimum timeout. If the attempt
	// was unsuccessful then the timeout is raised exponentially without limitation.
	// Note: dialCost is used in the formula below even if dial was not attempted at all
	// because the pre-negotiation query did not return a positive result. In this case
	// the ratio has no meaning anyway and waitFactor is always raised, though in smaller
	// steps because queries are cheaper and therefore we can allow more failed attempts.
	unixTime := s.unixTime()
	plannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout
	var actualWait float64                                         // actual waiting time elapsed
	if unixTime > n.redialWaitEnd {
		// the planned timeout has elapsed
		actualWait = plannedTimeout
	} else {
		// if the node was redialed earlier then we do not raise the planned timeout
		// exponentially because that could lead to the timeout rising very high in
		// a short amount of time.
		// Note that in case of an early redial actualWait also includes the dial
		// timeout or connection time of the last attempt but it still serves its
		// purpose of preventing the timeout rising quicker than linearly as a function
		// of total time elapsed without a successful connection.
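		// Illustration (with the dial wait step of 1.5): if the last planned wait was
		// 60s and the node is redialed only 20s into it, actualWait is 20s and
		// nextTimeout below is held at the planned 60s instead of growing to 90s.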
		actualWait = float64(unixTime - n.redialWaitStart)
	}
	// raise timeout exponentially if the last planned timeout has elapsed
	// (use at least the last planned timeout otherwise)
	nextTimeout := actualWait * waitStep
	if plannedTimeout > nextTimeout {
		nextTimeout = plannedTimeout
	}
	// we reduce the waiting time if the server has provided service value during the
	// connection (but never under the minimum)
	a := totalValue * dialCost * float64(minRedialWait)
	b := float64(totalDialCost) * sessionValue
	if a < b*nextTimeout {
		nextTimeout = a / b
	}
	if nextTimeout < minRedialWait {
		nextTimeout = minRedialWait
	}
	wait := time.Duration(float64(time.Second) * nextTimeout)
	if wait < waitThreshold {
		n.redialWaitStart = unixTime
		n.redialWaitEnd = unixTime + int64(nextTimeout)
		s.ns.SetFieldSub(node, sfiNodeHistory, n)
		s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, wait)
		s.updateWeight(node, totalValue, totalDialCost)
	} else {
		// discard known node statistics if waiting time is very long because the node
		// hasn't been responsive for a very long time
		s.ns.SetFieldSub(node, sfiNodeHistory, nil)
		s.ns.SetFieldSub(node, sfiNodeWeight, nil)
		s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
	}
}

// calculateWeight calculates and sets the node weight without altering the node history.
// This function should be called during startup and shutdown only, otherwise setRedialWait
// will keep the weights updated as the underlying statistics are adjusted.
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) calculateWeight(node *enode.Node) {
	n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
	_, totalValue := s.serviceValue(node)
	totalDialCost := s.addDialCost(&n, 0)
	s.updateWeight(node, totalValue, totalDialCost)
}

// API returns the vflux client API
func (s *ServerPool) API() *PrivateClientAPI {
	return NewPrivateClientAPI(s.vt)
}

type dummyIdentity enode.ID

func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil }
func (id dummyIdentity) NodeAddr(r *enr.Record) []byte          { return id[:] }

// DialNode replaces the given enode with a locally generated one containing the ENR
// stored in the sfiLocalAddress field if present. This workaround ensures that nodes
// on the local network can be dialed at the local address if a connection has been
// successfully established previously.
// Note that NodeStateMachine always remembers the enode with the latest version of
// the remote signed ENR. ENR filtering should be performed on that version while
// DialNode should be used for dialing the node over TCP or UDP.
func (s *ServerPool) DialNode(n *enode.Node) *enode.Node {
	if enr, ok := s.ns.GetField(n, sfiLocalAddress).(*enr.Record); ok {
		n, _ := enode.New(dummyIdentity(n.ID()), enr)
		return n
	}
	return n
}

// Persist immediately stores the state of a node in the node database
func (s *ServerPool) Persist(n *enode.Node) {
	s.ns.Persist(n)
}
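
// Usage sketch (illustrative only, not part of this file's API): a vflux client would
// typically wire the pool up roughly as follows. The database, discovery iterator,
// trusted URLs and request list are assumptions of the example, not values defined here.
//
//	db := memorydb.New() // any ethdb.KeyValueStore works
//	pool, dialIter := NewServerPool(db, []byte("serverpool:"), time.Second, nil, &mclock.System{}, trustedURLs, requestList)
//	pool.AddSource(discoveryIterator) // e.g. a DNS discovery based enode.Iterator
//	pool.Start()
//	defer pool.Stop()
//	for dialIter.Next() {
//		node := dialIter.Node() // already mapped through DialNode
//		// dial the node, then call RegisterNode/UnregisterNode around the live connection
//	}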