github.com/hardtosaygoodbye/go-ethereum@v1.10.16-0.20220122011429-97003b9e6c15/les/vflux/client/serverpool.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package client

import (
	"errors"
	"math/rand"
	"reflect"
	"sync"
	"sync/atomic"
	"time"

	"github.com/hardtosaygoodbye/go-ethereum/common/mclock"
	"github.com/hardtosaygoodbye/go-ethereum/ethdb"
	"github.com/hardtosaygoodbye/go-ethereum/les/utils"
	"github.com/hardtosaygoodbye/go-ethereum/log"
	"github.com/hardtosaygoodbye/go-ethereum/metrics"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/enode"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/enr"
	"github.com/hardtosaygoodbye/go-ethereum/p2p/nodestate"
	"github.com/hardtosaygoodbye/go-ethereum/rlp"
)

const (
	minTimeout          = time.Millisecond * 500 // minimum request timeout suggested by the server pool
	timeoutRefresh      = time.Second * 5        // recalculate timeout if older than this
	dialCost            = 10000                  // cost of a TCP dial (used for known node selection weight calculation)
	dialWaitStep        = 1.5                    // exponential multiplier of redial wait time when no value was provided by the server
	queryCost           = 500                    // cost of a UDP pre-negotiation query
	queryWaitStep       = 1.02                   // exponential multiplier of redial wait time when no value was provided by the server
	waitThreshold       = time.Hour * 2000       // drop node if waiting time is over the threshold
	nodeWeightMul       = 1000000                // multiplier constant for node weight calculation
	nodeWeightThreshold = 100                    // minimum weight for keeping a node in the known (valuable) set
	minRedialWait       = 10                     // minimum redial wait time in seconds
	preNegLimit         = 5                      // maximum number of simultaneous pre-negotiation queries
	warnQueryFails      = 20                     // number of consecutive UDP query failures before we print a warning
	maxQueryFails       = 100                    // number of consecutive UDP query failures when the chance of skipping a query reaches 50%
)

// ServerPool provides a node iterator for dial candidates. The output is a mix of newly discovered
// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.
type ServerPool struct {
	clock    mclock.Clock
	unixTime func() int64
	db       ethdb.KeyValueStore

	ns                  *nodestate.NodeStateMachine
	vt                  *ValueTracker
	mixer               *enode.FairMix
	mixSources          []enode.Iterator
	dialIterator        enode.Iterator
	validSchemes        enr.IdentityScheme
	trustedURLs         []string
	fillSet             *FillSet
	started, queryFails uint32

	timeoutLock      sync.RWMutex
	timeout          time.Duration
	timeWeights      ResponseTimeWeights
	timeoutRefreshed mclock.AbsTime

	suggestedTimeoutGauge, totalValueGauge metrics.Gauge
	sessionValueMeter                      metrics.Meter
}

// nodeHistory keeps track of dial costs which determine node weight together with the
// service value calculated by ValueTracker.
type nodeHistory struct {
	dialCost                       utils.ExpiredValue
	redialWaitStart, redialWaitEnd int64 // unix time (seconds)
}

type nodeHistoryEnc struct {
	DialCost                       utils.ExpiredValue
	RedialWaitStart, RedialWaitEnd uint64
}

// QueryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.
// It returns 1 if the remote node has confirmed that connection is possible, 0 if not
// possible and -1 if no response arrived (timeout).
type QueryFunc func(*enode.Node) int

var (
	clientSetup       = &nodestate.Setup{Version: 2}
	sfHasValue        = clientSetup.NewPersistentFlag("hasValue")
	sfQuery           = clientSetup.NewFlag("query")
	sfCanDial         = clientSetup.NewFlag("canDial")
	sfDialing         = clientSetup.NewFlag("dialed")
	sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout")
	sfConnected       = clientSetup.NewFlag("connected")
	sfRedialWait      = clientSetup.NewFlag("redialWait")
	sfAlwaysConnect   = clientSetup.NewFlag("alwaysConnect")
	sfDialProcess     = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait)

	sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}),
		func(field interface{}) ([]byte, error) {
			if n, ok := field.(nodeHistory); ok {
				ne := nodeHistoryEnc{
					DialCost:        n.dialCost,
					RedialWaitStart: uint64(n.redialWaitStart),
					RedialWaitEnd:   uint64(n.redialWaitEnd),
				}
				enc, err := rlp.EncodeToBytes(&ne)
				return enc, err
			}
			return nil, errors.New("invalid field type")
		},
		func(enc []byte) (interface{}, error) {
			var ne nodeHistoryEnc
			err := rlp.DecodeBytes(enc, &ne)
			n := nodeHistory{
				dialCost:        ne.DialCost,
				redialWaitStart: int64(ne.RedialWaitStart),
				redialWaitEnd:   int64(ne.RedialWaitEnd),
			}
			return n, err
		},
	)
	sfiNodeWeight     = clientSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
	sfiConnectedStats = clientSetup.NewField("connectedStats", reflect.TypeOf(ResponseTimeStats{}))
	sfiLocalAddress   = clientSetup.NewPersistentField("localAddress", reflect.TypeOf(&enr.Record{}),
		func(field interface{}) ([]byte, error) {
			if enr, ok := field.(*enr.Record); ok {
				enc, err := rlp.EncodeToBytes(enr)
				return enc, err
			}
			return nil, errors.New("invalid field type")
		},
		func(enc []byte) (interface{}, error) {
			var enr enr.Record
			if err := rlp.DecodeBytes(enc, &enr); err != nil {
				return nil, err
			}
			return &enr, nil
		},
	)
)
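
// Illustrative sketch (not part of the original library code): a QueryFunc implementation
// would typically send a vflux UDP pre-negotiation request to the candidate node and map
// the outcome onto the contract documented above. The canConnect callback used here is a
// hypothetical stand-in for the actual transport-level query.
func exampleQuery(canConnect func(*enode.Node) (bool, error)) QueryFunc {
	return func(n *enode.Node) int {
		ok, err := canConnect(n)
		switch {
		case err != nil:
			return -1 // no response arrived (timeout)
		case ok:
			return 1 // remote node confirmed that connection is possible
		default:
			return 0 // connection is not possible
		}
	}
}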

// NewServerPool creates a new server pool.
func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duration, query QueryFunc, clock mclock.Clock, trustedURLs []string, requestList []RequestInfo) (*ServerPool, enode.Iterator) {
	s := &ServerPool{
		db:           db,
		clock:        clock,
		unixTime:     func() int64 { return time.Now().Unix() },
		validSchemes: enode.ValidSchemes,
		trustedURLs:  trustedURLs,
		vt:           NewValueTracker(db, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
		ns:           nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, clientSetup),
	}
	s.recalTimeout()
	s.mixer = enode.NewFairMix(mixTimeout)
	knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight)
	alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil)
	s.mixSources = append(s.mixSources, knownSelector)
	s.mixSources = append(s.mixSources, alwaysConnect)

	s.dialIterator = s.mixer
	if query != nil {
		s.dialIterator = s.addPreNegFilter(s.dialIterator, query)
	}

	s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {
			// dial timeout, no connection
			s.setRedialWait(n, dialCost, dialWaitStep)
			s.ns.SetStateSub(n, nodestate.Flags{}, sfDialing, 0)
		}
	})

	return s, &serverPoolIterator{
		dialIterator: s.dialIterator,
		nextFn: func(node *enode.Node) {
			s.ns.Operation(func() {
				s.ns.SetStateSub(node, sfDialing, sfCanDial, 0)
				s.ns.SetStateSub(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)
			})
		},
		nodeFn: s.DialNode,
	}
}

type serverPoolIterator struct {
	dialIterator enode.Iterator
	nextFn       func(*enode.Node)
	nodeFn       func(*enode.Node) *enode.Node
}

// Next implements enode.Iterator
func (s *serverPoolIterator) Next() bool {
	if s.dialIterator.Next() {
		s.nextFn(s.dialIterator.Node())
		return true
	}
	return false
}

// Node implements enode.Iterator
func (s *serverPoolIterator) Node() *enode.Node {
	return s.nodeFn(s.dialIterator.Node())
}

// Close implements enode.Iterator
func (s *serverPoolIterator) Close() {
	s.dialIterator.Close()
}

// AddMetrics adds metrics to the server pool. Should be called before Start().
func (s *ServerPool) AddMetrics(
	suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge metrics.Gauge,
	sessionValueMeter, serverDialedMeter metrics.Meter) {

	s.suggestedTimeoutGauge = suggestedTimeoutGauge
	s.totalValueGauge = totalValueGauge
	s.sessionValueMeter = sessionValueMeter
	if serverSelectableGauge != nil {
		s.ns.AddLogMetrics(sfHasValue, sfDialProcess, "selectable", nil, nil, serverSelectableGauge)
	}
	if serverDialedMeter != nil {
		s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil)
	}
	if serverConnectedGauge != nil {
		s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge)
	}
}

// AddSource adds a node discovery source to the server pool (should be called before start)
func (s *ServerPool) AddSource(source enode.Iterator) {
	if source != nil {
		s.mixSources = append(s.mixSources, source)
	}
}
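
// Illustrative usage sketch (not part of the original library code), assuming the caller
// provides a backing database, a request list and an optional pre-negotiation query:
// it wires up a ServerPool, starts it and reads dial candidates from the returned iterator.
func exampleServerPoolUsage(db ethdb.KeyValueStore, requestList []RequestInfo, query QueryFunc) {
	// The dbKey prefix, mix timeout and empty trusted URL list are placeholder values.
	pool, dialCandidates := NewServerPool(db, []byte("vflux:"), time.Second, query, &mclock.System{}, nil, requestList)
	pool.Start()
	defer pool.Stop()

	if dialCandidates.Next() {
		candidate := dialCandidates.Node()
		_ = candidate // hand the candidate over to the dialer here
	}
	dialCandidates.Close()
}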

// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.
// Nodes that are filtered out and do not appear on the output iterator are put back
// into redialWait state.
func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator {
	s.fillSet = NewFillSet(s.ns, input, sfQuery)
	s.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) {
		if !newState.Equals(sfQuery) {
			if newState.HasAll(sfQuery) {
				// remove query flag if the node is already somewhere in the dial process
				s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			}
			return
		}
		fails := atomic.LoadUint32(&s.queryFails)
		failMax := fails
		if failMax > maxQueryFails {
			failMax = maxQueryFails
		}
		if rand.Intn(maxQueryFails*2) < int(failMax) {
			// skip pre-negotiation with increasing chance, max 50%
			// this ensures that the client can operate even if UDP is not working at all
			s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
			// set canDial before resetting the query flag so that FillSet will not read more
			// candidates unnecessarily
			s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			return
		}
		go func() {
			q := query(n)
			if q == -1 {
				atomic.AddUint32(&s.queryFails, 1)
				fails++
				if fails%warnQueryFails == 0 {
					// warn if a large number of consecutive queries have failed
					log.Warn("UDP connection queries failed", "count", fails)
				}
			} else {
				atomic.StoreUint32(&s.queryFails, 0)
			}
			s.ns.Operation(func() {
				// we are no longer running in the operation that the callback belongs to;
				// start a new one because setRedialWait is called below
				if q == 1 {
					s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
				} else {
					s.setRedialWait(n, queryCost, queryWaitStep)
				}
				s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
			})
		}()
	})
	return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
		if waiting {
			s.fillSet.SetTarget(preNegLimit)
		} else {
			s.fillSet.SetTarget(0)
		}
	})
}
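
// Illustrative helper (not used by the pool itself): it restates the probability implied by
// the rand.Intn check in addPreNegFilter above. The chance of skipping a pre-negotiation
// query grows linearly with the number of consecutive failures and is capped at 50% once
// maxQueryFails consecutive failures have been reached.
func querySkipChance(consecutiveFails uint32) float64 {
	if consecutiveFails > maxQueryFails {
		consecutiveFails = maxQueryFails
	}
	return float64(consecutiveFails) / (maxQueryFails * 2)
}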

// Start starts the server pool. Note that NodeStateMachine should be started first.
func (s *ServerPool) Start() {
	s.ns.Start()
	for _, iter := range s.mixSources {
		// add sources to mixer at startup because the mixer instantly tries to read them
		// which should only happen after NodeStateMachine has been started
		s.mixer.AddSource(iter)
	}
	for _, url := range s.trustedURLs {
		if node, err := enode.Parse(s.validSchemes, url); err == nil {
			s.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)
		} else {
			log.Error("Invalid trusted server URL", "url", url, "error", err)
		}
	}
	unixTime := s.unixTime()
	s.ns.Operation(func() {
		s.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			s.calculateWeight(node)
			if n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {
				wait := n.redialWaitEnd - unixTime
				lastWait := n.redialWaitEnd - n.redialWaitStart
				if wait > lastWait {
					// if the time until expiration is larger than the last suggested
					// waiting time then the system clock was probably adjusted
					wait = lastWait
				}
				s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)
			}
		})
	})
	atomic.StoreUint32(&s.started, 1)
}

// Stop stops the server pool.
func (s *ServerPool) Stop() {
	if s.fillSet != nil {
		s.fillSet.Close()
	}
	s.ns.Operation(func() {
		s.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {
			// recalculate weight of connected nodes in order to update hasValue flag if necessary
			s.calculateWeight(n)
		})
	})
	s.ns.Stop()
	s.vt.Stop()
}

// RegisterNode implements serverPeerSubscriber
func (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) {
	if atomic.LoadUint32(&s.started) == 0 {
		return nil, errors.New("server pool not started yet")
	}
	nvt := s.vt.Register(node.ID())
	s.ns.Operation(func() {
		s.ns.SetStateSub(node, sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)
		s.ns.SetFieldSub(node, sfiConnectedStats, nvt.RtStats())
		if node.IP().IsLoopback() {
			s.ns.SetFieldSub(node, sfiLocalAddress, node.Record())
		}
	})
	return nvt, nil
}

// UnregisterNode implements serverPeerSubscriber
func (s *ServerPool) UnregisterNode(node *enode.Node) {
	s.ns.Operation(func() {
		s.setRedialWait(node, dialCost, dialWaitStep)
		s.ns.SetStateSub(node, nodestate.Flags{}, sfConnected, 0)
		s.ns.SetFieldSub(node, sfiConnectedStats, nil)
	})
	s.vt.Unregister(node.ID())
}
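
// Illustrative helper (not used by the pool): it restates the timeout selection rule applied
// in recalTimeout below. The recommended "soft timeout" is the larger of the timeout belonging
// to a 10% failure rate and twice the median response time, but never less than minTimeout.
func suggestedSoftTimeout(rts ResponseTimeStats) time.Duration {
	timeout := minTimeout
	if t := rts.Timeout(0.1); t > timeout {
		timeout = t
	}
	if t := rts.Timeout(0.5) * 2; t > timeout {
		timeout = t
	}
	return timeout
}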

// recalTimeout calculates the current recommended timeout. This value is used by
// the client as a "soft timeout" value. It also affects the service value calculation
// of individual nodes.
func (s *ServerPool) recalTimeout() {
	// Use cached result if possible, avoid recalculating too frequently.
	s.timeoutLock.RLock()
	refreshed := s.timeoutRefreshed
	s.timeoutLock.RUnlock()
	now := s.clock.Now()
	if refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {
		return
	}
	// Cached result is stale, recalculate a new one.
	rts := s.vt.RtStats()

	// Add a fake statistic here. It is an easy way to initialize with some
	// conservative values when the database is new. As soon as we have a
	// considerable amount of real stats this small value won't matter.
	rts.Add(time.Second*2, 10, s.vt.StatsExpFactor())

	// Use either 10% failure rate timeout or twice the median response time
	// as the recommended timeout.
	timeout := minTimeout
	if t := rts.Timeout(0.1); t > timeout {
		timeout = t
	}
	if t := rts.Timeout(0.5) * 2; t > timeout {
		timeout = t
	}
	s.timeoutLock.Lock()
	if s.timeout != timeout {
		s.timeout = timeout
		s.timeWeights = TimeoutWeights(s.timeout)

		if s.suggestedTimeoutGauge != nil {
			s.suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
		}
		if s.totalValueGauge != nil {
			s.totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
		}
	}
	s.timeoutRefreshed = now
	s.timeoutLock.Unlock()
}

// GetTimeout returns the recommended request timeout.
func (s *ServerPool) GetTimeout() time.Duration {
	s.recalTimeout()
	s.timeoutLock.RLock()
	defer s.timeoutLock.RUnlock()
	return s.timeout
}

// getTimeoutAndWeight returns the recommended request timeout as well as the
// response time weight which is necessary to calculate service value.
func (s *ServerPool) getTimeoutAndWeight() (time.Duration, ResponseTimeWeights) {
	s.recalTimeout()
	s.timeoutLock.RLock()
	defer s.timeoutLock.RUnlock()
	return s.timeout, s.timeWeights
}

// addDialCost adds the given amount of dial cost to the node history and returns the current
// amount of total dial cost
func (s *ServerPool) addDialCost(n *nodeHistory, amount int64) uint64 {
	logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())
	if amount > 0 {
		n.dialCost.Add(amount, logOffset)
	}
	totalDialCost := n.dialCost.Value(logOffset)
	if totalDialCost < dialCost {
		totalDialCost = dialCost
	}
	return totalDialCost
}

// serviceValue returns the service value accumulated in this session and in total
func (s *ServerPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {
	nvt := s.vt.GetNode(node.ID())
	if nvt == nil {
		return 0, 0
	}
	currentStats := nvt.RtStats()
	_, timeWeights := s.getTimeoutAndWeight()
	expFactor := s.vt.StatsExpFactor()

	totalValue = currentStats.Value(timeWeights, expFactor)
	if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(ResponseTimeStats); ok {
		diff := currentStats
		diff.SubStats(&connStats)
		sessionValue = diff.Value(timeWeights, expFactor)
		if s.sessionValueMeter != nil {
			s.sessionValueMeter.Mark(int64(sessionValue))
		}
	}
	return
}
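
// Illustrative helper (not used by the pool): it restates the weight formula applied in
// updateWeight below. The accumulated service value is scaled by nodeWeightMul and divided
// by the total dial cost; nodes whose weight stays below nodeWeightThreshold are removed
// from the known (valuable) set.
func nodeWeightOf(totalValue float64, totalDialCost uint64) (weight uint64, valuable bool) {
	weight = uint64(totalValue * nodeWeightMul / float64(totalDialCost))
	return weight, weight >= nodeWeightThreshold
}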

// updateWeight calculates the node weight and updates the nodeWeight field and the
// hasValue flag. It also saves the node state if necessary.
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {
	weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))
	if weight >= nodeWeightThreshold {
		s.ns.SetStateSub(node, sfHasValue, nodestate.Flags{}, 0)
		s.ns.SetFieldSub(node, sfiNodeWeight, weight)
	} else {
		s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
		s.ns.SetFieldSub(node, sfiNodeWeight, nil)
		s.ns.SetFieldSub(node, sfiNodeHistory, nil)
		s.ns.SetFieldSub(node, sfiLocalAddress, nil)
	}
	s.ns.Persist(node) // saved if node history or hasValue changed
}
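
// Illustrative helper (not used by the pool): it restates the backoff rule implemented in
// setRedialWait below. The elapsed waiting time is multiplied by waitStep (but never reduced
// below the last planned timeout), then capped according to the ratio of accumulated service
// value to dial cost so that a node providing good service is dialed again soon, and finally
// clamped to at least minRedialWait seconds. Time inputs and the result are in seconds.
func nextRedialTimeout(actualWait, plannedTimeout, waitStep, sessionValue, totalValue float64, totalDialCost uint64) float64 {
	next := actualWait * waitStep
	if plannedTimeout > next {
		next = plannedTimeout
	}
	a := totalValue * dialCost * float64(minRedialWait)
	b := float64(totalDialCost) * sessionValue
	if a < b*next {
		next = a / b
	}
	if next < minRedialWait {
		next = minRedialWait
	}
	return next
}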

// setRedialWait calculates and sets the redialWait timeout based on the service value
// and dial cost accumulated during the last session/attempt and in total.
// The waiting time is raised exponentially if no service value has been received in order
// to prevent dialing an unresponsive node frequently for a very long time just because it
// was useful in the past. It can still be occasionally dialed though and once it provides
// a significant amount of service value again its waiting time is quickly reduced or reset
// to the minimum.
// Note: node weight is also recalculated and updated by this function.
// Note 2: this function should run inside a NodeStateMachine operation
func (s *ServerPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {
	n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
	sessionValue, totalValue := s.serviceValue(node)
	totalDialCost := s.addDialCost(&n, addDialCost)

	// if the current dial session has yielded at least the average value/dial cost ratio
	// then the waiting time should be reset to the minimum. If the session value
	// is below average but still positive then timeout is limited to the ratio of
	// average / current service value multiplied by the minimum timeout. If the attempt
	// was unsuccessful then timeout is raised exponentially without limitation.
	// Note: dialCost is used in the formula below even if dial was not attempted at all
	// because the pre-negotiation query did not return a positive result. In this case
	// the ratio has no meaning anyway and waitFactor is always raised, though in smaller
	// steps because queries are cheaper and therefore we can allow more failed attempts.
	unixTime := s.unixTime()
	plannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout
	var actualWait float64                                         // actual waiting time elapsed
	if unixTime > n.redialWaitEnd {
		// the planned timeout has elapsed
		actualWait = plannedTimeout
	} else {
		// if the node was redialed earlier then we do not raise the planned timeout
		// exponentially because that could lead to the timeout rising very high in
		// a short amount of time
		// Note that in case of an early redial actualWait also includes the dial
		// timeout or connection time of the last attempt but it still serves its
		// purpose of preventing the timeout rising quicker than linearly as a function
		// of total time elapsed without a successful connection.
		actualWait = float64(unixTime - n.redialWaitStart)
	}
	// raise timeout exponentially if the last planned timeout has elapsed
	// (use at least the last planned timeout otherwise)
	nextTimeout := actualWait * waitStep
	if plannedTimeout > nextTimeout {
		nextTimeout = plannedTimeout
	}
	// we reduce the waiting time if the server has provided service value during the
	// connection (but never under the minimum)
	a := totalValue * dialCost * float64(minRedialWait)
	b := float64(totalDialCost) * sessionValue
	if a < b*nextTimeout {
		nextTimeout = a / b
	}
	if nextTimeout < minRedialWait {
		nextTimeout = minRedialWait
	}
	wait := time.Duration(float64(time.Second) * nextTimeout)
	if wait < waitThreshold {
		n.redialWaitStart = unixTime
		n.redialWaitEnd = unixTime + int64(nextTimeout)
		s.ns.SetFieldSub(node, sfiNodeHistory, n)
		s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, wait)
		s.updateWeight(node, totalValue, totalDialCost)
	} else {
		// discard known node statistics if waiting time is very long because the node
		// hasn't been responsive for a very long time
		s.ns.SetFieldSub(node, sfiNodeHistory, nil)
		s.ns.SetFieldSub(node, sfiNodeWeight, nil)
		s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
	}
}

// calculateWeight calculates and sets the node weight without altering the node history.
// This function should be called during startup and shutdown only, otherwise setRedialWait
// will keep the weights updated as the underlying statistics are adjusted.
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) calculateWeight(node *enode.Node) {
	n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
	_, totalValue := s.serviceValue(node)
	totalDialCost := s.addDialCost(&n, 0)
	s.updateWeight(node, totalValue, totalDialCost)
}

// API returns the vflux client API
func (s *ServerPool) API() *PrivateClientAPI {
	return NewPrivateClientAPI(s.vt)
}

type dummyIdentity enode.ID

func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil }
func (id dummyIdentity) NodeAddr(r *enr.Record) []byte          { return id[:] }

// DialNode replaces the given enode with a locally generated one containing the ENR
// stored in the sfiLocalAddress field if present. This workaround ensures that nodes
// on the local network can be dialed at the local address if a connection has been
// successfully established previously.
// Note that NodeStateMachine always remembers the enode with the latest version of
// the remote signed ENR. ENR filtering should be performed on that version while
// DialNode should be used for dialing the node over TCP or UDP.
func (s *ServerPool) DialNode(n *enode.Node) *enode.Node {
	if enr, ok := s.ns.GetField(n, sfiLocalAddress).(*enr.Record); ok {
		n, _ := enode.New(dummyIdentity(n.ID()), enr)
		return n
	}
	return n
}

// Persist immediately stores the state of a node in the node database
func (s *ServerPool) Persist(n *enode.Node) {
	s.ns.Persist(n)
}