package localchans

import (
	"errors"
	"fmt"
	"sync"

	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrlnd/channeldb"
	"github.com/decred/dcrlnd/discovery"
	"github.com/decred/dcrlnd/htlcswitch"
	"github.com/decred/dcrlnd/kvdb"
	"github.com/decred/dcrlnd/lnrpc"
	"github.com/decred/dcrlnd/lnwire"
	"github.com/decred/dcrlnd/routing"
)

// Manager manages the node's local channels. The only operation that is
// currently implemented is updating forwarding policies.
//
// All collaborators are injected as function fields, which keeps this
// package free of direct dependencies on the switch, gossiper and graph
// implementations and makes the manager easy to stub out in tests.
type Manager struct {
	// UpdateForwardingPolicies is used by the manager to update active
	// links with a new policy.
	UpdateForwardingPolicies func(
		chanPolicies map[wire.OutPoint]htlcswitch.ForwardingPolicy)

	// PropagateChanPolicyUpdate is called to persist a new policy to disk
	// and broadcast it to the network.
	PropagateChanPolicyUpdate func(
		edgesToUpdate []discovery.EdgeWithInfo) error

	// ForAllOutgoingChannels is required to iterate over all our local
	// channels. The callback receives the read transaction the iteration
	// runs under, so per-channel lookups can reuse it.
	ForAllOutgoingChannels func(cb func(kvdb.RTx,
		*channeldb.ChannelEdgeInfo,
		*channeldb.ChannelEdgePolicy) error) error

	// FetchChannel is used to query local channel parameters. Optionally an
	// existing db tx can be supplied; passing nil starts a fresh lookup.
	FetchChannel func(tx kvdb.RTx, chanPoint wire.OutPoint) (
		*channeldb.OpenChannel, error)

	// policyUpdateLock ensures that the database and the link do not fall
	// out of sync if there are concurrent fee update calls. Without it,
	// there is a chance that policy A updates the database, then policy B
	// updates the database, then policy B updates the link, then policy A
	// updates the link.
	policyUpdateLock sync.Mutex
}

// UpdatePolicy updates the policy for the specified channels on disk and in
// the active links.
52 func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy, 53 chanPoints ...wire.OutPoint) ([]*lnrpc.FailedUpdate, error) { 54 55 r.policyUpdateLock.Lock() 56 defer r.policyUpdateLock.Unlock() 57 58 // First, we'll construct a set of all the channels that we are 59 // trying to update. 60 unprocessedChans := make(map[wire.OutPoint]struct{}) 61 for _, chanPoint := range chanPoints { 62 unprocessedChans[chanPoint] = struct{}{} 63 } 64 65 haveChanFilter := len(unprocessedChans) != 0 66 67 var failedUpdates []*lnrpc.FailedUpdate 68 var edgesToUpdate []discovery.EdgeWithInfo 69 policiesToUpdate := make(map[wire.OutPoint]htlcswitch.ForwardingPolicy) 70 71 // Next, we'll loop over all the outgoing channels the router knows of. 72 // If we have a filter then we'll only collected those channels, 73 // otherwise we'll collect them all. 74 err := r.ForAllOutgoingChannels(func( 75 tx kvdb.RTx, 76 info *channeldb.ChannelEdgeInfo, 77 edge *channeldb.ChannelEdgePolicy) error { 78 79 // If we have a channel filter, and this channel isn't a part 80 // of it, then we'll skip it. 81 _, ok := unprocessedChans[info.ChannelPoint] 82 if !ok && haveChanFilter { 83 return nil 84 } 85 86 // Mark this channel as found by removing it. unprocessedChans 87 // will be used to report invalid channels later on. 88 delete(unprocessedChans, info.ChannelPoint) 89 90 // Apply the new policy to the edge. 91 err := r.updateEdge(tx, info.ChannelPoint, edge, newSchema) 92 if err != nil { 93 failedUpdates = append(failedUpdates, 94 makeFailureItem(info.ChannelPoint, 95 lnrpc.UpdateFailure_UPDATE_FAILURE_INVALID_PARAMETER, 96 err.Error(), 97 )) 98 99 return nil 100 } 101 102 // Add updated edge to list of edges to send to gossiper. 103 edgesToUpdate = append(edgesToUpdate, discovery.EdgeWithInfo{ 104 Info: info, 105 Edge: edge, 106 }) 107 108 // Add updated policy to list of policies to send to switch. 
109 policiesToUpdate[info.ChannelPoint] = htlcswitch.ForwardingPolicy{ 110 BaseFee: edge.FeeBaseMAtoms, 111 FeeRate: edge.FeeProportionalMillionths, 112 TimeLockDelta: uint32(edge.TimeLockDelta), 113 MinHTLCOut: edge.MinHTLC, 114 MaxHTLC: edge.MaxHTLC, 115 } 116 117 return nil 118 }) 119 if err != nil { 120 return nil, err 121 } 122 123 // Construct a list of failed policy updates. 124 for chanPoint := range unprocessedChans { 125 channel, err := r.FetchChannel(nil, chanPoint) 126 switch { 127 case errors.Is(err, channeldb.ErrChannelNotFound): 128 failedUpdates = append(failedUpdates, 129 makeFailureItem(chanPoint, 130 lnrpc.UpdateFailure_UPDATE_FAILURE_NOT_FOUND, 131 "not found", 132 )) 133 134 case err != nil: 135 failedUpdates = append(failedUpdates, 136 makeFailureItem(chanPoint, 137 lnrpc.UpdateFailure_UPDATE_FAILURE_INTERNAL_ERR, 138 err.Error(), 139 )) 140 141 case channel.IsPending: 142 failedUpdates = append(failedUpdates, 143 makeFailureItem(chanPoint, 144 lnrpc.UpdateFailure_UPDATE_FAILURE_PENDING, 145 "not yet confirmed", 146 )) 147 148 default: 149 failedUpdates = append(failedUpdates, 150 makeFailureItem(chanPoint, 151 lnrpc.UpdateFailure_UPDATE_FAILURE_UNKNOWN, 152 "could not update policies", 153 )) 154 } 155 } 156 157 // Commit the policy updates to disk and broadcast to the network. We 158 // validated the new policy above, so we expect no validation errors. If 159 // this would happen because of a bug, the link policy will be 160 // desynchronized. It is currently not possible to atomically commit 161 // multiple edge updates. 162 err = r.PropagateChanPolicyUpdate(edgesToUpdate) 163 if err != nil { 164 return nil, err 165 } 166 167 // Update active links. 168 r.UpdateForwardingPolicies(policiesToUpdate) 169 170 return failedUpdates, nil 171 } 172 173 // updateEdge updates the given edge with the new schema. 
174 func (r *Manager) updateEdge(tx kvdb.RTx, chanPoint wire.OutPoint, 175 edge *channeldb.ChannelEdgePolicy, 176 newSchema routing.ChannelPolicy) error { 177 178 // Update forwarding fee scheme and required time lock delta. 179 edge.FeeBaseMAtoms = newSchema.BaseFee 180 edge.FeeProportionalMillionths = lnwire.MilliAtom( 181 newSchema.FeeRate, 182 ) 183 edge.TimeLockDelta = uint16(newSchema.TimeLockDelta) 184 185 // Retrieve negotiated channel htlc amt limits. 186 amtMin, amtMax, err := r.getHtlcAmtLimits(tx, chanPoint) 187 if err != nil { 188 return nil 189 } 190 191 // We now update the edge max htlc value. 192 switch { 193 194 // If a non-zero max htlc was specified, use it to update the edge. 195 // Otherwise keep the value unchanged. 196 case newSchema.MaxHTLC != 0: 197 edge.MaxHTLC = newSchema.MaxHTLC 198 199 // If this edge still doesn't have a max htlc set, set it to the max. 200 // This is an on-the-fly migration. 201 case !edge.MessageFlags.HasMaxHtlc(): 202 edge.MaxHTLC = amtMax 203 204 // If this edge has a max htlc that exceeds what the channel can 205 // actually carry, correct it now. This can happen, because we 206 // previously set the max htlc to the channel capacity. 207 case edge.MaxHTLC > amtMax: 208 edge.MaxHTLC = amtMax 209 } 210 211 // If a new min htlc is specified, update the edge. 212 if newSchema.MinHTLC != nil { 213 edge.MinHTLC = *newSchema.MinHTLC 214 } 215 216 // If the MaxHtlc flag wasn't already set, we can set it now. 217 edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc 218 219 // Validate htlc amount constraints. 
220 switch { 221 case edge.MinHTLC < amtMin: 222 return fmt.Errorf( 223 "min htlc amount of %v is below min htlc parameter of %v", 224 edge.MinHTLC, amtMin, 225 ) 226 227 case edge.MaxHTLC > amtMax: 228 return fmt.Errorf( 229 "max htlc size of %v is above max pending amount of %v", 230 edge.MaxHTLC, amtMax, 231 ) 232 233 case edge.MinHTLC > edge.MaxHTLC: 234 return fmt.Errorf( 235 "min_htlc %v greater than max_htlc %v", 236 edge.MinHTLC, edge.MaxHTLC, 237 ) 238 } 239 240 // Clear signature to help prevent usage of the previous signature. 241 edge.SetSigBytes(nil) 242 243 return nil 244 } 245 246 // getHtlcAmtLimits retrieves the negotiated channel min and max htlc amount 247 // constraints. 248 func (r *Manager) getHtlcAmtLimits(tx kvdb.RTx, chanPoint wire.OutPoint) ( 249 lnwire.MilliAtom, lnwire.MilliAtom, error) { 250 251 ch, err := r.FetchChannel(tx, chanPoint) 252 if err != nil { 253 return 0, 0, err 254 } 255 256 // The max htlc policy field must be less than or equal to the channel 257 // capacity AND less than or equal to the max in-flight HTLC value. 258 // Since the latter is always less than or equal to the former, just 259 // return the max in-flight value. 260 maxAmt := ch.LocalChanCfg.ChannelConstraints.MaxPendingAmount 261 262 return ch.LocalChanCfg.MinHTLC, maxAmt, nil 263 } 264 265 // makeFailureItem creates a lnrpc.FailedUpdate object. 266 func makeFailureItem(outPoint wire.OutPoint, updateFailure lnrpc.UpdateFailure, 267 errStr string) *lnrpc.FailedUpdate { 268 269 outpoint := &lnrpc.OutPoint{ 270 TxidBytes: outPoint.Hash[:], 271 TxidStr: outPoint.Hash.String(), 272 OutputIndex: outPoint.Index, 273 } 274 275 return &lnrpc.FailedUpdate{ 276 Outpoint: outpoint, 277 Reason: updateFailure, 278 UpdateError: errStr, 279 } 280 }