github.com/mdaxf/iac@v0.0.0-20240519030858-58a061660378/vendor_skip/go.mongodb.org/mongo-driver/mongo/description/server_selector.go

// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

package description

import (
	"fmt"
	"math"
	"time"

	"go.mongodb.org/mongo-driver/mongo/readpref"
	"go.mongodb.org/mongo-driver/tag"
)

// ServerSelector is an interface implemented by types that can perform server selection given a topology description
// and list of candidate servers. The selector should filter the provided candidates list and return a subset that
// matches some criteria.
type ServerSelector interface {
	SelectServer(Topology, []Server) ([]Server, error)
}

// ServerSelectorFunc is a function that can be used as a ServerSelector.
type ServerSelectorFunc func(Topology, []Server) ([]Server, error)

// SelectServer implements the ServerSelector interface.
func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, error) {
	return ssf(t, s)
}

type compositeSelector struct {
	selectors []ServerSelector
}

// CompositeSelector combines multiple selectors into a single selector by applying them in order to the candidates
// list.
//
// For example, if the initial candidates list is [s0, s1, s2, s3] and two selectors are provided where the first
// matches s0 and s1 and the second matches s1 and s2, the following would occur during server selection:
//
//  1. firstSelector([s0, s1, s2, s3]) -> [s0, s1]
//  2. secondSelector([s0, s1]) -> [s1]
//
// The final list of candidates returned by the composite selector would be [s1].
func CompositeSelector(selectors []ServerSelector) ServerSelector {
	return &compositeSelector{selectors: selectors}
}

func (cs *compositeSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	var err error
	for _, sel := range cs.selectors {
		candidates, err = sel.SelectServer(t, candidates)
		if err != nil {
			return nil, err
		}
	}
	return candidates, nil
}
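
// exampleCompositeSelection is an illustrative sketch only and not part of the driver:
// it shows how a composite selector narrows the candidates in order, filtering to
// writable servers first and then applying a hypothetical 15ms latency window to the
// survivors.
func exampleCompositeSelection(t Topology, candidates []Server) ([]Server, error) {
	sel := CompositeSelector([]ServerSelector{
		WriteSelector(),                        // keep only writable servers
		LatencySelector(15 * time.Millisecond), // then keep those within 15ms of the fastest
	})
	return sel.SelectServer(t, candidates)
}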

type latencySelector struct {
	latency time.Duration
}

// LatencySelector creates a ServerSelector which selects servers based on their average RTT values.
func LatencySelector(latency time.Duration) ServerSelector {
	return &latencySelector{latency: latency}
}

func (ls *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
	if ls.latency < 0 {
		return candidates, nil
	}
	if t.Kind == LoadBalanced {
		// In LoadBalanced mode, there should only be one server in the topology and it must be selected.
		return candidates, nil
	}

	switch len(candidates) {
	case 0, 1:
		return candidates, nil
	default:
		min := time.Duration(math.MaxInt64)
		for _, candidate := range candidates {
			if candidate.AverageRTTSet {
				if candidate.AverageRTT < min {
					min = candidate.AverageRTT
				}
			}
		}

		if min == math.MaxInt64 {
			return candidates, nil
		}

		max := min + ls.latency

		viableIndexes := make([]int, 0, len(candidates))
		for i, candidate := range candidates {
			if candidate.AverageRTTSet {
				if candidate.AverageRTT <= max {
					viableIndexes = append(viableIndexes, i)
				}
			}
		}
		if len(viableIndexes) == len(candidates) {
			return candidates, nil
		}
		result := make([]Server, len(viableIndexes))
		for i, idx := range viableIndexes {
			result[i] = candidates[idx]
		}
		return result, nil
	}
}

// WriteSelector selects all the writable servers.
func WriteSelector() ServerSelector {
	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
		switch t.Kind {
		case Single, LoadBalanced:
			return candidates, nil
		default:
			result := []Server{}
			for _, candidate := range candidates {
				switch candidate.Kind {
				case Mongos, RSPrimary, Standalone:
					result = append(result, candidate)
				}
			}
			return result, nil
		}
	})
}

// ReadPrefSelector selects servers based on the provided read preference.
func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector {
	return readPrefSelector(rp, false)
}

// OutputAggregateSelector selects servers based on the provided read preference given that the underlying operation is
// aggregate with an output stage.
func OutputAggregateSelector(rp *readpref.ReadPref) ServerSelector {
	return readPrefSelector(rp, true)
}

func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelector {
	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
		if t.Kind == LoadBalanced {
			// In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check
			// this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate
			// server wouldn't have a wire version set, which would result in an error.
			return candidates, nil
		}

		if _, set := rp.MaxStaleness(); set {
			for _, s := range candidates {
				if s.Kind != Unknown {
					if err := maxStalenessSupported(s.WireVersion); err != nil {
						return nil, err
					}
				}
			}
		}

		switch t.Kind {
		case Single:
			return candidates, nil
		case ReplicaSetNoPrimary, ReplicaSetWithPrimary:
			return selectForReplicaSet(rp, isOutputAggregate, t, candidates)
		case Sharded:
			return selectByKind(candidates, Mongos), nil
		}

		return nil, nil
	})
}
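
// exampleReadPrefSelection is an illustrative sketch, not part of the driver: it selects
// with a secondary-preferred read preference. The tag and staleness values are made up
// for the example; for replica set topologies, readPrefSelector routes this through
// selectForReplicaSet below.
func exampleReadPrefSelection(t Topology, candidates []Server) ([]Server, error) {
	rp, err := readpref.New(
		readpref.SecondaryPreferredMode,
		readpref.WithTags("dc", "east"),            // hypothetical tag filter
		readpref.WithMaxStaleness(120*time.Second), // must be at least 90s (see verifyMaxStaleness)
	)
	if err != nil {
		return nil, err
	}
	return ReadPrefSelector(rp).SelectServer(t, candidates)
}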

// maxStalenessSupported returns an error if the given server's wire version does not support max staleness.
func maxStalenessSupported(wireVersion *VersionRange) error {
	if wireVersion != nil && wireVersion.Max < 5 {
		return fmt.Errorf("max staleness is only supported for servers 3.4 or newer")
	}

	return nil
}

func selectForReplicaSet(rp *readpref.ReadPref, isOutputAggregate bool, t Topology, candidates []Server) ([]Server, error) {
	if err := verifyMaxStaleness(rp, t); err != nil {
		return nil, err
	}

	// If the underlying operation is an aggregate with an output stage, only apply the read preference
	// if all candidates are 5.0+ (wire version 13+). Otherwise, operate under primary read preference.
	if isOutputAggregate {
		for _, s := range candidates {
			if s.WireVersion.Max < 13 {
				return selectByKind(candidates, RSPrimary), nil
			}
		}
	}

	switch rp.Mode() {
	case readpref.PrimaryMode:
		return selectByKind(candidates, RSPrimary), nil
	case readpref.PrimaryPreferredMode:
		selected := selectByKind(candidates, RSPrimary)

		if len(selected) == 0 {
			selected = selectSecondaries(rp, candidates)
			return selectByTagSet(selected, rp.TagSets()), nil
		}

		return selected, nil
	case readpref.SecondaryPreferredMode:
		selected := selectSecondaries(rp, candidates)
		selected = selectByTagSet(selected, rp.TagSets())
		if len(selected) > 0 {
			return selected, nil
		}
		return selectByKind(candidates, RSPrimary), nil
	case readpref.SecondaryMode:
		selected := selectSecondaries(rp, candidates)
		return selectByTagSet(selected, rp.TagSets()), nil
	case readpref.NearestMode:
		selected := selectByKind(candidates, RSPrimary)
		selected = append(selected, selectSecondaries(rp, candidates)...)
		return selectByTagSet(selected, rp.TagSets()), nil
	}

	return nil, fmt.Errorf("unsupported mode: %d", rp.Mode())
}

func selectSecondaries(rp *readpref.ReadPref, candidates []Server) []Server {
	secondaries := selectByKind(candidates, RSSecondary)
	if len(secondaries) == 0 {
		return secondaries
	}
	if maxStaleness, set := rp.MaxStaleness(); set {
		primaries := selectByKind(candidates, RSPrimary)
		if len(primaries) == 0 {
			// With no primary available, estimate staleness relative to the secondary with the latest write.
			baseTime := secondaries[0].LastWriteTime
			for i := 1; i < len(secondaries); i++ {
				if secondaries[i].LastWriteTime.After(baseTime) {
					baseTime = secondaries[i].LastWriteTime
				}
			}

			var selected []Server
			for _, secondary := range secondaries {
				estimatedStaleness := baseTime.Sub(secondary.LastWriteTime) + secondary.HeartbeatInterval
				if estimatedStaleness <= maxStaleness {
					selected = append(selected, secondary)
				}
			}

			return selected
		}

		primary := primaries[0]

		var selected []Server
		for _, secondary := range secondaries {
			estimatedStaleness := secondary.LastUpdateTime.Sub(secondary.LastWriteTime) - primary.LastUpdateTime.Sub(primary.LastWriteTime) + secondary.HeartbeatInterval
			if estimatedStaleness <= maxStaleness {
				selected = append(selected, secondary)
			}
		}
		return selected
	}

	return secondaries
}
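
// estimatedStalenessExample is an illustrative helper, not part of the driver, restating
// the primary-based staleness estimate used in selectSecondaries above. With made-up
// values of a 100s write lag on the secondary, a 2s lag on the primary, and a 10s
// heartbeat interval, it yields 100s - 2s + 10s = 108s, which a 90s max staleness would
// reject.
func estimatedStalenessExample(primary, secondary Server) time.Duration {
	secondaryLag := secondary.LastUpdateTime.Sub(secondary.LastWriteTime)
	primaryLag := primary.LastUpdateTime.Sub(primary.LastWriteTime)
	return secondaryLag - primaryLag + secondary.HeartbeatInterval
}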

func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server {
	if len(tagSets) == 0 {
		return candidates
	}

	for _, ts := range tagSets {
		// If this tag set is empty, we can take a fast path because the empty list is a subset of all tag sets, so
		// all candidate servers will be selected.
		if len(ts) == 0 {
			return candidates
		}

		var results []Server
		for _, s := range candidates {
			// ts is non-empty, so only servers with a non-empty set of tags need to be checked.
			if len(s.Tags) > 0 && s.Tags.ContainsAll(ts) {
				results = append(results, s)
			}
		}

		if len(results) > 0 {
			return results
		}
	}

	return []Server{}
}

func selectByKind(candidates []Server, kind ServerKind) []Server {
	// As an optimization, record the indices of viable candidates first and then copy only the
	// matching entries, rather than appending the relatively large Server structs one at a time.
	viableIndexes := make([]int, 0, len(candidates))
	for i, s := range candidates {
		if s.Kind == kind {
			viableIndexes = append(viableIndexes, i)
		}
	}
	if len(viableIndexes) == len(candidates) {
		return candidates
	}
	result := make([]Server, len(viableIndexes))
	for i, idx := range viableIndexes {
		result[i] = candidates[idx]
	}
	return result
}

func verifyMaxStaleness(rp *readpref.ReadPref, t Topology) error {
	maxStaleness, set := rp.MaxStaleness()
	if !set {
		return nil
	}

	if maxStaleness < 90*time.Second {
		return fmt.Errorf("max staleness (%s) must be greater than or equal to 90s", maxStaleness)
	}

	if len(t.Servers) < 1 {
		// Maybe we should return an error here instead?
		return nil
	}

	// We'll assume all candidates have the same heartbeat interval.
	s := t.Servers[0]
	idleWritePeriod := 10 * time.Second

	if maxStaleness < s.HeartbeatInterval+idleWritePeriod {
		return fmt.Errorf(
			"max staleness (%s) must be greater than or equal to the heartbeat interval (%s) plus idle write period (%s)",
			maxStaleness, s.HeartbeatInterval, idleWritePeriod,
		)
	}

	return nil
}
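
// exampleTagSetFallback is an illustrative sketch, not part of the driver: tag sets act
// as an ordered list of fallbacks in selectByTagSet. With the hypothetical sets below,
// servers tagged dc=east are preferred; only if none match does selection fall back to
// dc=west, and an empty trailing set would have matched every candidate.
func exampleTagSetFallback(candidates []Server) []Server {
	tagSets := []tag.Set{
		{tag.Tag{Name: "dc", Value: "east"}}, // preferred tag set
		{tag.Tag{Name: "dc", Value: "west"}}, // fallback if no server matches dc=east
	}
	return selectByTagSet(candidates, tagSets)
}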