agones.dev/agones@v1.53.0/pkg/gameservers/pernodecounter.go (about) 1 // Copyright 2018 Google LLC All Rights Reserved. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package gameservers 16 17 import ( 18 "context" 19 "sync" 20 21 agonesv1 "agones.dev/agones/pkg/apis/agones/v1" 22 "agones.dev/agones/pkg/client/informers/externalversions" 23 listerv1 "agones.dev/agones/pkg/client/listers/agones/v1" 24 "agones.dev/agones/pkg/util/runtime" 25 "github.com/pkg/errors" 26 "github.com/sirupsen/logrus" 27 corev1 "k8s.io/api/core/v1" 28 "k8s.io/apimachinery/pkg/labels" 29 "k8s.io/client-go/informers" 30 "k8s.io/client-go/tools/cache" 31 ) 32 33 // PerNodeCounter counts how many Allocated and 34 // Ready GameServers currently exist on each node. 
// This is useful for scheduling allocations, fleet management
// mostly under a Packed strategy
//
//nolint:govet // ignore fieldalignment, singleton
type PerNodeCounter struct {
	logger           *logrus.Entry
	gameServerSynced cache.InformerSynced
	gameServerLister listerv1.GameServerLister
	// countMutex guards counts. counts is replaced wholesale by Run and
	// mutated incrementally by the informer event handlers via inc.
	countMutex sync.RWMutex
	counts     map[string]*NodeCount
}

// NodeCount is just a convenience data structure for
// keeping relevant GameServer counts about Nodes
type NodeCount struct {
	// Ready is the number of Ready GameServers on the Node.
	Ready int64
	// Allocated is the number of Allocated GameServers on the Node.
	Allocated int64
}

// NewPerNodeCounter returns a new PerNodeCounter that keeps its per-Node
// counts up to date via GameServer informer events, and removes a Node's
// record entirely when that Node is deleted.
func NewPerNodeCounter(
	kubeInformerFactory informers.SharedInformerFactory,
	agonesInformerFactory externalversions.SharedInformerFactory) *PerNodeCounter {

	gameServers := agonesInformerFactory.Agones().V1().GameServers()
	gsInformer := gameServers.Informer()

	ac := &PerNodeCounter{
		gameServerSynced: gsInformer.HasSynced,
		gameServerLister: gameServers.Lister(),
		countMutex:       sync.RWMutex{},
		counts:           map[string]*NodeCount{},
	}

	ac.logger = runtime.NewLoggerWithType(ac)

	// The registration handle and error from AddEventHandler are deliberately
	// discarded: the handlers live for the life of the process.
	_, _ = gsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			gs := obj.(*agonesv1.GameServer)

			// A newly observed GameServer only contributes to a count if it
			// arrives already Ready or Allocated.
			switch gs.Status.State {
			case agonesv1.GameServerStateReady:
				ac.inc(gs, 1, 0)
			case agonesv1.GameServerStateAllocated:
				ac.inc(gs, 0, 1)
			}
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			oldGS := oldObj.(*agonesv1.GameServer)
			newGS := newObj.(*agonesv1.GameServer)

			// Compute the delta (-1, 0 or +1) that this state transition
			// applies to each of the Ready and Allocated counts.
			var ready int64
			var allocated int64

			// Transition out of Ready decrements; transition into Ready increments.
			if oldGS.Status.State == agonesv1.GameServerStateReady && newGS.Status.State != agonesv1.GameServerStateReady {
				ready = -1
			} else if newGS.Status.State == agonesv1.GameServerStateReady && oldGS.Status.State != agonesv1.GameServerStateReady {
				ready = 1
			}

			// Same pattern for the Allocated count.
			if oldGS.Status.State == agonesv1.GameServerStateAllocated && newGS.Status.State != agonesv1.GameServerStateAllocated {
				allocated = -1
			} else if newGS.Status.State == agonesv1.GameServerStateAllocated && oldGS.Status.State != agonesv1.GameServerStateAllocated {
				allocated = 1
			}

			// NOTE(review): both deltas are applied to newGS's node; this assumes
			// Status.NodeName does not change between oldGS and newGS — confirm.
			ac.inc(newGS, ready, allocated)
		},
		DeleteFunc: func(obj interface{}) {
			// NOTE(review): a cache.DeletedFinalStateUnknown tombstone fails this
			// type assertion and is silently skipped — confirm that is acceptable.
			gs, ok := obj.(*agonesv1.GameServer)
			if !ok {
				return
			}

			// Deleting a Ready/Allocated GameServer reverses its contribution.
			switch gs.Status.State {
			case agonesv1.GameServerStateReady:
				ac.inc(gs, -1, 0)
			case agonesv1.GameServerStateAllocated:
				ac.inc(gs, 0, -1)
			}
		},
	})

	// remove the record when the node is deleted
	_, _ = kubeInformerFactory.Core().V1().Nodes().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: func(obj interface{}) {
			node, ok := obj.(*corev1.Node)
			if !ok {
				return
			}

			ac.countMutex.Lock()
			defer ac.countMutex.Unlock()

			delete(ac.counts, node.ObjectMeta.Name)
		},
	})

	return ac
}

// Run sets up the current state GameServer counts across nodes.
// Non-blocking Run function: it returns after the informer caches have
// synced and the initial counts have been built (the worker count
// parameter is ignored).
140 func (pnc *PerNodeCounter) Run(ctx context.Context, _ int) error { 141 pnc.countMutex.Lock() 142 defer pnc.countMutex.Unlock() 143 144 pnc.logger.Debug("Running") 145 146 if !cache.WaitForCacheSync(ctx.Done(), pnc.gameServerSynced) { 147 return errors.New("failed to wait for caches to sync") 148 } 149 150 gsList, err := pnc.gameServerLister.List(labels.Everything()) 151 if err != nil { 152 return errors.Wrap(err, "error attempting to list all GameServers") 153 } 154 155 counts := map[string]*NodeCount{} 156 for _, gs := range gsList { 157 _, ok := counts[gs.Status.NodeName] 158 if !ok { 159 counts[gs.Status.NodeName] = &NodeCount{} 160 } 161 162 switch gs.Status.State { 163 case agonesv1.GameServerStateReady: 164 counts[gs.Status.NodeName].Ready++ 165 case agonesv1.GameServerStateAllocated: 166 counts[gs.Status.NodeName].Allocated++ 167 } 168 } 169 170 pnc.counts = counts 171 return nil 172 } 173 174 // Counts returns the NodeCount map in a thread safe way 175 func (pnc *PerNodeCounter) Counts() map[string]NodeCount { 176 pnc.countMutex.RLock() 177 defer pnc.countMutex.RUnlock() 178 179 result := make(map[string]NodeCount, len(pnc.counts)) 180 181 // return a copy, so it's thread safe 182 for k, v := range pnc.counts { 183 result[k] = *v 184 } 185 186 return result 187 } 188 189 func (pnc *PerNodeCounter) inc(gs *agonesv1.GameServer, ready, allocated int64) { 190 pnc.countMutex.Lock() 191 defer pnc.countMutex.Unlock() 192 193 _, ok := pnc.counts[gs.Status.NodeName] 194 if !ok { 195 pnc.counts[gs.Status.NodeName] = &NodeCount{} 196 } 197 198 pnc.counts[gs.Status.NodeName].Allocated += allocated 199 pnc.counts[gs.Status.NodeName].Ready += ready 200 201 // just in case 202 if pnc.counts[gs.Status.NodeName].Allocated < 0 { 203 pnc.counts[gs.Status.NodeName].Allocated = 0 204 } 205 206 if pnc.counts[gs.Status.NodeName].Ready < 0 { 207 pnc.counts[gs.Status.NodeName].Ready = 0 208 } 209 }