github.com/cloud-foundations/dominator@v0.0.0-20221004181915-6e4fee580046/dom/herd/mdb.go

package herd

import (
	"reflect"
	"time"

	filegenclient "github.com/Cloud-Foundations/Dominator/lib/filegen/client"
	"github.com/Cloud-Foundations/Dominator/lib/mdb"
	"github.com/Cloud-Foundations/Dominator/lib/srpc"
)

// mdbUpdate reconciles the herd with a new MDB snapshot. The table updates
// happen under the Herd lock (in mdbUpdateGetLock); the slower cleanup work
// (closing client resources, trimming the image interest list) and the
// summary logging happen afterwards, without holding the lock.
func (herd *Herd) mdbUpdate(mdb *mdb.Mdb) {
	numNew, numDeleted, numChanged, wantedImages, clientResourcesToDelete :=
		herd.mdbUpdateGetLock(mdb)
	for _, clientResource := range clientResourcesToDelete {
		clientResource.ScheduleClose()
	}
	// Clean up unreferenced images.
	herd.imageManager.SetImageInterestList(wantedImages, true)
	pluralNew := "s"
	if numNew == 1 {
		pluralNew = ""
	}
	pluralDeleted := "s"
	if numDeleted == 1 {
		pluralDeleted = ""
	}
	pluralChanged := "s"
	if numChanged == 1 {
		pluralChanged = ""
	}
	herd.logger.Printf(
		"MDB update: %d new sub%s, %d removed sub%s, %d changed sub%s",
		numNew, pluralNew, numDeleted, pluralDeleted, numChanged, pluralChanged)
}

// mdbUpdateGetLock updates the sub tables under the Herd lock. It returns the
// numbers of new, deleted and changed subs, the set of image names that are
// still wanted, and the client resources of deleted subs, which the caller
// should schedule for closing after the lock is released.
func (herd *Herd) mdbUpdateGetLock(mdb *mdb.Mdb) (
	int, int, int, map[string]struct{}, []*srpc.ClientResource) {
	herd.LockWithTimeout(time.Minute)
	defer herd.Unlock()
	startTime := time.Now()
	numNew := 0
	numDeleted := 0
	numChanged := 0
	herd.subsByIndex = make([]*Sub, 0, len(mdb.Machines))
	// Mark all current subs for deletion; subs still present in the new MDB
	// are unmarked below.
	subsToDelete := make(map[string]struct{})
	for _, sub := range herd.subsByName {
		subsToDelete[sub.mdb.Hostname] = struct{}{}
	}
	wantedImages := make(map[string]struct{})
	wantedImages[herd.defaultImageName] = struct{}{}
	wantedImages[herd.nextDefaultImageName] = struct{}{}
	for _, machine := range mdb.Machines { // Sorted by Hostname.
		if machine.Hostname == "" {
			herd.logger.Printf("Empty Hostname field, ignoring \"%s\"\n",
				machine)
			continue
		}
		sub := herd.subsByName[machine.Hostname]
		wantedImages[machine.RequiredImage] = struct{}{}
		wantedImages[machine.PlannedImage] = struct{}{}
		img := herd.imageManager.GetNoError(machine.RequiredImage)
		if sub == nil {
			sub = &Sub{
				herd:          herd,
				mdb:           machine,
				cancelChannel: make(chan struct{}),
			}
			herd.subsByName[machine.Hostname] = sub
			sub.fileUpdateChannel = herd.computedFilesManager.Add(
				filegenclient.Machine{machine, sub.getComputedFiles(img)}, 16)
			numNew++
		} else {
			if sub.mdb.RequiredImage != machine.RequiredImage {
				if sub.status == statusSynced {
					sub.status = statusWaitingToPoll
				}
			}
			if !reflect.DeepEqual(sub.mdb, machine) {
				sub.mdb = machine
				sub.generationCount = 0 // Force a full poll.
				herd.computedFilesManager.Update(
					filegenclient.Machine{machine, sub.getComputedFiles(img)})
				sub.sendCancel()
				numChanged++
			}
		}
		delete(subsToDelete, machine.Hostname)
		herd.subsByIndex = append(herd.subsByIndex, sub)
		img = herd.imageManager.GetNoError(machine.PlannedImage)
		if img == nil {
			sub.havePlannedImage = false
		} else {
			sub.havePlannedImage = true
		}
	}
	delete(wantedImages, "")
	// Delete flagged subs (those not in the new MDB).
	clientResourcesToDelete := make([]*srpc.ClientResource, 0)
	for subHostname := range subsToDelete {
		sub := herd.subsByName[subHostname]
		sub.deletingFlagMutex.Lock()
		sub.deleting = true
		if sub.clientResource != nil {
			clientResourcesToDelete = append(clientResourcesToDelete,
				sub.clientResource)
		}
		sub.deletingFlagMutex.Unlock()
		herd.computedFilesManager.Remove(subHostname)
		delete(herd.subsByName, subHostname)
		numDeleted++
	}
	mdbUpdateTimeDistribution.Add(time.Since(startTime))
	return numNew, numDeleted, numChanged, wantedImages, clientResourcesToDelete
}
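
// Illustrative sketch, not part of the original file: mdbUpdate above is meant
// to be driven by a loop that receives successive MDB snapshots. The method
// name and channel parameter below are hypothetical; the real wiring lives
// elsewhere in the Dominator sources. Each snapshot is reconciled in two
// phases: table updates under the Herd lock (mdbUpdateGetLock), then cleanup
// and logging without holding the lock.
func (herd *Herd) watchMdbSketch(mdbChannel <-chan *mdb.Mdb) {
	for newMdb := range mdbChannel {
		herd.mdbUpdate(newMdb)
	}
}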