github.com/niedbalski/juju@v0.0.0-20190215020005-8ff100488e47/worker/provisioner/provisioner.go

// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package provisioner

import (
	"sync"
	"time"

	"github.com/juju/errors"
	"github.com/juju/loggo"
	"gopkg.in/juju/names.v2"
	"gopkg.in/juju/worker.v1"
	"gopkg.in/juju/worker.v1/catacomb"

	"github.com/juju/juju/agent"
	apiprovisioner "github.com/juju/juju/api/provisioner"
	"github.com/juju/juju/controller/authentication"
	"github.com/juju/juju/core/instance"
	"github.com/juju/juju/core/watcher"
	"github.com/juju/juju/environs"
	"github.com/juju/juju/environs/config"
	"github.com/juju/juju/environs/context"
	"github.com/juju/juju/worker/common"
)

var logger = loggo.GetLogger("juju.provisioner")

// Ensure our structs implement the required Provisioner interface.
var _ Provisioner = (*environProvisioner)(nil)
var _ Provisioner = (*containerProvisioner)(nil)

var (
	retryStrategyDelay = 10 * time.Second
	retryStrategyCount = 10
)

// Provisioner represents a running provisioner worker.
type Provisioner interface {
	worker.Worker
	getMachineWatcher() (watcher.StringsWatcher, error)
	getRetryWatcher() (watcher.NotifyWatcher, error)
	getProfileWatcher() (watcher.StringsWatcher, error)
}

// environProvisioner represents a running provisioning worker for machine nodes
// belonging to an environment.
type environProvisioner struct {
	provisioner
	environ        environs.Environ
	configObserver configObserver
}

// containerProvisioner represents a running provisioning worker for containers
// hosted on a machine.
type containerProvisioner struct {
	provisioner
	containerType  instance.ContainerType
	machine        apiprovisioner.MachineProvisioner
	configObserver configObserver
}

// provisioner provides common behaviour for a running provisioning worker.
type provisioner struct {
	Provisioner
	st                      *apiprovisioner.State
	agentConfig             agent.Config
	broker                  environs.InstanceBroker
	distributionGroupFinder DistributionGroupFinder
	toolsFinder             ToolsFinder
	catacomb                catacomb.Catacomb
	callContext             context.ProviderCallContext
}

// RetryStrategy defines the retry behavior when encountering a retryable
// error during provisioning.
//
// TODO(katco): 2016-08-09: lp:1611427
type RetryStrategy struct {
	retryDelay time.Duration
	retryCount int
}

// NewRetryStrategy returns a new retry strategy with the specified delay and
// count for use with retryable provisioning errors.
func NewRetryStrategy(delay time.Duration, count int) RetryStrategy {
	return RetryStrategy{
		retryDelay: delay,
		retryCount: count,
	}
}

// configObserver is implemented so that tests can see when the environment
// configuration changes.
// The catacomb is set in export_test to the provider's member.
// This is used to prevent notify from blocking a provisioner that has had its
// Kill method invoked.
type configObserver struct {
	sync.Mutex
	observer chan<- *config.Config
	catacomb *catacomb.Catacomb
}

// notify notifies the observer of a configuration change.
func (o *configObserver) notify(cfg *config.Config) {
	o.Lock()
	if o.observer != nil {
		select {
		case o.observer <- cfg:
		case <-o.catacomb.Dying():
		}
	}
	o.Unlock()
}
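
// A sketch of how a test might hook the observer via export_test, per the
// comment above (the wiring below is an assumption for illustration; obs is
// a hypothetical channel):
//
//	obs := make(chan *config.Config, 1)
//	p.configObserver.Lock()
//	p.configObserver.observer = obs
//	p.configObserver.catacomb = &p.catacomb
//	p.configObserver.Unlock()
//
// With that in place, notify(cfg) either delivers cfg on obs or returns as
// soon as the provisioner's catacomb starts dying, so a killed provisioner
// never blocks on the observer.
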
// Kill implements worker.Worker.Kill.
func (p *provisioner) Kill() {
	p.catacomb.Kill(nil)
}

// Wait implements worker.Worker.Wait.
func (p *provisioner) Wait() error {
	return p.catacomb.Wait()
}

// getToolsFinder returns a ToolsFinder for the provided State.
// This exists for mocking.
var getToolsFinder = func(st *apiprovisioner.State) ToolsFinder {
	return st
}

// getDistributionGroupFinder returns a DistributionGroupFinder
// for the provided State. This exists for mocking.
var getDistributionGroupFinder = func(st *apiprovisioner.State) DistributionGroupFinder {
	return st
}

// getStartTask creates a new provisioner task worker.
func (p *provisioner) getStartTask(harvestMode config.HarvestMode) (ProvisionerTask, error) {
	auth, err := authentication.NewAPIAuthenticator(p.st)
	if err != nil {
		return nil, err
	}
	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machineWatcher, err := p.getMachineWatcher()
	if err != nil {
		return nil, err
	}
	retryWatcher, err := p.getRetryWatcher()
	if err != nil && !errors.IsNotImplemented(err) {
		return nil, err
	}
	profileWatcher, err := p.getProfileWatcher()
	if err != nil {
		return nil, err
	}
	tag := p.agentConfig.Tag()
	machineTag, ok := tag.(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
	}

	modelCfg, err := p.st.ModelConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the model config")
	}

	controllerCfg, err := p.st.ControllerConfig()
	if err != nil {
		return nil, errors.Annotate(err, "could not retrieve the controller config")
	}

	task, err := NewProvisionerTask(
		controllerCfg.ControllerUUID(),
		machineTag,
		harvestMode,
		p.st,
		p.distributionGroupFinder,
		p.toolsFinder,
		machineWatcher,
		retryWatcher,
		profileWatcher,
		p.broker,
		auth,
		modelCfg.ImageStream(),
		RetryStrategy{retryDelay: retryStrategyDelay, retryCount: retryStrategyCount},
		p.callContext,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return task, nil
}

// NewEnvironProvisioner returns a new Provisioner for an environment.
// When new machines are added to the state, it allocates instances
// from the environment and allocates them to the new machines.
func NewEnvironProvisioner(st *apiprovisioner.State,
	agentConfig agent.Config,
	environ environs.Environ,
	credentialAPI common.CredentialAPI,
) (Provisioner, error) {
	p := &environProvisioner{
		provisioner: provisioner{
			st:                      st,
			agentConfig:             agentConfig,
			toolsFinder:             getToolsFinder(st),
			distributionGroupFinder: getDistributionGroupFinder(st),
			callContext:             common.NewCloudCallContext(credentialAPI, nil),
		},
		environ: environ,
	}
	p.Provisioner = p
	p.broker = environ
	logger.Tracef("Starting environ provisioner for %q", p.agentConfig.Tag())

	err := catacomb.Invoke(catacomb.Plan{
		Site: &p.catacomb,
		Work: p.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return p, nil
}
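
// A minimal wiring sketch for the environ case (an illustrative assumption,
// not taken from the original source); st, agentConfig, environ and
// credentialAPI are hypothetical values normally supplied by the machine
// agent's dependency engine:
//
//	w, err := NewEnvironProvisioner(st, agentConfig, environ, credentialAPI)
//	if err != nil {
//		return errors.Trace(err)
//	}
//	defer worker.Stop(w)
//
// The returned worker is driven by its catacomb: Kill asks it to stop and
// Wait reports how it exited.
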
func (p *environProvisioner) loop() error {
	// TODO(mjs channeling axw) - It would be better if there were
	// APIs to watch and fetch provisioner specific config instead of
	// watching for all changes to model config. This would avoid the
	// need for a full model config.
	var modelConfigChanges <-chan struct{}
	modelWatcher, err := p.st.WatchForModelConfigChanges()
	if err != nil {
		return loggedErrorStack(errors.Trace(err))
	}
	if err := p.catacomb.Add(modelWatcher); err != nil {
		return errors.Trace(err)
	}
	modelConfigChanges = modelWatcher.Changes()

	modelConfig := p.environ.Config()
	p.configObserver.notify(modelConfig)
	harvestMode := modelConfig.ProvisionerHarvestMode()
	task, err := p.getStartTask(harvestMode)
	if err != nil {
		return loggedErrorStack(errors.Trace(err))
	}
	if err := p.catacomb.Add(task); err != nil {
		return errors.Trace(err)
	}

	for {
		select {
		case <-p.catacomb.Dying():
			return p.catacomb.ErrDying()
		case _, ok := <-modelConfigChanges:
			if !ok {
				return errors.New("model configuration watcher closed")
			}
			modelConfig, err := p.st.ModelConfig()
			if err != nil {
				return errors.Annotate(err, "cannot load model configuration")
			}
			if err := p.setConfig(modelConfig); err != nil {
				return errors.Annotate(err, "loaded invalid model configuration")
			}
			task.SetHarvestMode(modelConfig.ProvisionerHarvestMode())
		}
	}
}

func (p *environProvisioner) getMachineWatcher() (watcher.StringsWatcher, error) {
	return p.st.WatchModelMachines()
}

func (p *environProvisioner) getRetryWatcher() (watcher.NotifyWatcher, error) {
	return p.st.WatchMachineErrorRetry()
}

func (p *environProvisioner) getProfileWatcher() (watcher.StringsWatcher, error) {
	return p.st.WatchModelMachinesCharmProfiles()
}

// setConfig updates the environment configuration and notifies
// the config observer.
func (p *environProvisioner) setConfig(modelConfig *config.Config) error {
	if err := p.environ.SetConfig(modelConfig); err != nil {
		return errors.Trace(err)
	}
	p.configObserver.notify(modelConfig)
	return nil
}

// NewContainerProvisioner returns a new Provisioner. When new machines
// are added to the state, it allocates instances from the environment
// and allocates them to the new machines.
func NewContainerProvisioner(
	containerType instance.ContainerType,
	st *apiprovisioner.State,
	agentConfig agent.Config,
	broker environs.InstanceBroker,
	toolsFinder ToolsFinder,
	distributionGroupFinder DistributionGroupFinder,
	credentialAPI common.CredentialAPI,
) (Provisioner, error) {
	p := &containerProvisioner{
		provisioner: provisioner{
			st:                      st,
			agentConfig:             agentConfig,
			broker:                  broker,
			toolsFinder:             toolsFinder,
			distributionGroupFinder: distributionGroupFinder,
			callContext:             common.NewCloudCallContext(credentialAPI, nil),
		},
		containerType: containerType,
	}
	p.Provisioner = p
	logger.Tracef("Starting %s provisioner for %q", p.containerType, p.agentConfig.Tag())

	err := catacomb.Invoke(catacomb.Plan{
		Site: &p.catacomb,
		Work: p.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return p, nil
}

func (p *containerProvisioner) loop() error {
	modelWatcher, err := p.st.WatchForModelConfigChanges()
	if err != nil {
		return errors.Trace(err)
	}
	if err := p.catacomb.Add(modelWatcher); err != nil {
		return errors.Trace(err)
	}

	modelConfig, err := p.st.ModelConfig()
	if err != nil {
		return errors.Trace(err)
	}
	p.configObserver.notify(modelConfig)
	harvestMode := modelConfig.ProvisionerHarvestMode()

	task, err := p.getStartTask(harvestMode)
	if err != nil {
		return loggedErrorStack(errors.Trace(err))
	}
	if err := p.catacomb.Add(task); err != nil {
		return errors.Trace(err)
	}

	for {
		select {
		case <-p.catacomb.Dying():
			return p.catacomb.ErrDying()
		case _, ok := <-modelWatcher.Changes():
			if !ok {
				return errors.New("model configuration watch closed")
			}
			modelConfig, err := p.st.ModelConfig()
			if err != nil {
				return errors.Annotate(err, "cannot load model configuration")
			}
			p.configObserver.notify(modelConfig)
			task.SetHarvestMode(modelConfig.ProvisionerHarvestMode())
		}
	}
}

func (p *containerProvisioner) getMachine() (apiprovisioner.MachineProvisioner, error) {
	if p.machine == nil {
		tag := p.agentConfig.Tag()
		machineTag, ok := tag.(names.MachineTag)
		if !ok {
			return nil, errors.Errorf("expected names.MachineTag, got %T", tag)
		}
		result, err := p.st.Machines(machineTag)
		if err != nil {
			logger.Errorf("error retrieving %s from state", machineTag)
			return nil, err
		}
		if result[0].Err != nil {
			logger.Errorf("%s is not in state", machineTag)
			return nil, result[0].Err
		}
		p.machine = result[0].Machine
	}
	return p.machine, nil
}

func (p *containerProvisioner) getMachineWatcher() (watcher.StringsWatcher, error) {
	machine, err := p.getMachine()
	if err != nil {
		return nil, err
	}
	return machine.WatchContainers(p.containerType)
}

func (p *containerProvisioner) getRetryWatcher() (watcher.NotifyWatcher, error) {
	return nil, errors.NotImplementedf("getRetryWatcher")
}
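
// getStartTask above tolerates this NotImplemented error (it only aborts on
// other failures), so a container provisioner simply runs its task without a
// retry watcher. The relevant check, condensed from getStartTask:
//
//	retryWatcher, err := p.getRetryWatcher()
//	if err != nil && !errors.IsNotImplemented(err) {
//		return nil, err
//	}
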
func (p *containerProvisioner) getProfileWatcher() (watcher.StringsWatcher, error) {
	// Note: we don't care what type the container is when watching. The
	// provisioner task will make this become a no-op.
	// Also we'll always clean up any documents once the uniter has finished
	// deploying/upgrading a charm.
	machine, err := p.getMachine()
	if err != nil {
		return nil, err
	}
	return machine.WatchContainersCharmProfiles(p.containerType)
}
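
// A minimal wiring sketch for the container case (an illustrative assumption,
// not taken from the original source); broker, toolsFinder,
// distributionGroupFinder and credentialAPI are hypothetical values supplied
// by the hosting machine agent:
//
//	w, err := NewContainerProvisioner(
//		instance.LXD, st, agentConfig, broker,
//		toolsFinder, distributionGroupFinder, credentialAPI,
//	)
//	if err != nil {
//		return errors.Trace(err)
//	}
//	defer worker.Stop(w)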