// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package storageprovisioner

import (
	"path/filepath"

	"github.com/juju/errors"
	"gopkg.in/juju/names.v2"

	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/core/status"
	environscontext "github.com/juju/juju/environs/context"
	"github.com/juju/juju/storage"
)

// createFilesystems creates filesystems with the specified parameters.
//
// Parameters are grouped by their storage source; each source's parameters
// are validated and then created in one batch. Creations that fail are
// rescheduled for retry (status left as "pending"), successful creations
// are published to state and recorded in the provisioner context.
func createFilesystems(ctx *context, ops map[names.FilesystemTag]*createFilesystemOp) error {
	filesystemParams := make([]storage.FilesystemParams, 0, len(ops))
	for _, op := range ops {
		filesystemParams = append(filesystemParams, op.args)
	}
	paramsBySource, filesystemSources, err := filesystemParamsBySource(
		ctx.config.StorageDir,
		filesystemParams,
		ctx.managedFilesystemSource,
		ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var filesystems []storage.Filesystem
	var statuses []params.EntityStatusArgs
	for sourceName, filesystemParams := range paramsBySource {
		logger.Debugf("creating filesystems: %v", filesystemParams)
		filesystemSource := filesystemSources[sourceName]
		// Weed out parameters the source rejects; invalid entries get an
		// "error" status and are not retried.
		validFilesystemParams, validationErrors := validateFilesystemParams(
			filesystemSource, filesystemParams,
		)
		for i, err := range validationErrors {
			if err == nil {
				continue
			}
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    filesystemParams[i].Tag.String(),
				Status: status.Error.String(),
				Info:   err.Error(),
			})
			logger.Debugf(
				"failed to validate parameters for %s: %v",
				names.ReadableString(filesystemParams[i].Tag), err,
			)
		}
		filesystemParams = validFilesystemParams
		if len(filesystemParams) == 0 {
			continue
		}
		results, err := filesystemSource.CreateFilesystems(ctx.config.CloudCallContext, filesystemParams)
		if err != nil {
			return errors.Annotatef(err, "creating filesystems from source %q", sourceName)
		}
		for i, result := range results {
			statuses = append(statuses, params.EntityStatusArgs{
				Tag:    filesystemParams[i].Tag.String(),
				Status: status.Attaching.String(),
			})
			// Pointer into the slice element just appended, so the error
			// branch below can overwrite its status in place.
			entityStatus := &statuses[len(statuses)-1]
			if result.Error != nil {
				// Reschedule the filesystem creation.
				reschedule = append(reschedule, ops[filesystemParams[i].Tag])

				// Note: we keep the status as "pending" to indicate
				// that we will retry. When we distinguish between
				// transient and permanent errors, we will set the
				// status to "error" for permanent errors.
				entityStatus.Status = status.Pending.String()
				entityStatus.Info = result.Error.Error()
				logger.Debugf(
					"failed to create %s: %v",
					names.ReadableString(filesystemParams[i].Tag),
					result.Error,
				)
				continue
			}
			filesystems = append(filesystems, *result.Filesystem)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if len(filesystems) == 0 {
		return nil
	}
	// TODO(axw) we need to be able to list filesystems in the provider,
	// by environment, so that we can "harvest" them if they're
	// unknown. This will take care of killing filesystems that we fail
	// to record in state.
	errorResults, err := ctx.config.Filesystems.SetFilesystemInfo(filesystemsFromStorage(filesystems))
	if err != nil {
		return errors.Annotate(err, "publishing filesystems to state")
	}
	for i, result := range errorResults {
		// Per-filesystem publish failures are logged but do not abort the
		// loop; the remaining filesystems are still recorded.
		if result.Error != nil {
			logger.Errorf(
				"publishing filesystem %s to state: %v",
				filesystems[i].Tag.Id(),
				result.Error,
			)
		}
	}
	for _, v := range filesystems {
		updateFilesystem(ctx, v)
	}
	return nil
}

// attachFilesystems creates filesystem attachments with the specified parameters.
119 func attachFilesystems(ctx *context, ops map[params.MachineStorageId]*attachFilesystemOp) error { 120 filesystemAttachmentParams := make([]storage.FilesystemAttachmentParams, 0, len(ops)) 121 for _, op := range ops { 122 args := op.args 123 if args.Path == "" { 124 args.Path = filepath.Join(ctx.config.StorageDir, args.Filesystem.Id()) 125 } 126 filesystemAttachmentParams = append(filesystemAttachmentParams, args) 127 } 128 paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource( 129 ctx.config.StorageDir, 130 filesystemAttachmentParams, 131 ctx.filesystems, 132 ctx.managedFilesystemSource, 133 ctx.config.Registry, 134 ) 135 if err != nil { 136 return errors.Trace(err) 137 } 138 var reschedule []scheduleOp 139 var filesystemAttachments []storage.FilesystemAttachment 140 var statuses []params.EntityStatusArgs 141 for sourceName, filesystemAttachmentParams := range paramsBySource { 142 logger.Debugf("attaching filesystems: %+v", filesystemAttachmentParams) 143 filesystemSource := filesystemSources[sourceName] 144 results, err := filesystemSource.AttachFilesystems(ctx.config.CloudCallContext, filesystemAttachmentParams) 145 if err != nil { 146 return errors.Annotatef(err, "attaching filesystems from source %q", sourceName) 147 } 148 for i, result := range results { 149 p := filesystemAttachmentParams[i] 150 statuses = append(statuses, params.EntityStatusArgs{ 151 Tag: p.Filesystem.String(), 152 Status: status.Attached.String(), 153 }) 154 entityStatus := &statuses[len(statuses)-1] 155 if result.Error != nil { 156 // Reschedule the filesystem attachment. 157 id := params.MachineStorageId{ 158 MachineTag: p.Machine.String(), 159 AttachmentTag: p.Filesystem.String(), 160 } 161 reschedule = append(reschedule, ops[id]) 162 163 // Note: we keep the status as "attaching" to 164 // indicate that we will retry. When we distinguish 165 // between transient and permanent errors, we will 166 // set the status to "error" for permanent errors. 
167 entityStatus.Status = status.Attaching.String() 168 entityStatus.Info = result.Error.Error() 169 logger.Debugf( 170 "failed to attach %s to %s: %v", 171 names.ReadableString(p.Filesystem), 172 names.ReadableString(p.Machine), 173 result.Error, 174 ) 175 continue 176 } 177 filesystemAttachments = append(filesystemAttachments, *result.FilesystemAttachment) 178 } 179 } 180 scheduleOperations(ctx, reschedule...) 181 setStatus(ctx, statuses) 182 if err := setFilesystemAttachmentInfo(ctx, filesystemAttachments); err != nil { 183 return errors.Trace(err) 184 } 185 return nil 186 } 187 188 // removeFilesystems destroys or releases filesystems with the specified parameters. 189 func removeFilesystems(ctx *context, ops map[names.FilesystemTag]*removeFilesystemOp) error { 190 tags := make([]names.FilesystemTag, 0, len(ops)) 191 for tag := range ops { 192 tags = append(tags, tag) 193 } 194 removeFilesystemParams, err := removeFilesystemParams(ctx, tags) 195 if err != nil { 196 return errors.Trace(err) 197 } 198 filesystemParams := make([]storage.FilesystemParams, len(tags)) 199 removeFilesystemParamsByTag := make(map[names.FilesystemTag]params.RemoveFilesystemParams) 200 for i, args := range removeFilesystemParams { 201 removeFilesystemParamsByTag[tags[i]] = args 202 filesystemParams[i] = storage.FilesystemParams{ 203 Tag: tags[i], 204 Provider: storage.ProviderType(args.Provider), 205 } 206 } 207 paramsBySource, filesystemSources, err := filesystemParamsBySource( 208 ctx.config.StorageDir, 209 filesystemParams, 210 ctx.managedFilesystemSource, 211 ctx.config.Registry, 212 ) 213 if err != nil { 214 return errors.Trace(err) 215 } 216 var remove []names.Tag 217 var reschedule []scheduleOp 218 var statuses []params.EntityStatusArgs 219 removeFilesystems := func(tags []names.FilesystemTag, ids []string, f func(environscontext.ProviderCallContext, []string) ([]error, error)) error { 220 if len(ids) == 0 { 221 return nil 222 } 223 errs, err := f(ctx.config.CloudCallContext, ids) 
224 if err != nil { 225 return errors.Trace(err) 226 } 227 for i, err := range errs { 228 tag := tags[i] 229 if err == nil { 230 remove = append(remove, tag) 231 continue 232 } 233 // Failed to destroy or release filesystem; reschedule and update status. 234 reschedule = append(reschedule, ops[tag]) 235 statuses = append(statuses, params.EntityStatusArgs{ 236 Tag: tag.String(), 237 Status: status.Error.String(), 238 Info: errors.Annotate(err, "removing filesystem").Error(), 239 }) 240 } 241 return nil 242 } 243 for sourceName, filesystemParams := range paramsBySource { 244 logger.Debugf("removing filesystems from %q: %v", sourceName, filesystemParams) 245 filesystemSource := filesystemSources[sourceName] 246 removeTags := make([]names.FilesystemTag, len(filesystemParams)) 247 removeParams := make([]params.RemoveFilesystemParams, len(filesystemParams)) 248 for i, args := range filesystemParams { 249 removeTags[i] = args.Tag 250 removeParams[i] = removeFilesystemParamsByTag[args.Tag] 251 } 252 destroyTags, destroyIds, releaseTags, releaseIds := partitionRemoveFilesystemParams(removeTags, removeParams) 253 if err := removeFilesystems(destroyTags, destroyIds, filesystemSource.DestroyFilesystems); err != nil { 254 if err != nil { 255 return errors.Trace(err) 256 } 257 } 258 if err := removeFilesystems(releaseTags, releaseIds, filesystemSource.ReleaseFilesystems); err != nil { 259 if err != nil { 260 return errors.Trace(err) 261 } 262 } 263 } 264 scheduleOperations(ctx, reschedule...) 
265 setStatus(ctx, statuses) 266 if err := removeEntities(ctx, remove); err != nil { 267 return errors.Annotate(err, "removing filesystems from state") 268 } 269 return nil 270 } 271 272 func partitionRemoveFilesystemParams(removeTags []names.FilesystemTag, removeParams []params.RemoveFilesystemParams) ( 273 destroyTags []names.FilesystemTag, destroyIds []string, 274 releaseTags []names.FilesystemTag, releaseIds []string, 275 ) { 276 destroyTags = make([]names.FilesystemTag, 0, len(removeParams)) 277 destroyIds = make([]string, 0, len(removeParams)) 278 releaseTags = make([]names.FilesystemTag, 0, len(removeParams)) 279 releaseIds = make([]string, 0, len(removeParams)) 280 for i, args := range removeParams { 281 tag := removeTags[i] 282 if args.Destroy { 283 destroyTags = append(destroyTags, tag) 284 destroyIds = append(destroyIds, args.FilesystemId) 285 } else { 286 releaseTags = append(releaseTags, tag) 287 releaseIds = append(releaseIds, args.FilesystemId) 288 } 289 } 290 return 291 } 292 293 // detachFilesystems destroys filesystem attachments with the specified parameters. 
// detachFilesystems destroys filesystem attachments with the specified
// parameters. Successful detachments are removed from state and from the
// provisioner context; failures are rescheduled for retry with the status
// kept as "detaching".
func detachFilesystems(ctx *context, ops map[params.MachineStorageId]*detachFilesystemOp) error {
	filesystemAttachmentParams := make([]storage.FilesystemAttachmentParams, 0, len(ops))
	for _, op := range ops {
		filesystemAttachmentParams = append(filesystemAttachmentParams, op.args)
	}
	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(
		ctx.config.StorageDir,
		filesystemAttachmentParams,
		ctx.filesystems,
		ctx.managedFilesystemSource,
		ctx.config.Registry,
	)
	if err != nil {
		return errors.Trace(err)
	}
	var reschedule []scheduleOp
	var statuses []params.EntityStatusArgs
	var remove []params.MachineStorageId
	for sourceName, filesystemAttachmentParams := range paramsBySource {
		logger.Debugf("detaching filesystems: %+v", filesystemAttachmentParams)
		// NOTE(review): if the source is missing and this is not an
		// application-kind (CAAS) context, filesystemSource is nil and the
		// DetachFilesystems call below would panic. Presumably
		// filesystemAttachmentParamsBySource always populates an entry for
		// every source name it returns — confirm.
		filesystemSource, ok := filesystemSources[sourceName]
		if !ok && ctx.isApplicationKind() {
			continue
		}
		errs, err := filesystemSource.DetachFilesystems(ctx.config.CloudCallContext, filesystemAttachmentParams)
		if err != nil {
			return errors.Annotatef(err, "detaching filesystems from source %q", sourceName)
		}
		for i, err := range errs {
			p := filesystemAttachmentParams[i]
			statuses = append(statuses, params.EntityStatusArgs{
				Tag: p.Filesystem.String(),
				// TODO(axw) when we support multiple
				// attachment, we'll have to check if
				// there are any other attachments
				// before saying the status "detached".
				Status: status.Detached.String(),
			})
			id := params.MachineStorageId{
				MachineTag:    p.Machine.String(),
				AttachmentTag: p.Filesystem.String(),
			}
			// Pointer into the element just appended, so the error branch
			// can amend its status in place.
			entityStatus := &statuses[len(statuses)-1]
			if err != nil {
				reschedule = append(reschedule, ops[id])
				entityStatus.Status = status.Detaching.String()
				entityStatus.Info = err.Error()
				logger.Debugf(
					"failed to detach %s from %s: %v",
					names.ReadableString(p.Filesystem),
					names.ReadableString(p.Machine),
					err,
				)
				continue
			}
			remove = append(remove, id)
		}
	}
	scheduleOperations(ctx, reschedule...)
	setStatus(ctx, statuses)
	if err := removeAttachments(ctx, remove); err != nil {
		return errors.Annotate(err, "removing attachments from state")
	}
	for _, id := range remove {
		delete(ctx.filesystemAttachments, id)
	}
	return nil
}

// filesystemParamsBySource separates the filesystem parameters by filesystem source.
//
// It returns the parameters grouped by source name, plus a map of source name
// to filesystem source. A filesystem backed by a volume is served by the
// managed filesystem source; a non-dynamic source is recorded as nil and its
// parameters are excluded (the machine provisioner creates those filesystems).
func filesystemParamsBySource(
	baseStorageDir string,
	params []storage.FilesystemParams,
	managedFilesystemSource storage.FilesystemSource,
	registry storage.ProviderRegistry,
) (map[string][]storage.FilesystemParams, map[string]storage.FilesystemSource, error) {
	// TODO(axw) later we may have multiple instantiations (sources)
	// for a storage provider, e.g. multiple Ceph installations. For
	// now we assume a single source for each provider type, with no
	// configuration.
	filesystemSources := make(map[string]storage.FilesystemSource)
	for _, params := range params {
		sourceName := string(params.Provider)
		if _, ok := filesystemSources[sourceName]; ok {
			continue
		}
		// Volume-backed filesystems are managed by this worker directly.
		if params.Volume != (names.VolumeTag{}) {
			filesystemSources[sourceName] = managedFilesystemSource
			continue
		}
		filesystemSource, err := filesystemSource(
			baseStorageDir, sourceName, params.Provider, registry,
		)
		if errors.Cause(err) == errNonDynamic {
			filesystemSource = nil
		} else if err != nil {
			return nil, nil, errors.Annotate(err, "getting filesystem source")
		}
		filesystemSources[sourceName] = filesystemSource
	}
	paramsBySource := make(map[string][]storage.FilesystemParams)
	for _, param := range params {
		sourceName := string(param.Provider)
		filesystemSource := filesystemSources[sourceName]
		if filesystemSource == nil {
			// Ignore nil filesystem sources; this means that the
			// filesystem should be created by the machine-provisioner.
			continue
		}
		paramsBySource[sourceName] = append(paramsBySource[sourceName], param)
	}
	return paramsBySource, filesystemSources, nil
}

// validateFilesystemParams validates a collection of filesystem parameters.
// It returns the subset that passed validation, plus a slice of per-input
// errors parallel to the input (nil where validation succeeded).
func validateFilesystemParams(
	filesystemSource storage.FilesystemSource,
	filesystemParams []storage.FilesystemParams,
) ([]storage.FilesystemParams, []error) {
	valid := make([]storage.FilesystemParams, 0, len(filesystemParams))
	results := make([]error, len(filesystemParams))
	for i, params := range filesystemParams {
		err := filesystemSource.ValidateFilesystemParams(params)
		if err == nil {
			valid = append(valid, params)
		}
		results[i] = err
	}
	return valid, results
}

// filesystemAttachmentParamsBySource separates the filesystem attachment parameters by filesystem source.
426 func filesystemAttachmentParamsBySource( 427 baseStorageDir string, 428 filesystemAttachmentParams []storage.FilesystemAttachmentParams, 429 filesystems map[names.FilesystemTag]storage.Filesystem, 430 managedFilesystemSource storage.FilesystemSource, 431 registry storage.ProviderRegistry, 432 ) (map[string][]storage.FilesystemAttachmentParams, map[string]storage.FilesystemSource, error) { 433 // TODO(axw) later we may have multiple instantiations (sources) 434 // for a storage provider, e.g. multiple Ceph installations. For 435 // now we assume a single source for each provider type, with no 436 // configuration. 437 filesystemSources := make(map[string]storage.FilesystemSource) 438 paramsBySource := make(map[string][]storage.FilesystemAttachmentParams) 439 for _, params := range filesystemAttachmentParams { 440 sourceName := string(params.Provider) 441 paramsBySource[sourceName] = append(paramsBySource[sourceName], params) 442 if _, ok := filesystemSources[sourceName]; ok { 443 continue 444 } 445 filesystem, ok := filesystems[params.Filesystem] 446 if !ok || filesystem.Volume != (names.VolumeTag{}) { 447 filesystemSources[sourceName] = managedFilesystemSource 448 continue 449 } 450 filesystemSource, err := filesystemSource( 451 baseStorageDir, sourceName, params.Provider, registry, 452 ) 453 if err != nil { 454 return nil, nil, errors.Annotate(err, "getting filesystem source") 455 } 456 filesystemSources[sourceName] = filesystemSource 457 } 458 return paramsBySource, filesystemSources, nil 459 } 460 461 func setFilesystemAttachmentInfo(ctx *context, filesystemAttachments []storage.FilesystemAttachment) error { 462 if len(filesystemAttachments) == 0 { 463 return nil 464 } 465 // TODO(axw) we need to be able to list filesystem attachments in the 466 // provider, by environment, so that we can "harvest" them if they're 467 // unknown. This will take care of killing filesystems that we fail to 468 // record in state. 
469 errorResults, err := ctx.config.Filesystems.SetFilesystemAttachmentInfo( 470 filesystemAttachmentsFromStorage(filesystemAttachments), 471 ) 472 if err != nil { 473 return errors.Annotate(err, "publishing filesystems to state") 474 } 475 for i, result := range errorResults { 476 if result.Error != nil { 477 return errors.Annotatef( 478 result.Error, "publishing attachment of %s to %s to state", 479 names.ReadableString(filesystemAttachments[i].Filesystem), 480 names.ReadableString(filesystemAttachments[i].Machine), 481 ) 482 } 483 // Record the filesystem attachment in the context. 484 id := params.MachineStorageId{ 485 MachineTag: filesystemAttachments[i].Machine.String(), 486 AttachmentTag: filesystemAttachments[i].Filesystem.String(), 487 } 488 ctx.filesystemAttachments[id] = filesystemAttachments[i] 489 removePendingFilesystemAttachment(ctx, id) 490 } 491 return nil 492 } 493 494 func filesystemsFromStorage(in []storage.Filesystem) []params.Filesystem { 495 out := make([]params.Filesystem, len(in)) 496 for i, f := range in { 497 paramsFilesystem := params.Filesystem{ 498 f.Tag.String(), 499 "", 500 params.FilesystemInfo{ 501 f.FilesystemId, 502 "", // pool 503 f.Size, 504 }, 505 } 506 if f.Volume != (names.VolumeTag{}) { 507 paramsFilesystem.VolumeTag = f.Volume.String() 508 } 509 out[i] = paramsFilesystem 510 } 511 return out 512 } 513 514 func filesystemAttachmentsFromStorage(in []storage.FilesystemAttachment) []params.FilesystemAttachment { 515 out := make([]params.FilesystemAttachment, len(in)) 516 for i, f := range in { 517 out[i] = params.FilesystemAttachment{ 518 f.Filesystem.String(), 519 f.Machine.String(), 520 params.FilesystemAttachmentInfo{ 521 f.Path, 522 f.ReadOnly, 523 }, 524 } 525 } 526 return out 527 } 528 529 type createFilesystemOp struct { 530 exponentialBackoff 531 args storage.FilesystemParams 532 } 533 534 func (op *createFilesystemOp) key() interface{} { 535 return op.args.Tag 536 } 537 538 type removeFilesystemOp struct { 539 
exponentialBackoff 540 tag names.FilesystemTag 541 } 542 543 func (op *removeFilesystemOp) key() interface{} { 544 return op.tag 545 } 546 547 type attachFilesystemOp struct { 548 exponentialBackoff 549 args storage.FilesystemAttachmentParams 550 } 551 552 func (op *attachFilesystemOp) key() interface{} { 553 return params.MachineStorageId{ 554 MachineTag: op.args.Machine.String(), 555 AttachmentTag: op.args.Filesystem.String(), 556 } 557 } 558 559 type detachFilesystemOp struct { 560 exponentialBackoff 561 args storage.FilesystemAttachmentParams 562 } 563 564 func (op *detachFilesystemOp) key() interface{} { 565 return params.MachineStorageId{ 566 MachineTag: op.args.Machine.String(), 567 AttachmentTag: op.args.Filesystem.String(), 568 } 569 }