// -*- Mode: Go; indent-tabs-mode: t -*-

/*
 * Copyright (C) 2016 Canonical Ltd
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 3 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

package ifacestate

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
	"time"

	"gopkg.in/tomb.v2"

	"github.com/snapcore/snapd/i18n"
	"github.com/snapcore/snapd/interfaces"
	"github.com/snapcore/snapd/interfaces/hotplug"
	"github.com/snapcore/snapd/logger"
	"github.com/snapcore/snapd/overlord/hookstate"
	"github.com/snapcore/snapd/overlord/snapstate"
	"github.com/snapcore/snapd/overlord/state"
	"github.com/snapcore/snapd/snap"
	"github.com/snapcore/snapd/timings"
)

// confinementOptions returns interfaces.ConfinementOptions from snapstate.Flags.
func confinementOptions(flags snapstate.Flags) interfaces.ConfinementOptions {
	return interfaces.ConfinementOptions{
		DevMode:  flags.DevMode,
		JailMode: flags.JailMode,
		Classic:  flags.Classic,
	}
}

// setupAffectedSnaps regenerates the security profiles of the given
// affectedSnaps. The snap that triggered the change (affectingSnap) is
// skipped, as is any affected snap that can no longer be found in the state
// (logged on the task rather than failing it).
func (m *InterfaceManager) setupAffectedSnaps(task *state.Task, affectingSnap string, affectedSnaps []string, tm timings.Measurer) error {
	st := task.State()

	// Setup security of the affected snaps.
	for _, affectedInstanceName := range affectedSnaps {
		// the snap that triggered the change needs to be skipped
		if affectedInstanceName == affectingSnap {
			continue
		}
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, affectedInstanceName, &snapst); err != nil {
			task.Errorf("skipping security profiles setup for snap %q when handling snap %q: %v", affectedInstanceName, affectingSnap, err)
			continue
		}
		affectedSnapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		if err := addImplicitSlots(st, affectedSnapInfo); err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		if err := m.setupSnapSecurity(task, affectedSnapInfo, opts, tm); err != nil {
			return err
		}
	}
	return nil
}

// doSetupProfiles is the handler for the setup-profiles task: it (re)creates
// the security profiles of the snap referenced by the task's SnapSetup.
func (m *InterfaceManager) doSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
	task.State().Lock()
	defer task.State().Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(task.State())

	// Get snap.Info from bits handed by the snap manager.
	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	snapInfo, err := snap.ReadInfo(snapsup.InstanceName(), snapsup.SideInfo)
	if err != nil {
		return err
	}

	if len(snapInfo.BadInterfaces) > 0 {
		task.State().Warnf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// We no longer do/need core-phase-2, see
	// https://github.com/snapcore/snapd/pull/5301
	// This code is just here to deal with old state that may still
	// have the 2nd setup-profiles with this flag set.
	var corePhase2 bool
	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
		return err
	}
	if corePhase2 {
		// nothing to do
		return nil
	}

	opts := confinementOptions(snapsup.Flags)
	return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
}

// setupProfilesForSnap (re)adds the snap to the interfaces repository,
// restores its connections from the state and regenerates the security
// profiles of the snap and of all snaps affected by those connection changes.
func (m *InterfaceManager) setupProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapInfo *snap.Info, opts interfaces.ConfinementOptions, tm timings.Measurer) error {
	st := task.State()

	if err := addImplicitSlots(task.State(), snapInfo); err != nil {
		return err
	}

	snapName := snapInfo.InstanceName()

	// The snap may have been updated so perform the following operation to
	// ensure that we are always working on the correct state:
	//
	// - disconnect all connections to/from the given snap
	// - remembering the snaps that were affected by this operation
	// - remove the (old) snap from the interfaces repository
	// - add the (new) snap to the interfaces repository
	// - restore connections based on what is kept in the state
	// - if a connection cannot be restored then remove it from the state
	// - setup the security of all the affected snaps
	disconnectedSnaps, err := m.repo.DisconnectSnap(snapName)
	if err != nil {
		return err
	}
	// XXX: what about snap renames? We should remove the old name (or switch
	// to IDs in the interfaces repository)
	if err := m.repo.RemoveSnap(snapName); err != nil {
		return err
	}
	if err := m.repo.AddSnap(snapInfo); err != nil {
		return err
	}
	if len(snapInfo.BadInterfaces) > 0 {
		task.Logf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// Reload the connections and compute the set of affected snaps. The set
	// affectedSet contains the names of all the affected snap instances. The
	// arrays affectedNames and affectedSnaps contain arrays of snap names and
	// snapInfo's, respectively. The arrays are sorted by name with the special
	// exception that the snap being setup is always first. The affectedSnaps
	// array may be shorter than the set of affected snaps in case any of the
	// snaps cannot be found in the state.
	reconnectedSnaps, err := m.reloadConnections(snapName)
	if err != nil {
		return err
	}
	affectedSet := make(map[string]bool)
	for _, name := range disconnectedSnaps {
		affectedSet[name] = true
	}
	for _, name := range reconnectedSnaps {
		affectedSet[name] = true
	}

	// Sort the set of affected names, ensuring that the snap being setup
	// is first regardless of the name it has.
	affectedNames := make([]string, 0, len(affectedSet))
	for name := range affectedSet {
		if name != snapName {
			affectedNames = append(affectedNames, name)
		}
	}
	sort.Strings(affectedNames)
	affectedNames = append([]string{snapName}, affectedNames...)

	// Obtain snap.Info for each affected snap, skipping those that cannot be
	// found and compute the confinement options that apply to it.
	affectedSnaps := make([]*snap.Info, 0, len(affectedSet))
	confinementOpts := make([]interfaces.ConfinementOptions, 0, len(affectedSet))
	// For the snap being setup we know exactly what was requested.
	affectedSnaps = append(affectedSnaps, snapInfo)
	confinementOpts = append(confinementOpts, opts)
	// For remaining snaps we need to interrogate the state.
	for _, name := range affectedNames[1:] {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, name, &snapst); err != nil {
			task.Errorf("cannot obtain state of snap %s: %s", name, err)
			continue
		}
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		if err := addImplicitSlots(st, snapInfo); err != nil {
			return err
		}
		affectedSnaps = append(affectedSnaps, snapInfo)
		confinementOpts = append(confinementOpts, confinementOptions(snapst.Flags))
	}

	return m.setupSecurityByBackend(task, affectedSnaps, confinementOpts, tm)
}

// doRemoveProfiles is the handler for the remove-profiles task: it removes
// the security profiles of the snap referenced by the task's SnapSetup.
func (m *InterfaceManager) doRemoveProfiles(task *state.Task, tomb *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	// Get SnapSetup for this snap. This gives us the name of the snap.
	snapSetup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}
	snapName := snapSetup.InstanceName()

	return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
}

// removeProfilesForSnap disconnects the snap, refreshes the security profiles
// of the snaps affected by that disconnection, removes the snap from the
// interfaces repository and finally removes its security artefacts.
func (m *InterfaceManager) removeProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapName string, tm timings.Measurer) error {
	// Disconnect the snap entirely.
	// This is required to remove the snap from the interface repository.
	// The returned list of affected snaps will need to have its security setup
	// to reflect the change.
	affectedSnaps, err := m.repo.DisconnectSnap(snapName)
	if err != nil {
		return err
	}
	if err := m.setupAffectedSnaps(task, snapName, affectedSnaps, tm); err != nil {
		return err
	}

	// Remove the snap from the interface repository.
	// This discards all the plugs and slots belonging to that snap.
	if err := m.repo.RemoveSnap(snapName); err != nil {
		return err
	}

	// Remove security artefacts of the snap.
	if err := m.removeSnapSecurity(task, snapName); err != nil {
		return err
	}

	return nil
}

// undoSetupProfiles is the undo handler for setup-profiles: it removes the
// security profiles when the snap was not installed before, or restores the
// profiles of the previously installed revision otherwise.
func (m *InterfaceManager) undoSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	var corePhase2 bool
	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
		return err
	}
	if corePhase2 {
		// let the first setup-profiles deal with this
		return nil
	}

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()

	// Get the name from SnapSetup and use it to find the current SideInfo
	// about the snap, if there is one.
	var snapst snapstate.SnapState
	err = snapstate.Get(st, snapName, &snapst)
	if err != nil && err != state.ErrNoState {
		return err
	}
	sideInfo := snapst.CurrentSideInfo()
	if sideInfo == nil {
		// The snap was not installed before so undo should remove security profiles.
		return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
	} else {
		// The snap was installed before so undo should setup the old security profiles.
		snapInfo, err := snap.ReadInfo(snapName, sideInfo)
		if err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
	}
}

// doDiscardConns is the handler for the discard-conns task: it removes from
// the conns state all connections that refer to the (no longer present) snap.
// The removed entries are stored on the task under "removed" so that
// undoDiscardConns can restore them.
func (m *InterfaceManager) doDiscardConns(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapSetup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	instanceName := snapSetup.InstanceName()

	var snapst snapstate.SnapState
	err = snapstate.Get(st, instanceName, &snapst)
	if err != nil && err != state.ErrNoState {
		return err
	}

	// refuse to discard while any revision of the snap is still in the state
	if err == nil && len(snapst.Sequence) != 0 {
		return fmt.Errorf("cannot discard connections for snap %q while it is present", instanceName)
	}
	conns, err := getConns(st)
	if err != nil {
		return err
	}
	removed := make(map[string]*connState)
	for id := range conns {
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}
		if connRef.PlugRef.Snap == instanceName || connRef.SlotRef.Snap == instanceName {
			removed[id] = conns[id]
			delete(conns, id)
		}
	}
	task.Set("removed", removed)
	setConns(st, conns)
	return nil
}

// undoDiscardConns restores the connections removed by doDiscardConns from
// the "removed" map stored on the task.
func (m *InterfaceManager) undoDiscardConns(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	var removed map[string]*connState
	err := task.Get("removed", &removed)
	if err != nil && err != state.ErrNoState {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	for id, connState := range removed {
		conns[id] = connState
	}
	setConns(st, conns)
	task.Set("removed", nil)
	return nil
}

// getDynamicHookAttributes returns the dynamic plug and slot attributes
// stored on the task under "plug-dynamic" and "slot-dynamic", defaulting
// either one to an empty map when unset.
func getDynamicHookAttributes(task *state.Task) (plugAttrs, slotAttrs map[string]interface{}, err error) {
	if err = task.Get("plug-dynamic", &plugAttrs); err != nil && err != state.ErrNoState {
		return nil, nil, err
	}
	if err = task.Get("slot-dynamic", &slotAttrs); err != nil && err != state.ErrNoState {
		return nil, nil, err
	}
	if plugAttrs == nil {
		plugAttrs = make(map[string]interface{})
	}
	if slotAttrs == nil {
		slotAttrs = make(map[string]interface{})
	}

	return plugAttrs, slotAttrs, nil
}

// setDynamicHookAttributes stores the dynamic plug and slot attributes on the
// task (under "plug-dynamic" and "slot-dynamic").
func setDynamicHookAttributes(task *state.Task, plugAttrs, slotAttrs map[string]interface{}) {
	task.Set("plug-dynamic", plugAttrs)
	task.Set("slot-dynamic", slotAttrs)
}

// doConnect is the handler for the connect task: it connects the plug and
// slot in the repository (subject to policy checks), refreshes the security
// profiles of both snaps (unless "delayed-setup-profiles" is set) and records
// the connection in the conns state.
func (m *InterfaceManager) doConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	var autoConnect bool
	if err := task.Get("auto", &autoConnect); err != nil && err != state.ErrNoState {
		return err
	}
	var byGadget bool
	if err := task.Get("by-gadget", &byGadget); err != nil && err != state.ErrNoState {
		return err
	}
	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", plugRef.Snap)
		}
		return err
	}

	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", slotRef.Snap)
		}
		return err
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}

	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// attributes are always present, even if there are no hooks (they're initialized by Connect).
	plugDynamicAttrs, slotDynamicAttrs, err := getDynamicHookAttributes(task)
	if err != nil {
		return fmt.Errorf("failed to get hook attributes: %s", err)
	}

	var policyChecker interfaces.PolicyFunc

	// manual connections and connections by the gadget obey the
	// policy "connection" rules, other auto-connections obey the
	// "auto-connection" rules
	if autoConnect && !byGadget {
		autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = func(plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) (bool, error) {
			ok, _, err := autochecker.check(plug, slot)
			return ok, err
		}
	} else {
		policyCheck, err := newConnectChecker(st, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = policyCheck.check
	}

	// static attributes of the plug and slot not provided, the ones from snap infos will be used
	conn, err := m.repo.Connect(connRef, nil, plugDynamicAttrs, nil, slotDynamicAttrs, policyChecker)
	if err != nil || conn == nil {
		// a nil conn with nil err means the policy checker denied the
		// connection; nothing to record in that case
		return err
	}

	if !delayedSetupProfiles {
		slotOpts := confinementOptions(slotSnapst.Flags)
		if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
			return err
		}

		plugOpts := confinementOptions(plugSnapst.Flags)
		if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
			return err
		}
	} else {
		logger.Debugf("Connect handler: skipping setupSnapSecurity for snaps %q and %q", plug.Snap.InstanceName(), slot.Snap.InstanceName())
	}

	// For undo handler. We need to remember old state of the connection only
	// if undesired flag is set because that means there was a remembered
	// inactive connection already and we should restore its properties
	// in case of undo. Otherwise we don't have to keep old-conn because undo
	// can simply delete any trace of the connection.
	if old, ok := conns[connRef.ID()]; ok && old.Undesired {
		task.Set("old-conn", old)
	}

	conns[connRef.ID()] = &connState{
		Interface:        conn.Interface(),
		StaticPlugAttrs:  conn.Plug.StaticAttrs(),
		DynamicPlugAttrs: conn.Plug.DynamicAttrs(),
		StaticSlotAttrs:  conn.Slot.StaticAttrs(),
		DynamicSlotAttrs: conn.Slot.DynamicAttrs(),
		Auto:             autoConnect,
		ByGadget:         byGadget,
		HotplugKey:       slot.HotplugKey,
	}
	setConns(st, conns)

	// the dynamic attributes might have been updated by the interface's BeforeConnectPlug/Slot code,
	// so we need to update the task for connect-plug- and connect-slot- hooks to see new values.
	setDynamicHookAttributes(task, conn.Plug.DynamicAttrs(), conn.Slot.DynamicAttrs())
	return nil
}

// doDisconnect is the handler for the disconnect task: it disconnects the
// plug and slot in the repository, refreshes the security profiles of both
// snaps and updates the conns state according to the "forget", "by-hotplug"
// and "auto-disconnect" task flags.
func (m *InterfaceManager) doDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	cref := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// forget flag can be passed with snap disconnect --forget
	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	var snapStates []snapstate.SnapState
	for _, instanceName := range []string{plugRef.Snap, slotRef.Snap} {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, instanceName, &snapst); err != nil {
			if err == state.ErrNoState {
				task.Logf("skipping disconnect operation for connection %s %s, snap %q doesn't exist", plugRef, slotRef, instanceName)
				return nil
			}
			task.Errorf("skipping security profiles setup for snap %q when disconnecting %s from %s: %v", instanceName, plugRef, slotRef, err)
		} else {
			snapStates = append(snapStates, snapst)
		}
	}

	conn, ok := conns[cref.ID()]
	if !ok {
		return fmt.Errorf("internal error: connection %q not found in state", cref.ID())
	}

	// store old connection for undo
	task.Set("old-conn", conn)

	err = m.repo.Disconnect(plugRef.Snap, plugRef.Name, slotRef.Snap, slotRef.Name)
	if err != nil {
		_, notConnected := err.(*interfaces.NotConnectedError)
		_, noPlugOrSlot := err.(*interfaces.NoPlugOrSlotError)
		// not connected, just forget it.
		if forget && (notConnected || noPlugOrSlot) {
			delete(conns, cref.ID())
			setConns(st, conns)
			return nil
		}
		return fmt.Errorf("snapd changed, please retry the operation: %v", err)
	}

	for _, snapst := range snapStates {
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		if err := m.setupSnapSecurity(task, snapInfo, opts, perfTimings); err != nil {
			return err
		}
	}

	// "auto-disconnect" flag indicates it's a disconnect triggered automatically as part of snap removal;
	// such disconnects should not set undesired flag and instead just remove the connection.
	var autoDisconnect bool
	if err := task.Get("auto-disconnect", &autoDisconnect); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: failed to read 'auto-disconnect' flag: %s", err)
	}

	// "by-hotplug" flag indicates it's a disconnect triggered by hotplug remove event;
	// we want to keep information of the connection and just mark it as hotplug-gone.
	var byHotplug bool
	if err := task.Get("by-hotplug", &byHotplug); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'by-hotplug' flag: %s", err)
	}

	switch {
	case forget:
		delete(conns, cref.ID())
	case byHotplug:
		conn.HotplugGone = true
		conns[cref.ID()] = conn
	case conn.Auto && !autoDisconnect:
		// a manual disconnect of an auto-connection: remember that the
		// user doesn't want it so it is not re-established on refresh
		conn.Undesired = true
		conn.DynamicPlugAttrs = nil
		conn.DynamicSlotAttrs = nil
		conn.StaticPlugAttrs = nil
		conn.StaticSlotAttrs = nil
		conns[cref.ID()] = conn
	default:
		delete(conns, cref.ID())
	}
	setConns(st, conns)

	return nil
}

// undoDisconnect is the undo handler for disconnect: it re-establishes the
// connection saved on the task as "old-conn" (in the repository and in the
// conns state) and refreshes the security profiles of both snaps.
func (m *InterfaceManager) undoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	var oldconn connState
	err := task.Get("old-conn", &oldconn)
	if err == state.ErrNoState {
		// nothing was disconnected, nothing to restore
		return nil
	}
	if err != nil {
		return err
	}

	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if forget && (plug == nil || slot == nil) {
		// we were trying to forget an inactive connection that was
		// referring to a non-existing plug or slot; just restore it
		// in the conns state but do not reconnect via repository.
		conns[connRef.ID()] = &oldconn
		setConns(st, conns)
		return nil
	}
	if plug == nil {
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	if slot == nil {
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	_, err = m.repo.Connect(connRef, nil, oldconn.DynamicPlugAttrs, nil, oldconn.DynamicSlotAttrs, nil)
	if err != nil {
		return err
	}

	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	conns[connRef.ID()] = &oldconn
	setConns(st, conns)

	return nil
}

// undoConnect is the undo handler for connect: it restores the previous
// conns state entry (or deletes it if there was none), disconnects the plug
// and slot in the repository and refreshes the security profiles of both
// snaps unless "delayed-setup-profiles" is set.
func (m *InterfaceManager) undoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}
	connRef := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	var old connState
	err = task.Get("old-conn", &old)
	if err != nil && err != state.ErrNoState {
		return err
	}
	if err == nil {
		conns[connRef.ID()] = &old
	} else {
		delete(conns, connRef.ID())
	}
	setConns(st, conns)

	if err := m.repo.Disconnect(connRef.PlugRef.Snap, connRef.PlugRef.Name, connRef.SlotRef.Snap, connRef.SlotRef.Name); err != nil {
		return err
	}

	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}
	if delayedSetupProfiles {
		logger.Debugf("Connect undo handler: skipping setupSnapSecurity for snaps %q and %q", connRef.PlugRef.Snap, connRef.SlotRef.Snap)
		return nil
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		return fmt.Errorf("internal error: snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		return fmt.Errorf("internal error: snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	var plugSnapst snapstate.SnapState
	err = snapstate.Get(st, plugRef.Snap, &plugSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", plugRef.Snap)
	}
	if err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	err = snapstate.Get(st, slotRef.Snap, &slotSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", slotRef.Snap)
	}
	if err != nil {
		return err
	}
	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	return nil
}

// timeout for shared content retry
var contentLinkRetryTimeout = 30 * time.Second

// timeout for retrying hotplug-related tasks
var hotplugRetryTimeout = 300 * time.Millisecond

// obsoleteCorePhase2SetupProfiles returns true when the task is a
// setup-profiles task carrying the obsolete core-phase-2 flag.
func obsoleteCorePhase2SetupProfiles(kind string, task *state.Task) (bool, error) {
	if kind != "setup-profiles" {
		return false, nil
	}

	var corePhase2 bool
	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
		return false, err
	}
	return corePhase2, nil
}

// checkAutoconnectConflicts returns a state.Retry error when another
// in-flight task conflicts with auto-connecting plugSnap's plug to
// slotSnap's slot, or nil when it is safe to proceed.
func checkAutoconnectConflicts(st *state.State, autoconnectTask *state.Task, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
			// are rejected by conflict check logic in snapstate.
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
			}
			if slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}

		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		// setup-profiles core-phase-2 is now no-op, we shouldn't
		// conflict on it; note, old snapd would create this task even
		// for regular snaps if installed with the dangerous flag.
		obsoleteCorePhase2, err := obsoleteCorePhase2SetupProfiles(k, task)
		if err != nil {
			return err
		}
		if obsoleteCorePhase2 {
			continue
		}

		// other snap that affects us because of plug or slot
		if k == "unlink-snap" || k == "link-snap" || k == "setup-profiles" || k == "discard-snap" {
			// discard-snap is scheduled as part of garbage collection during refresh, if multiple revisions are already installed.
			// this revision check avoids conflict with own discard tasks created as part of install/refresh.
			if k == "discard-snap" && autoconnectTask.Change() != nil && autoconnectTask.Change().ID() == task.Change().ID() {
				continue
			}
			// if snap is getting removed, we will retry but the snap will be gone and auto-connect becomes no-op
			// if snap is getting installed/refreshed - temporary conflict, retry later
			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
		}
	}
	return nil
}

// checkDisconnectConflicts returns a state.Retry error when another in-flight
// task conflicts with disconnecting plugSnap from slotSnap (initiated by
// disconnectingSnap), or nil when it is safe to proceed.
func checkDisconnectConflicts(st *state.State, disconnectingSnap, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
			// are rejected by conflict check logic in snapstate.
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap || slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}

		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		// another task related to same snap op (unrelated op would be blocked by snapstate conflict logic)
		if otherSnapName == disconnectingSnap {
			continue
		}

		// note, don't care about unlink-snap for the opposite end. This relies
		// on the fact that auto-disconnect will create conflicting "disconnect" tasks that
		// we will retry with the logic above.
		if k == "link-snap" || k == "setup-profiles" {
			// other snap is getting installed/refreshed - temporary conflict
			return &state.Retry{After: connectRetryTimeout}
		}
	}
	return nil
}

// checkHotplugDisconnectConflicts returns a state.Retry error when another
// in-flight task conflicts with a hotplug-triggered disconnect of plugSnap
// from slotSnap, or nil when it is safe to proceed.
func checkHotplugDisconnectConflicts(st *state.State, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
			}
			if slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}
		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		if k == "link-snap" || k == "setup-profiles" || k == "unlink-snap" {
			// other snap is getting installed/refreshed/removed - temporary conflict
			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
		}
	}
	return nil
}

// inSameChangeWaitChain returns true if there is a wait chain so
// that `startT` is run before `searchT` in the same state.Change.
func inSameChangeWaitChain(startT, searchT *state.Task) bool {
	// Trivial case, tasks in different changes (they could in theory
	// still have cross-change waits but we don't do these today).
	// In this case, return quickly.
	if startT.Change() != searchT.Change() {
		return false
	}
	// Do a recursive check if it's in the same change
	return waitChainSearch(startT, searchT)
}

// waitChainSearch recursively walks the halt-task graph from startT and
// reports whether searchT is reachable (i.e. waits, directly or indirectly,
// for startT).
func waitChainSearch(startT, searchT *state.Task) bool {
	for _, cand := range startT.HaltTasks() {
		if cand == searchT {
			return true
		}
		if waitChainSearch(cand, searchT) {
			return true
		}
	}

	return false
}

// batchConnectTasks creates connect tasks and interface hooks for
// conns and sets their wait chain with regard to the setupProfiles
// task.
//
// The tasks are chained so that: - prepare-plug-, prepare-slot- and
// connect tasks are all executed before setup-profiles -
// connect-plug-, connect-slot- are all executed after setup-profiles.
// The "delayed-setup-profiles" flag is set on the connect tasks to
// indicate that doConnect handler should not set security backends up
// because this will be done later by the setup-profiles task.
1007 func batchConnectTasks(st *state.State, snapsup *snapstate.SnapSetup, conns map[string]*interfaces.ConnRef, connOpts map[string]*connectOpts) (ts *state.TaskSet, hasInterfaceHooks bool, err error) { 1008 setupProfiles := st.NewTask("setup-profiles", fmt.Sprintf(i18n.G("Setup snap %q (%s) security profiles for auto-connections"), snapsup.InstanceName(), snapsup.Revision())) 1009 setupProfiles.Set("snap-setup", snapsup) 1010 1011 ts = state.NewTaskSet() 1012 for connID, conn := range conns { 1013 var opts connectOpts 1014 if providedOpts := connOpts[connID]; providedOpts != nil { 1015 opts = *providedOpts 1016 } else { 1017 // default 1018 opts.AutoConnect = true 1019 } 1020 opts.DelayedSetupProfiles = true 1021 connectTs, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, opts) 1022 if err != nil { 1023 return nil, false, fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err) 1024 } 1025 1026 if len(connectTs.Tasks()) > 1 { 1027 hasInterfaceHooks = true 1028 } 1029 1030 // setup-profiles needs to wait for the main "connect" task 1031 connectTask, _ := connectTs.Edge(ConnectTaskEdge) 1032 if connectTask == nil { 1033 return nil, false, fmt.Errorf("internal error: no 'connect' task found for %q", conn) 1034 } 1035 setupProfiles.WaitFor(connectTask) 1036 1037 // setup-profiles must be run before the task that marks the end of connect-plug- and connect-slot- hooks 1038 afterConnectTask, _ := connectTs.Edge(AfterConnectHooksEdge) 1039 if afterConnectTask != nil { 1040 afterConnectTask.WaitFor(setupProfiles) 1041 } 1042 ts.AddAll(connectTs) 1043 } 1044 if len(ts.Tasks()) > 0 { 1045 ts.AddTask(setupProfiles) 1046 } 1047 return ts, hasInterfaceHooks, nil 1048 } 1049 1050 // firstTaskAfterBootWhenPreseeding finds the first task to be run for thisSnap 1051 // on first boot after mark-preseeded task, this is always the install hook. 1052 // It is an internal error if install hook for thisSnap cannot be found. 
func firstTaskAfterBootWhenPreseeding(thisSnap string, markPreseeded *state.Task) (*state.Task, error) {
	if markPreseeded.Change() == nil {
		return nil, fmt.Errorf("internal error: %s task not in change", markPreseeded.Kind())
	}
	// the install hook is among the tasks that wait for mark-preseeded
	for _, ht := range markPreseeded.HaltTasks() {
		if ht.Kind() == "run-hook" {
			var hs hookstate.HookSetup
			if err := ht.Get("hook-setup", &hs); err != nil {
				return nil, fmt.Errorf("internal error: cannot get hook setup: %v", err)
			}
			if hs.Hook == "install" && hs.Snap == thisSnap {
				return ht, nil
			}
		}
	}
	return nil, fmt.Errorf("internal error: cannot find install hook for snap %q", thisSnap)
}

// filterForSlot returns a filter function that narrows candidate slots
// down to just the given slot (matched by its string representation).
func filterForSlot(slot *snap.SlotInfo) func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
	return func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
		for _, candSlot := range candSlots {
			if candSlot.String() == slot.String() {
				return []*snap.SlotInfo{slot}
			}
		}
		return nil
	}
}

// doAutoConnect creates task(s) to connect the given snap to viable candidates.
func (m *InterfaceManager) doAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// The previous task (link-snap) may have triggered a restart,
	// if this is the case we can only proceed once the restart
	// has happened or we may not have all the interfaces of the
	// new core/base snap.
	if err := snapstate.FinishRestart(task, snapsup); err != nil {
		return err
	}

	snapName := snapsup.InstanceName()

	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
	if err != nil {
		return err
	}

	// NOTE(review): "gadgectConnect" is a misspelling of gadgetConnect; kept as-is.
	gadgectConnect := newGadgetConnect(st, task, m.repo, snapName, deviceCtx)

	// wait for auto-install, started by prerequisites code, for
	// the default-providers of content ifaces so we can
	// auto-connect to them; snapstate prerequisites does a bit
	// more filtering than this so defaultProviders here can
	// contain some more snaps; should not be an issue in practice
	// given the check below checks for same chain and we don't
	// forcefully wait for defaultProviders; we just retry for
	// things in the intersection between defaultProviders here and
	// snaps with not ready link-snap|setup-profiles tasks
	defaultProviders := snap.DefaultContentProviders(m.repo.Plugs(snapName))
	for _, chg := range st.Changes() {
		if chg.Status().Ready() {
			continue
		}
		for _, t := range chg.Tasks() {
			if t.Status().Ready() {
				continue
			}
			if t.Kind() != "link-snap" && t.Kind() != "setup-profiles" {
				continue
			}
			if snapsup, err := snapstate.TaskSnapSetup(t); err == nil {
				// Only retry if the task that installs the
				// content provider is not waiting for us
				// (or this will just hang forever).
				_, ok := defaultProviders[snapsup.InstanceName()]
				if ok && !inSameChangeWaitChain(task, t) {
					return &state.Retry{After: contentLinkRetryTimeout}
				}
			}
		}
	}

	plugs := m.repo.Plugs(snapName)
	slots := m.repo.Slots(snapName)
	newconns := make(map[string]*interfaces.ConnRef, len(plugs)+len(slots))
	var connOpts map[string]*connectOpts

	// map a conflict-check result into either a retry or a hard error
	conflictError := func(retry *state.Retry, err error) error {
		if retry != nil {
			task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
			return retry // will retry
		}
		return fmt.Errorf("auto-connect conflict check failed: %v", err)
	}

	// Consider gadget connections, we want to remember them in
	// any case with "by-gadget" set, so they should be processed
	// before the auto-connection ones.
	if err := gadgectConnect.addGadgetConnections(newconns, conns, conflictError); err != nil {
		return err
	}
	if len(newconns) > 0 {
		connOpts = make(map[string]*connectOpts, len(newconns))
		byGadgetOpts := &connectOpts{AutoConnect: true, ByGadget: true}
		for key := range newconns {
			connOpts[key] = byGadgetOpts
		}
	}

	// Auto-connect all the plugs
	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
		return fmt.Sprintf("cannot auto-connect plug %s, candidates found: %s", plug, strings.Join(candRefs, ", "))
	}
	if err := autochecker.addAutoConnections(newconns, plugs, nil, conns, cannotAutoConnectLog, conflictError); err != nil {
		return err
	}
	// Auto-connect all the slots
	for _, slot := range slots {
		candidates := m.repo.AutoConnectCandidatePlugs(snapName, slot.Name, autochecker.check)
		if len(candidates) == 0 {
			continue
		}

		cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
			return fmt.Sprintf("cannot auto-connect slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
		}
		if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
			return err
		}
	}

	autots, hasInterfaceHooks, err := batchConnectTasks(st, snapsup, newconns, connOpts)
	if err != nil {
		return err
	}

	// If interface hooks are not present then connects can be executed during
	// preseeding.
	// Otherwise we will run all connects, their hooks and setup-profiles after
	// preseeding (on first boot). Note, we may be facing multiple connections
	// here where only some have hooks; however there is no point in running
	// those without hooks before mark-preseeded, because only setup-profiles is
	// performance-critical and it still needs to run after those with hooks.
	if m.preseed && hasInterfaceHooks {
		for _, t := range st.Tasks() {
			if t.Kind() == "mark-preseeded" {
				markPreseeded := t
				// consistency check
				if markPreseeded.Status() != state.DoStatus {
					return fmt.Errorf("internal error: unexpected state of mark-preseeded task: %s", markPreseeded.Status())
				}

				firstTaskAfterBoot, err := firstTaskAfterBootWhenPreseeding(snapsup.InstanceName(), markPreseeded)
				if err != nil {
					return err
				}
				// first task of the snap that normally runs on first boot
				// needs to wait on connects & interface hooks.
				firstTaskAfterBoot.WaitAll(autots)

				// connect tasks and interface hooks need to wait for end of preseeding
				// (they need to run on first boot, not during preseeding).
				autots.WaitFor(markPreseeded)
				t.Change().AddAll(autots)
				task.SetStatus(state.DoneStatus)
				st.EnsureBefore(0)
				return nil
			}
		}
		return fmt.Errorf("internal error: mark-preseeded task not found in preseeding mode")
	}

	if len(autots.Tasks()) > 0 {
		snapstate.InjectTasks(task, autots)

		st.EnsureBefore(0)
	}

	task.SetStatus(state.DoneStatus)
	return nil
}

// doAutoDisconnect creates tasks for disconnecting all interfaces of a snap and running its interface hooks.
func (m *InterfaceManager) doAutoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	snapName := snapsup.InstanceName()
	connections, err := m.repo.Connections(snapName)
	if err != nil {
		return err
	}

	// check for conflicts on all connections first before creating disconnect hooks
	for _, connRef := range connections {
		if err := checkDisconnectConflicts(st, snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if _, retry := err.(*state.Retry); retry {
				logger.Debugf("disconnecting interfaces of snap %q will be retried because of %q - %q conflict", snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap)
				task.Logf("Waiting for conflicting change in progress...")
				return err // will retry
			}
			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
		}
	}

	hookTasks := state.NewTaskSet()
	for _, connRef := range connections {
		conn, err := m.repo.Connection(connRef)
		if err != nil {
			// NOTE(review): a failed lookup silently stops processing the
			// remaining connections (break, not return) — presumably a
			// best-effort choice; confirm this is intended.
			break
		}
		// "auto-disconnect" flag indicates it's a disconnect triggered as part of snap removal, in which
		// case we want to skip the logic of marking auto-connections as 'undesired' and instead just remove
		// them so they can be automatically connected if the snap is installed again.
		ts, err := disconnectTasks(st, conn, disconnectOpts{AutoDisconnect: true})
		if err != nil {
			return err
		}
		hookTasks.AddAll(ts)
	}

	snapstate.InjectTasks(task, hookTasks)

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}

// undoAutoConnect is currently a no-op (see TODO below).
func (m *InterfaceManager) undoAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	// TODO Introduce disconnection hooks, and run them here as well to give a chance
	// for the snap to undo whatever it did when the connection was established.
	return nil
}

// transitionConnectionsCoreMigration will transition all connections
// from oldName to newName. Note that this is only useful when you
// know that newName supports everything that oldName supports,
// otherwise you will be in a world of pain.
func (m *InterfaceManager) transitionConnectionsCoreMigration(st *state.State, oldName, newName string) error {
	// transition over, ubuntu-core has only slots
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// rewrite the slot side of every connection that referenced the old name
	for id := range conns {
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}
		if connRef.SlotRef.Snap == oldName {
			connRef.SlotRef.Snap = newName
			conns[connRef.ID()] = conns[id]
			delete(conns, id)
		}
	}
	setConns(st, conns)

	// After migrating connections in state, remove them from repo so they stay in sync and we don't
	// attempt to run disconnects on when the old core gets removed as part of the transition.
	if err := m.removeConnections(oldName); err != nil {
		return err
	}

	// The reloadConnections() just modifies the repository object, it
	// has no effect on the running system, i.e. no security profiles
	// on disk are rewritten. This is ok because core/ubuntu-core have
	// exactly the same profiles and nothing in the generated policies
	// has the core snap-name encoded.
	if _, err := m.reloadConnections(newName); err != nil {
		return err
	}

	return nil
}

// doTransitionUbuntuCore migrates all connections from the task's
// "old-name" snap to its "new-name" snap.
func (m *InterfaceManager) doTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	var oldName, newName string
	if err := t.Get("old-name", &oldName); err != nil {
		return err
	}
	if err := t.Get("new-name", &newName); err != nil {
		return err
	}

	return m.transitionConnectionsCoreMigration(st, oldName, newName)
}

// undoTransitionUbuntuCore reverses doTransitionUbuntuCore by migrating
// the connections back from "new-name" to "old-name".
func (m *InterfaceManager) undoTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	// symmetrical to the "do" method, just reverse them again
	var oldName, newName string
	if err := t.Get("old-name", &oldName); err != nil {
		return err
	}
	if err := t.Get("new-name", &newName); err != nil {
		return err
	}

	return m.transitionConnectionsCoreMigration(st, newName, oldName)
}

// doHotplugConnect creates task(s) to (re)create old connections or auto-connect viable slots in response to hotplug "add" event.
func (m *InterfaceManager) doHotplugConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	if slot == nil {
		return fmt.Errorf("cannot find hotplug slot for interface %s and hotplug key %q", ifaceName, hotplugKey)
	}

	// find old connections for slots of this device - note we can't ask the repository since we need
	// to recreate old connections that are only remembered in the state.
	connsForDevice := findConnsForHotplugKey(conns, ifaceName, hotplugKey)

	// map a conflict-check result into either a retry or a hard error
	conflictError := func(retry *state.Retry, err error) error {
		if retry != nil {
			task.Logf("hotplug connect will be retried: %s", retry.Reason)
			return retry // will retry
		}
		return fmt.Errorf("hotplug-connect conflict check failed: %v", err)
	}

	// find old connections to recreate
	var recreate []*interfaces.ConnRef
	for _, id := range connsForDevice {
		conn := conns[id]
		// device was not unplugged, this is the case if snapd is restarted and we enumerate devices.
		// note, the situation where device was not unplugged but has changed is handled
		// by hotplugDeviceAdded handler - updateDevice.
		if !conn.HotplugGone || conn.Undesired {
			continue
		}

		// the device was unplugged while connected, so it had disconnect hooks run; recreate the connection
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}

		if err := checkAutoconnectConflicts(st, task, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			retry, _ := err.(*state.Retry)
			return conflictError(retry, err)
		}
		recreate = append(recreate, connRef)
	}

	// find new auto-connections
	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
	if err != nil {
		return err
	}

	instanceName := slot.Snap.InstanceName()
	candidates := m.repo.AutoConnectCandidatePlugs(instanceName, slot.Name, autochecker.check)

	newconns := make(map[string]*interfaces.ConnRef, len(candidates))
	// Auto-connect the plugs
	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
		return fmt.Sprintf("cannot auto-connect hotplug slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
	}
	if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
		return err
	}

	// nothing to recreate or auto-connect - we are done
	if len(recreate) == 0 && len(newconns) == 0 {
		return nil
	}

	// Create connect tasks and interface hooks for old connections
	connectTs := state.NewTaskSet()
	for _, conn := range recreate {
		// preserve whether the original connection was made automatically
		wasAutoconnected := conns[conn.ID()].Auto
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: wasAutoconnected})
		if err != nil {
			return fmt.Errorf("internal error: connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}
	// Create connect tasks and interface hooks for new auto-connections
	for _, conn := range newconns {
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: true})
		if err != nil {
			return fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}

	if len(connectTs.Tasks()) > 0 {
		snapstate.InjectTasks(task, connectTs)
		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}

// doHotplugUpdateSlot updates static attributes of a hotplug slot for given device.
func (m *InterfaceManager) doHotplugUpdateSlot(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	var attrs map[string]interface{}
	if err := task.Get("slot-attrs", &attrs); err != nil {
		return fmt.Errorf("internal error: cannot get slot-attrs attribute for device %s, interface %s: %s", hotplugKey, ifaceName, err)
	}

	stateSlots, err := getHotplugSlots(st)
	if err != nil {
		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
	}

	// update the attributes in the repository first...
	slot, err := m.repo.UpdateHotplugSlotAttrs(ifaceName, hotplugKey, attrs)
	if err != nil {
		return err
	}

	// ...then mirror them into the hotplug-slots state entry
	if slotSpec, ok := stateSlots[slot.Name]; ok {
		slotSpec.StaticAttrs = attrs
		stateSlots[slot.Name] = slotSpec
		setHotplugSlots(st, stateSlots)
	} else {
		return fmt.Errorf("internal error: cannot find slot %s for device %q", slot.Name, hotplugKey)
	}

	return nil
}

// doHotplugRemoveSlot removes hotplug slot for given device from the repository in response to udev "remove" event.
1541 // This task must necessarily be run after all affected slot gets disconnected in the repo. 1542 func (m *InterfaceManager) doHotplugRemoveSlot(task *state.Task, _ *tomb.Tomb) error { 1543 st := task.State() 1544 st.Lock() 1545 defer st.Unlock() 1546 1547 ifaceName, hotplugKey, err := getHotplugAttrs(task) 1548 if err != nil { 1549 return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err) 1550 } 1551 1552 slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey) 1553 if err != nil { 1554 return fmt.Errorf("internal error: cannot determine slots: %v", err) 1555 } 1556 if slot != nil { 1557 if err := m.repo.RemoveSlot(slot.Snap.InstanceName(), slot.Name); err != nil { 1558 return fmt.Errorf("cannot remove hotplug slot: %v", err) 1559 } 1560 } 1561 1562 stateSlots, err := getHotplugSlots(st) 1563 if err != nil { 1564 return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err) 1565 } 1566 1567 // remove the slot from hotplug-slots in the state as long as there are no connections referencing it, 1568 // including connection with hotplug-gone=true. 1569 slotDef := findHotplugSlot(stateSlots, ifaceName, hotplugKey) 1570 if slotDef == nil { 1571 return fmt.Errorf("internal error: cannot find hotplug slot for interface %s, hotplug key %q", ifaceName, hotplugKey) 1572 } 1573 conns, err := getConns(st) 1574 if err != nil { 1575 return err 1576 } 1577 for _, conn := range conns { 1578 if conn.Interface == slotDef.Interface && conn.HotplugKey == slotDef.HotplugKey { 1579 // there is a connection referencing this slot, do not remove it, only mark as "gone" 1580 slotDef.HotplugGone = true 1581 stateSlots[slotDef.Name] = slotDef 1582 setHotplugSlots(st, stateSlots) 1583 return nil 1584 } 1585 } 1586 delete(stateSlots, slotDef.Name) 1587 setHotplugSlots(st, stateSlots) 1588 1589 return nil 1590 } 1591 1592 // doHotplugDisconnect creates task(s) to disconnect connections and remove slots in response to hotplug "remove" event. 
func (m *InterfaceManager) doHotplugDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	connections, err := m.repo.ConnectionsForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	// no active connections for this device - nothing to disconnect
	if len(connections) == 0 {
		return nil
	}

	// check for conflicts on all connections first before creating disconnect hooks
	for _, connRef := range connections {
		if err := checkHotplugDisconnectConflicts(st, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
		}
	}

	dts := state.NewTaskSet()
	for _, connRef := range connections {
		conn, err := m.repo.Connection(connRef)
		if err != nil {
			// this should never happen since we get all connections from the repo
			return fmt.Errorf("internal error: cannot get connection %q: %s", connRef, err)
		}
		// "by-hotplug" flag indicates it's a disconnect triggered as part of hotplug removal.
		ts, err := disconnectTasks(st, conn, disconnectOpts{ByHotplug: true})
		if err != nil {
			return fmt.Errorf("internal error: cannot create disconnect tasks: %s", err)
		}
		dts.AddAll(ts)
	}

	snapstate.InjectTasks(task, dts)
	st.EnsureBefore(0)

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}

// doHotplugAddSlot creates a hotplug slot for the device described by the
// task's "proposed-slot"/"device-info" attributes: either recreating or
// updating a previously known slot, or defining a brand new one.
func (m *InterfaceManager) doHotplugAddSlot(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	systemSnap, err := systemSnapInfo(st)
	if err != nil {
		// NOTE(review): the underlying error is discarded here; consider
		// wrapping it for easier debugging.
		return fmt.Errorf("system snap not available")
	}

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	var proposedSlot hotplug.ProposedSlot
	if err := task.Get("proposed-slot", &proposedSlot); err != nil {
		return fmt.Errorf("internal error: cannot get proposed hotplug slot from task attributes: %s", err)
	}
	var devinfo hotplug.HotplugDeviceInfo
	if err := task.Get("device-info", &devinfo); err != nil {
		return fmt.Errorf("internal error: cannot get hotplug device info from task attributes: %s", err)
	}

	stateSlots, err := getHotplugSlots(st)
	if err != nil {
		// NOTE(review): err.Error() with %v is redundant (plain err would do)
		// and differs in style from the other handlers.
		return fmt.Errorf("internal error obtaining hotplug slots: %v", err.Error())
	}

	iface := m.repo.Interface(ifaceName)
	if iface == nil {
		return fmt.Errorf("internal error: cannot find interface %s", ifaceName)
	}

	slot := findHotplugSlot(stateSlots, ifaceName, hotplugKey)

	// if we know this slot already, restore / update it.
	if slot != nil {
		if slot.HotplugGone {
			// hotplugGone means the device was unplugged, so its disconnect hooks were run and can now
			// simply recreate the slot with potentially new attributes, and old connections will be re-created
			newSlot := &snap.SlotInfo{
				Name:       slot.Name,
				Label:      proposedSlot.Label,
				Snap:       systemSnap,
				Interface:  ifaceName,
				Attrs:      proposedSlot.Attrs,
				HotplugKey: hotplugKey,
			}
			return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
		}

		// else - not gone, restored already by reloadConnections, but may need updating.
		if !reflect.DeepEqual(proposedSlot.Attrs, slot.StaticAttrs) {
			ts := updateDevice(st, iface.Name(), hotplugKey, proposedSlot.Attrs)
			snapstate.InjectTasks(task, ts)
			st.EnsureBefore(0)
			task.SetStatus(state.DoneStatus)
		} // else - nothing to do
		return nil
	}

	// New slot.
	slotName := hotplugSlotName(hotplugKey, systemSnap.InstanceName(), proposedSlot.Name, iface.Name(), &devinfo, m.repo, stateSlots)
	newSlot := &snap.SlotInfo{
		Name:       slotName,
		Label:      proposedSlot.Label,
		Snap:       systemSnap,
		Interface:  iface.Name(),
		Attrs:      proposedSlot.Attrs,
		HotplugKey: hotplugKey,
	}
	return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
}

// doHotplugSeqWait returns Retry error if there is another change for same hotplug key and a lower sequence number.
// Sequence numbers control the order of execution of hotplug-related changes, which would otherwise be executed in
// arbitrary order by task runner, leading to unexpected results if multiple events for same device are in flight
// (e.g. plugging, followed by immediate unplugging, or snapd restart with pending hotplug changes).
// The handler expects "hotplug-key" and "hotplug-seq" values set on own and other hotplug-related changes.
1726 func (m *InterfaceManager) doHotplugSeqWait(task *state.Task, _ *tomb.Tomb) error { 1727 st := task.State() 1728 st.Lock() 1729 defer st.Unlock() 1730 1731 chg := task.Change() 1732 if chg == nil || !isHotplugChange(chg) { 1733 return fmt.Errorf("internal error: task %q not in a hotplug change", task.Kind()) 1734 } 1735 1736 seq, hotplugKey, err := getHotplugChangeAttrs(chg) 1737 if err != nil { 1738 return err 1739 } 1740 1741 for _, otherChg := range st.Changes() { 1742 if otherChg.Status().Ready() || otherChg.ID() == chg.ID() { 1743 continue 1744 } 1745 1746 // only inspect hotplug changes 1747 if !isHotplugChange(otherChg) { 1748 continue 1749 } 1750 1751 otherSeq, otherKey, err := getHotplugChangeAttrs(otherChg) 1752 if err != nil { 1753 return err 1754 } 1755 1756 // conflict with retry if there another change affecting same device and has lower sequence number 1757 if hotplugKey == otherKey && otherSeq < seq { 1758 task.Logf("Waiting processing of earlier hotplug event change %q affecting device with hotplug key %q", otherChg.Kind(), hotplugKey) 1759 // TODO: consider introducing a new task that runs last and does EnsureBefore(0) for hotplug changes 1760 return &state.Retry{After: hotplugRetryTimeout} 1761 } 1762 } 1763 1764 // no conflicting change for same hotplug key found 1765 return nil 1766 }