github.com/rigado/snapd@v2.42.5-go-mod+incompatible/overlord/ifacestate/handlers.go (about)

     1  // -*- Mode: Go; indent-tabs-mode: t -*-
     2  
     3  /*
     4   * Copyright (C) 2016 Canonical Ltd
     5   *
     6   * This program is free software: you can redistribute it and/or modify
     7   * it under the terms of the GNU General Public License version 3 as
     8   * published by the Free Software Foundation.
     9   *
    10   * This program is distributed in the hope that it will be useful,
    11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13   * GNU General Public License for more details.
    14   *
    15   * You should have received a copy of the GNU General Public License
    16   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17   *
    18   */
    19  
    20  package ifacestate
    21  
    22  import (
    23  	"fmt"
    24  	"reflect"
    25  	"sort"
    26  	"strings"
    27  	"time"
    28  
    29  	"gopkg.in/tomb.v2"
    30  
    31  	"github.com/snapcore/snapd/i18n"
    32  	"github.com/snapcore/snapd/interfaces"
    33  	"github.com/snapcore/snapd/interfaces/hotplug"
    34  	"github.com/snapcore/snapd/logger"
    35  	"github.com/snapcore/snapd/overlord/snapstate"
    36  	"github.com/snapcore/snapd/overlord/state"
    37  	"github.com/snapcore/snapd/snap"
    38  	"github.com/snapcore/snapd/timings"
    39  )
    40  
    41  // confinementOptions returns interfaces.ConfinementOptions from snapstate.Flags.
    42  func confinementOptions(flags snapstate.Flags) interfaces.ConfinementOptions {
    43  	return interfaces.ConfinementOptions{
    44  		DevMode:  flags.DevMode,
    45  		JailMode: flags.JailMode,
    46  		Classic:  flags.Classic,
    47  	}
    48  }
    49  
    50  func (m *InterfaceManager) setupAffectedSnaps(task *state.Task, affectingSnap string, affectedSnaps []string, tm timings.Measurer) error {
    51  	st := task.State()
    52  
    53  	// Setup security of the affected snaps.
    54  	for _, affectedInstanceName := range affectedSnaps {
    55  		// the snap that triggered the change needs to be skipped
    56  		if affectedInstanceName == affectingSnap {
    57  			continue
    58  		}
    59  		var snapst snapstate.SnapState
    60  		if err := snapstate.Get(st, affectedInstanceName, &snapst); err != nil {
    61  			task.Errorf("skipping security profiles setup for snap %q when handling snap %q: %v", affectedInstanceName, affectingSnap, err)
    62  			continue
    63  		}
    64  		affectedSnapInfo, err := snapst.CurrentInfo()
    65  		if err != nil {
    66  			return err
    67  		}
    68  		if err := addImplicitSlots(st, affectedSnapInfo); err != nil {
    69  			return err
    70  		}
    71  		opts := confinementOptions(snapst.Flags)
    72  		if err := m.setupSnapSecurity(task, affectedSnapInfo, opts, tm); err != nil {
    73  			return err
    74  		}
    75  	}
    76  	return nil
    77  }
    78  
    79  func (m *InterfaceManager) doSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
    80  	task.State().Lock()
    81  	defer task.State().Unlock()
    82  
    83  	perfTimings := timings.NewForTask(task)
    84  	defer perfTimings.Save(task.State())
    85  
    86  	// Get snap.Info from bits handed by the snap manager.
    87  	snapsup, err := snapstate.TaskSnapSetup(task)
    88  	if err != nil {
    89  		return err
    90  	}
    91  
    92  	snapInfo, err := snap.ReadInfo(snapsup.InstanceName(), snapsup.SideInfo)
    93  	if err != nil {
    94  		return err
    95  	}
    96  
    97  	// We no longer do/need core-phase-2, see
    98  	//   https://github.com/snapcore/snapd/pull/5301
    99  	// This code is just here to deal with old state that may still
   100  	// have the 2nd setup-profiles with this flag set.
   101  	var corePhase2 bool
   102  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   103  		return err
   104  	}
   105  	if corePhase2 {
   106  		// nothing to do
   107  		return nil
   108  	}
   109  
   110  	opts := confinementOptions(snapsup.Flags)
   111  	return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
   112  }
   113  
// setupProfilesForSnap regenerates the security profiles of the given snap
// and of every snap affected by disconnecting and reconnecting its
// interfaces. The snap being set up is always processed first; the remaining
// affected snaps follow in sorted name order.
func (m *InterfaceManager) setupProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapInfo *snap.Info, opts interfaces.ConfinementOptions, tm timings.Measurer) error {
	st := task.State()

	if err := addImplicitSlots(task.State(), snapInfo); err != nil {
		return err
	}

	snapName := snapInfo.InstanceName()

	// The snap may have been updated so perform the following operation to
	// ensure that we are always working on the correct state:
	//
	// - disconnect all connections to/from the given snap
	//   - remembering the snaps that were affected by this operation
	// - remove the (old) snap from the interfaces repository
	// - add the (new) snap to the interfaces repository
	// - restore connections based on what is kept in the state
	//   - if a connection cannot be restored then remove it from the state
	// - setup the security of all the affected snaps
	disconnectedSnaps, err := m.repo.DisconnectSnap(snapName)
	if err != nil {
		return err
	}
	// XXX: what about snap renames? We should remove the old name (or switch
	// to IDs in the interfaces repository)
	if err := m.repo.RemoveSnap(snapName); err != nil {
		return err
	}
	if err := m.repo.AddSnap(snapInfo); err != nil {
		return err
	}
	if len(snapInfo.BadInterfaces) > 0 {
		// Surface plugs/slots that were rejected by the repository.
		task.Logf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// Reload the connections and compute the set of affected snaps. The set
	// affectedSet set contains name of all the affected snap instances.  The
	// arrays affectedNames and affectedSnaps contain, arrays of snap names and
	// snapInfo's, respectively. The arrays are sorted by name with the special
	// exception that the snap being setup is always first. The affectedSnaps
	// array may be shorter than the set of affected snaps in case any of the
	// snaps cannot be found in the state.
	reconnectedSnaps, err := m.reloadConnections(snapName)
	if err != nil {
		return err
	}
	affectedSet := make(map[string]bool)
	for _, name := range disconnectedSnaps {
		affectedSet[name] = true
	}
	for _, name := range reconnectedSnaps {
		affectedSet[name] = true
	}

	// Sort the set of affected names, ensuring that the snap being setup
	// is first regardless of the name it has.
	affectedNames := make([]string, 0, len(affectedSet))
	for name := range affectedSet {
		if name != snapName {
			affectedNames = append(affectedNames, name)
		}
	}
	sort.Strings(affectedNames)
	affectedNames = append([]string{snapName}, affectedNames...)

	// Obtain snap.Info for each affected snap, skipping those that cannot be
	// found and compute the confinement options that apply to it.
	affectedSnaps := make([]*snap.Info, 0, len(affectedSet))
	confinementOpts := make([]interfaces.ConfinementOptions, 0, len(affectedSet))
	// For the snap being setup we know exactly what was requested.
	affectedSnaps = append(affectedSnaps, snapInfo)
	confinementOpts = append(confinementOpts, opts)
	// For remaining snaps we need to interrogate the state.
	for _, name := range affectedNames[1:] {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, name, &snapst); err != nil {
			// A missing snap simply drops out of the affected set.
			task.Errorf("cannot obtain state of snap %s: %s", name, err)
			continue
		}
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		if err := addImplicitSlots(st, snapInfo); err != nil {
			return err
		}
		affectedSnaps = append(affectedSnaps, snapInfo)
		confinementOpts = append(confinementOpts, confinementOptions(snapst.Flags))
	}

	return m.setupSecurityByBackend(task, affectedSnaps, confinementOpts, tm)
}
   206  
   207  func (m *InterfaceManager) doRemoveProfiles(task *state.Task, tomb *tomb.Tomb) error {
   208  	st := task.State()
   209  	st.Lock()
   210  	defer st.Unlock()
   211  
   212  	perfTimings := timings.NewForTask(task)
   213  	defer perfTimings.Save(st)
   214  
   215  	// Get SnapSetup for this snap. This is gives us the name of the snap.
   216  	snapSetup, err := snapstate.TaskSnapSetup(task)
   217  	if err != nil {
   218  		return err
   219  	}
   220  	snapName := snapSetup.InstanceName()
   221  
   222  	return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   223  }
   224  
   225  func (m *InterfaceManager) removeProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapName string, tm timings.Measurer) error {
   226  	// Disconnect the snap entirely.
   227  	// This is required to remove the snap from the interface repository.
   228  	// The returned list of affected snaps will need to have its security setup
   229  	// to reflect the change.
   230  	affectedSnaps, err := m.repo.DisconnectSnap(snapName)
   231  	if err != nil {
   232  		return err
   233  	}
   234  	if err := m.setupAffectedSnaps(task, snapName, affectedSnaps, tm); err != nil {
   235  		return err
   236  	}
   237  
   238  	// Remove the snap from the interface repository.
   239  	// This discards all the plugs and slots belonging to that snap.
   240  	if err := m.repo.RemoveSnap(snapName); err != nil {
   241  		return err
   242  	}
   243  
   244  	// Remove security artefacts of the snap.
   245  	if err := m.removeSnapSecurity(task, snapName); err != nil {
   246  		return err
   247  	}
   248  
   249  	return nil
   250  }
   251  
   252  func (m *InterfaceManager) undoSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
   253  	st := task.State()
   254  	st.Lock()
   255  	defer st.Unlock()
   256  
   257  	perfTimings := timings.NewForTask(task)
   258  	defer perfTimings.Save(st)
   259  
   260  	var corePhase2 bool
   261  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   262  		return err
   263  	}
   264  	if corePhase2 {
   265  		// let the first setup-profiles deal with this
   266  		return nil
   267  	}
   268  
   269  	snapsup, err := snapstate.TaskSnapSetup(task)
   270  	if err != nil {
   271  		return err
   272  	}
   273  	snapName := snapsup.InstanceName()
   274  
   275  	// Get the name from SnapSetup and use it to find the current SideInfo
   276  	// about the snap, if there is one.
   277  	var snapst snapstate.SnapState
   278  	err = snapstate.Get(st, snapName, &snapst)
   279  	if err != nil && err != state.ErrNoState {
   280  		return err
   281  	}
   282  	sideInfo := snapst.CurrentSideInfo()
   283  	if sideInfo == nil {
   284  		// The snap was not installed before so undo should remove security profiles.
   285  		return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   286  	} else {
   287  		// The snap was installed before so undo should setup the old security profiles.
   288  		snapInfo, err := snap.ReadInfo(snapName, sideInfo)
   289  		if err != nil {
   290  			return err
   291  		}
   292  		opts := confinementOptions(snapst.Flags)
   293  		return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
   294  	}
   295  }
   296  
// doDiscardConns is the task handler for "discard-conns": it removes from the
// state every connection in which the (no longer present) snap participates,
// stashing the removed entries on the task so undoDiscardConns can restore
// them.
func (m *InterfaceManager) doDiscardConns(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapSetup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	instanceName := snapSetup.InstanceName()

	var snapst snapstate.SnapState
	err = snapstate.Get(st, instanceName, &snapst)
	if err != nil && err != state.ErrNoState {
		return err
	}

	// Discarding connections is only valid once all revisions of the snap
	// are gone from the state.
	if err == nil && len(snapst.Sequence) != 0 {
		return fmt.Errorf("cannot discard connections for snap %q while it is present", instanceName)
	}
	conns, err := getConns(st)
	if err != nil {
		return err
	}
	// Collect and drop every connection in which the snap participates on
	// either the plug or the slot side.
	removed := make(map[string]*connState)
	for id := range conns {
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}
		if connRef.PlugRef.Snap == instanceName || connRef.SlotRef.Snap == instanceName {
			removed[id] = conns[id]
			delete(conns, id)
		}
	}
	// Stash the removed entries for the undo handler.
	task.Set("removed", removed)
	setConns(st, conns)
	return nil
}
   337  
   338  func (m *InterfaceManager) undoDiscardConns(task *state.Task, _ *tomb.Tomb) error {
   339  	st := task.State()
   340  	st.Lock()
   341  	defer st.Unlock()
   342  
   343  	var removed map[string]*connState
   344  	err := task.Get("removed", &removed)
   345  	if err != nil && err != state.ErrNoState {
   346  		return err
   347  	}
   348  
   349  	conns, err := getConns(st)
   350  	if err != nil {
   351  		return err
   352  	}
   353  
   354  	for id, connState := range removed {
   355  		conns[id] = connState
   356  	}
   357  	setConns(st, conns)
   358  	task.Set("removed", nil)
   359  	return nil
   360  }
   361  
// getDynamicHookAttributes returns the dynamic plug and slot attributes
// stored on the given task under "plug-dynamic" and "slot-dynamic". Missing
// entries are tolerated (state.ErrNoState) and normalized to empty maps so
// callers never see nil.
func getDynamicHookAttributes(task *state.Task) (plugAttrs, slotAttrs map[string]interface{}, err error) {
	if err = task.Get("plug-dynamic", &plugAttrs); err != nil && err != state.ErrNoState {
		return nil, nil, err
	}
	if err = task.Get("slot-dynamic", &slotAttrs); err != nil && err != state.ErrNoState {
		return nil, nil, err
	}
	// Normalize: absent attributes become empty (non-nil) maps.
	if plugAttrs == nil {
		plugAttrs = make(map[string]interface{})
	}
	if slotAttrs == nil {
		slotAttrs = make(map[string]interface{})
	}

	return plugAttrs, slotAttrs, nil
}
   378  
   379  func setDynamicHookAttributes(task *state.Task, plugAttrs, slotAttrs map[string]interface{}) {
   380  	task.Set("plug-dynamic", plugAttrs)
   381  	task.Set("slot-dynamic", slotAttrs)
   382  }
   383  
// doConnect is the task handler for "connect": it establishes the connection
// between the plug and slot referenced by the task, records it in the conns
// map in the state and, unless "delayed-setup-profiles" is set, regenerates
// the security profiles of both snaps.
func (m *InterfaceManager) doConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := timings.NewForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	// "auto" marks connections established by auto-connect rather than by
	// an explicit user request.
	var autoConnect bool
	if err := task.Get("auto", &autoConnect); err != nil && err != state.ErrNoState {
		return err
	}
	// "by-gadget" marks auto-connections instructed by the gadget snap.
	var byGadget bool
	if err := task.Get("by-gadget", &byGadget); err != nil && err != state.ErrNoState {
		return err
	}
	// "delayed-setup-profiles" defers security profile regeneration to a
	// later task.
	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", plugRef.Snap)
		}
		return err
	}

	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", slotRef.Snap)
		}
		return err
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}

	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// attributes are always present, even if there are no hooks (they're initialized by Connect).
	plugDynamicAttrs, slotDynamicAttrs, err := getDynamicHookAttributes(task)
	if err != nil {
		return fmt.Errorf("failed to get hook attributes: %s", err)
	}

	var policyChecker interfaces.PolicyFunc

	// manual connections and connections by the gadget obey the
	// policy "connection" rules, other auto-connections obey the
	// "auto-connection" rules
	if autoConnect && !byGadget {
		autochecker, err := newAutoConnectChecker(st, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = autochecker.check
	} else {
		policyCheck, err := newConnectChecker(st, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = policyCheck.check
	}

	// static attributes of the plug and slot not provided, the ones from snap infos will be used
	conn, err := m.repo.Connect(connRef, nil, plugDynamicAttrs, nil, slotDynamicAttrs, policyChecker)
	if err != nil || conn == nil {
		// NOTE(review): conn == nil with nil err returns nil, i.e. the task
		// succeeds without recording a connection — presumably the policy
		// checker vetoed it; confirm against repo.Connect semantics.
		return err
	}

	if !delayedSetupProfiles {
		slotOpts := confinementOptions(slotSnapst.Flags)
		if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
			return err
		}

		plugOpts := confinementOptions(plugSnapst.Flags)
		if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
			return err
		}
	} else {
		logger.Debugf("Connect handler: skipping setupSnapSecurity for snaps %q and %q", plug.Snap.InstanceName(), slot.Snap.InstanceName())
	}

	// Record the established connection in the state.
	conns[connRef.ID()] = &connState{
		Interface:        conn.Interface(),
		StaticPlugAttrs:  conn.Plug.StaticAttrs(),
		DynamicPlugAttrs: conn.Plug.DynamicAttrs(),
		StaticSlotAttrs:  conn.Slot.StaticAttrs(),
		DynamicSlotAttrs: conn.Slot.DynamicAttrs(),
		Auto:             autoConnect,
		ByGadget:         byGadget,
		HotplugKey:       slot.HotplugKey,
	}
	setConns(st, conns)

	// the dynamic attributes might have been updated by the interface's BeforeConnectPlug/Slot code,
	// so we need to update the task for connect-plug- and connect-slot- hooks to see new values.
	setDynamicHookAttributes(task, conn.Plug.DynamicAttrs(), conn.Slot.DynamicAttrs())
	return nil
}
   514  
// doDisconnect is the task handler for "disconnect": it severs the referenced
// plug-slot connection in the repository, regenerates the security profiles
// of both snaps and updates the conns map in the state according to how the
// disconnect was triggered (manual, auto-disconnect on removal, or hotplug
// device removal).
func (m *InterfaceManager) doDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := timings.NewForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// Collect the states of both affected snaps; if either snap no longer
	// exists the whole disconnect becomes a no-op.
	var snapStates []snapstate.SnapState
	for _, instanceName := range []string{plugRef.Snap, slotRef.Snap} {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, instanceName, &snapst); err != nil {
			if err == state.ErrNoState {
				task.Logf("skipping disconnect operation for connection %s %s, snap %q doesn't exist", plugRef, slotRef, instanceName)
				return nil
			}
			task.Errorf("skipping security profiles setup for snap %q when disconnecting %s from %s: %v", instanceName, plugRef, slotRef, err)
		} else {
			snapStates = append(snapStates, snapst)
		}
	}

	err = m.repo.Disconnect(plugRef.Snap, plugRef.Name, slotRef.Snap, slotRef.Name)
	if err != nil {
		return fmt.Errorf("snapd changed, please retry the operation: %v", err)
	}
	// Regenerate the security profiles of the snaps that were connected.
	for _, snapst := range snapStates {
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		if err := m.setupSnapSecurity(task, snapInfo, opts, perfTimings); err != nil {
			return err
		}
	}

	cref := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}
	conn, ok := conns[cref.ID()]
	if !ok {
		return fmt.Errorf("internal error: connection %q not found in state", cref.ID())
	}

	// store old connection for undo
	task.Set("old-conn", conn)

	// "auto-disconnect" flag indicates it's a disconnect triggered automatically as part of snap removal;
	// such disconnects should not set undesired flag and instead just remove the connection.
	var autoDisconnect bool
	if err := task.Get("auto-disconnect", &autoDisconnect); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: failed to read 'auto-disconnect' flag: %s", err)
	}

	// "by-hotplug" flag indicates it's a disconnect triggered by hotplug remove event;
	// we want to keep information of the connection and just mark it as hotplug-gone.
	var byHotplug bool
	if err := task.Get("by-hotplug", &byHotplug); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'by-hotplug' flag: %s", err)
	}

	switch {
	case byHotplug:
		// Hotplug removal: keep the entry, mark the device as gone.
		conn.HotplugGone = true
		conns[cref.ID()] = conn
	case conn.Auto && !autoDisconnect:
		// Manual disconnect of an auto-connection: remember it as undesired
		// and drop the cached attributes.
		conn.Undesired = true
		conn.DynamicPlugAttrs = nil
		conn.DynamicSlotAttrs = nil
		conn.StaticPlugAttrs = nil
		conn.StaticSlotAttrs = nil
		conns[cref.ID()] = conn
	default:
		// Plain removal of the connection from the state.
		delete(conns, cref.ID())
	}
	setConns(st, conns)

	return nil
}
   603  
// undoDisconnect reverts a "disconnect" task by re-establishing the
// connection saved on the task as "old-conn" and regenerating the security
// profiles of both snaps.
func (m *InterfaceManager) undoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := timings.NewForTask(task)
	defer perfTimings.Save(st)

	var oldconn connState
	err := task.Get("old-conn", &oldconn)
	if err == state.ErrNoState {
		// The forward handler did not disconnect anything.
		return nil
	}
	if err != nil {
		return err
	}

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// Reconnect using the saved dynamic attributes; the policy checker is
	// nil since the connection existed before the disconnect.
	_, err = m.repo.Connect(connRef, nil, oldconn.DynamicPlugAttrs, nil, oldconn.DynamicSlotAttrs, nil)
	if err != nil {
		return err
	}

	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	// Restore the connection entry in the state.
	conns[connRef.ID()] = &oldconn
	setConns(st, conns)

	return nil
}
   670  
   671  func (m *InterfaceManager) undoConnect(task *state.Task, _ *tomb.Tomb) error {
   672  	st := task.State()
   673  	st.Lock()
   674  	defer st.Unlock()
   675  
   676  	plugRef, slotRef, err := getPlugAndSlotRefs(task)
   677  	if err != nil {
   678  		return err
   679  	}
   680  	connRef := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}
   681  	conns, err := getConns(st)
   682  	if err != nil {
   683  		return err
   684  	}
   685  	delete(conns, connRef.ID())
   686  	setConns(st, conns)
   687  	return nil
   688  }
   689  
// contentLinkRetryTimeout is the retry timeout used for shared content
// (content interface) related retries.
var contentLinkRetryTimeout = 30 * time.Second

// hotplugRetryTimeout is the retry timeout used for hotplug-related tasks.
var hotplugRetryTimeout = 300 * time.Millisecond
   695  
   696  // defaultContentProviders returns a dict of the default-providers for the
   697  // content plugs for the given instanceName
   698  func (m *InterfaceManager) defaultContentProviders(instanceName string) map[string]bool {
   699  	plugs := m.repo.Plugs(instanceName)
   700  	defaultProviders := make(map[string]bool, len(plugs))
   701  	for _, plug := range plugs {
   702  		if plug.Interface == "content" {
   703  			var s string
   704  			if err := plug.Attr("content", &s); err == nil && s != "" {
   705  				var dprovider string
   706  				if err := plug.Attr("default-provider", &dprovider); err == nil && dprovider != "" {
   707  					defaultProviders[dprovider] = true
   708  				}
   709  			}
   710  		}
   711  	}
   712  	return defaultProviders
   713  }
   714  
   715  func obsoleteCorePhase2SetupProfiles(kind string, task *state.Task) (bool, error) {
   716  	if kind != "setup-profiles" {
   717  		return false, nil
   718  	}
   719  
   720  	var corePhase2 bool
   721  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   722  		return false, err
   723  	}
   724  	return corePhase2, nil
   725  }
   726  
// checkAutoconnectConflicts checks whether any in-flight task conflicts with
// auto-connecting plugSnap's plug to slotSnap's slot; if so it returns a
// state.Retry error so the auto-connect is attempted again later.
func checkAutoconnectConflicts(st *state.State, autoconnectTask *state.Task, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
			// are rejected by conflict check logic in snapstate.
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
			}
			if slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}

		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		// setup-profiles core-phase-2 is now no-op, we shouldn't
		// conflict on it; note, old snapd would create this task even
		// for regular snaps if installed with the dangerous flag.
		obsoleteCorePhase2, err := obsoleteCorePhase2SetupProfiles(k, task)
		if err != nil {
			return err
		}
		if obsoleteCorePhase2 {
			continue
		}

		// other snap that affects us because of plug or slot
		if k == "unlink-snap" || k == "link-snap" || k == "setup-profiles" || k == "discard-snap" {
			// discard-snap is scheduled as part of garbage collection during refresh, if multiple revisions are already installed.
			// this revision check avoids conflict with own discard tasks created as part of install/refresh.
			if k == "discard-snap" && autoconnectTask.Change() != nil && autoconnectTask.Change().ID() == task.Change().ID() {
				continue
			}
			// if snap is getting removed, we will retry but the snap will be gone and auto-connect becomes no-op
			// if snap is getting installed/refreshed - temporary conflict, retry later
			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
		}
	}
	return nil
}
   789  
// checkDisconnectConflicts checks whether any in-flight task conflicts with
// disconnecting plugSnap from slotSnap on behalf of disconnectingSnap; if so
// it returns a state.Retry error so the disconnect is attempted again later.
func checkDisconnectConflicts(st *state.State, disconnectingSnap, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
			// are rejected by conflict check logic in snapstate.
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap || slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}

		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		// another task related to same snap op (unrelated op would be blocked by snapstate conflict logic)
		if otherSnapName == disconnectingSnap {
			continue
		}

		// note, don't care about unlink-snap for the opposite end. This relies
		// on the fact that auto-disconnect will create conflicting "disconnect" tasks that
		// we will retry with the logic above.
		if k == "link-snap" || k == "setup-profiles" {
			// other snap is getting installed/refreshed - temporary conflict
			return &state.Retry{After: connectRetryTimeout}
		}
	}
	return nil
}
   839  
   840  func checkHotplugDisconnectConflicts(st *state.State, plugSnap, slotSnap string) error {
   841  	for _, task := range st.Tasks() {
   842  		if task.Status().Ready() {
   843  			continue
   844  		}
   845  
   846  		k := task.Kind()
   847  		if k == "connect" || k == "disconnect" {
   848  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   849  			if err != nil {
   850  				return err
   851  			}
   852  			if plugRef.Snap == plugSnap {
   853  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
   854  			}
   855  			if slotRef.Snap == slotSnap {
   856  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
   857  			}
   858  			continue
   859  		}
   860  
   861  		snapsup, err := snapstate.TaskSnapSetup(task)
   862  		// e.g. hook tasks don't have task snap setup
   863  		if err != nil {
   864  			continue
   865  		}
   866  		otherSnapName := snapsup.InstanceName()
   867  
   868  		// different snaps - no conflict
   869  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   870  			continue
   871  		}
   872  
   873  		if k == "link-snap" || k == "setup-profiles" || k == "unlink-snap" {
   874  			// other snap is getting installed/refreshed/removed - temporary conflict
   875  			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
   876  		}
   877  	}
   878  	return nil
   879  }
   880  
   881  // inSameChangeWaitChains returns true if there is a wait chain so
   882  // that `startT` is run before `searchT` in the same state.Change.
   883  func inSameChangeWaitChain(startT, searchT *state.Task) bool {
   884  	// Trivial case, tasks in different changes (they could in theory
   885  	// still have cross-change waits but we don't do these today).
   886  	// In this case, return quickly.
   887  	if startT.Change() != searchT.Change() {
   888  		return false
   889  	}
   890  	// Do a recursive check if its in the same change
   891  	return waitChainSearch(startT, searchT)
   892  }
   893  
   894  func waitChainSearch(startT, searchT *state.Task) bool {
   895  	for _, cand := range startT.HaltTasks() {
   896  		if cand == searchT {
   897  			return true
   898  		}
   899  		if waitChainSearch(cand, searchT) {
   900  			return true
   901  		}
   902  	}
   903  
   904  	return false
   905  }
   906  
// batchConnectTasks creates connect tasks and interface hooks for
// conns and sets their wait chain with regard to the setupProfiles
// task.
//
// The tasks are chained so that:
//   - prepare-plug-, prepare-slot- and connect tasks are all executed
//     before setup-profiles,
//   - connect-plug-, connect-slot- hooks are all executed after
//     setup-profiles.
//
// The "delayed-setup-profiles" flag is set on the connect tasks to
// indicate that doConnect handler should not set security backends up
// because this will be done later by the setup-profiles task.
func batchConnectTasks(st *state.State, snapsup *snapstate.SnapSetup, conns map[string]*interfaces.ConnRef, autoconnect bool) (*state.TaskSet, error) {
	setupProfiles := st.NewTask("setup-profiles", fmt.Sprintf(i18n.G("Setup snap %q (%s) security profiles for auto-connections"), snapsup.InstanceName(), snapsup.Revision()))
	setupProfiles.Set("snap-setup", snapsup)

	ts := state.NewTaskSet()
	for _, conn := range conns {
		// DelayedSetupProfiles defers security-backend setup to the
		// shared setup-profiles task created above
		connectTs, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: autoconnect, DelayedSetupProfiles: true})
		if err != nil {
			return nil, fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
		}

		// setup-profiles needs to wait for the main "connect" task
		connectTask, _ := connectTs.Edge(ConnectTaskEdge)
		if connectTask == nil {
			return nil, fmt.Errorf("internal error: no 'connect' task found for %q", conn)
		}
		setupProfiles.WaitFor(connectTask)

		// setup-profiles must be run before the task that marks the end of connect-plug- and connect-slot- hooks
		afterConnectTask, _ := connectTs.Edge(AfterConnectHooksEdge)
		if afterConnectTask != nil {
			afterConnectTask.WaitFor(setupProfiles)
		}
		ts.AddAll(connectTs)
	}
	// only add setup-profiles if there is actually anything to connect
	if len(ts.Tasks()) > 0 {
		ts.AddTask(setupProfiles)
	}
	return ts, nil
}
   947  
// doAutoConnect creates task(s) to connect the given snap to viable candidates.
// It considers both directions (plugs of this snap to slots elsewhere, and
// slots of this snap to plugs elsewhere), injects the resulting connect tasks
// into the current change and marks itself Done in the same state write.
func (m *InterfaceManager) doAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// The previous task (link-snap) may have triggered a restart,
	// if this is the case we can only proceed once the restart
	// has happened or we may not have all the interfaces of the
	// new core/base snap.
	if err := snapstate.WaitRestart(task, snapsup); err != nil {
		return err
	}

	snapName := snapsup.InstanceName()

	autochecker, err := newAutoConnectChecker(st, deviceCtx)
	if err != nil {
		return err
	}

	// wait for auto-install, started by prerequisites code, for
	// the default-providers of content ifaces so we can
	// auto-connect to them
	defaultProviders := m.defaultContentProviders(snapName)
	for _, chg := range st.Changes() {
		if chg.Status().Ready() {
			continue
		}
		for _, t := range chg.Tasks() {
			if t.Status().Ready() {
				continue
			}
			// only installation/refresh tasks are of interest
			if t.Kind() != "link-snap" && t.Kind() != "setup-profiles" {
				continue
			}
			if snapsup, err := snapstate.TaskSnapSetup(t); err == nil {
				// Only retry if the task that installs the
				// content provider is not waiting for us
				// (or this will just hang forever).
				if defaultProviders[snapsup.InstanceName()] && !inSameChangeWaitChain(task, t) {
					return &state.Retry{After: contentLinkRetryTimeout}
				}
			}
		}
	}

	plugs := m.repo.Plugs(snapName)
	slots := m.repo.Slots(snapName)
	// keyed by connection ID to de-duplicate the plug and slot passes below
	newconns := make(map[string]*interfaces.ConnRef, len(plugs)+len(slots))

	// Auto-connect all the plugs
	for _, plug := range plugs {
		candidates := m.repo.AutoConnectCandidateSlots(snapName, plug.Name, autochecker.check)
		if len(candidates) == 0 {
			continue
		}
		// If we are in a core transition we may have both the old ubuntu-core
		// snap and the new core snap providing the same interface. In that
		// situation we want to ignore any candidates in ubuntu-core and simply
		// go with those from the new core snap.
		if len(candidates) == 2 {
			switch {
			case candidates[0].Snap.InstanceName() == "ubuntu-core" && candidates[1].Snap.InstanceName() == "core":
				candidates = candidates[1:2]
			case candidates[1].Snap.InstanceName() == "ubuntu-core" && candidates[0].Snap.InstanceName() == "core":
				candidates = candidates[0:1]
			}
		}
		// ambiguous candidates are logged and skipped rather than picking
		// one arbitrarily
		if len(candidates) != 1 {
			crefs := make([]string, len(candidates))
			for i, candidate := range candidates {
				crefs[i] = candidate.String()
			}
			task.Logf("cannot auto-connect plug %s, candidates found: %s", plug, strings.Join(crefs, ", "))
			continue
		}
		slot := candidates[0]
		connRef := interfaces.NewConnRef(plug, slot)
		key := connRef.ID()
		if _, ok := conns[key]; ok {
			// Suggested connection already exist (or has Undesired flag set) so don't clobber it.
			// NOTE: we don't log anything here as this is a normal and common condition.
			continue
		}

		ignore, err := findSymmetricAutoconnectTask(st, plug.Snap.InstanceName(), slot.Snap.InstanceName(), task)
		if err != nil {
			return err
		}

		if ignore {
			continue
		}

		if err := checkAutoconnectConflicts(st, task, plug.Snap.InstanceName(), slot.Snap.InstanceName()); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("auto-connect conflict check failed: %s", err)
		}
		newconns[connRef.ID()] = connRef
	}
	// Auto-connect all the slots
	for _, slot := range slots {
		candidates := m.repo.AutoConnectCandidatePlugs(snapName, slot.Name, autochecker.check)
		if len(candidates) == 0 {
			continue
		}

		for _, plug := range candidates {
			// make sure slot is the only viable
			// connection for plug, same check as if we were
			// considering auto-connections from plug
			candSlots := m.repo.AutoConnectCandidateSlots(plug.Snap.InstanceName(), plug.Name, autochecker.check)

			if len(candSlots) != 1 || candSlots[0].String() != slot.String() {
				crefs := make([]string, len(candSlots))
				for i, candidate := range candSlots {
					crefs[i] = candidate.String()
				}
				task.Logf("cannot auto-connect slot %s to %s, candidates found: %s", slot, plug, strings.Join(crefs, ", "))
				continue
			}

			connRef := interfaces.NewConnRef(plug, slot)
			key := connRef.ID()
			if _, ok := conns[key]; ok {
				// Suggested connection already exist (or has Undesired flag set) so don't clobber it.
				// NOTE: we don't log anything here as this is a normal and common condition.
				continue
			}
			// already picked up by the plug pass above
			if _, ok := newconns[key]; ok {
				continue
			}

			ignore, err := findSymmetricAutoconnectTask(st, plug.Snap.InstanceName(), slot.Snap.InstanceName(), task)
			if err != nil {
				return err
			}

			if ignore {
				continue
			}

			if err := checkAutoconnectConflicts(st, task, plug.Snap.InstanceName(), slot.Snap.InstanceName()); err != nil {
				if retry, ok := err.(*state.Retry); ok {
					task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
					return err // will retry
				}
				return fmt.Errorf("auto-connect conflict check failed: %s", err)
			}
			newconns[connRef.ID()] = connRef
		}
	}

	autoconnect := true
	autots, err := batchConnectTasks(st, snapsup, newconns, autoconnect)
	if err != nil {
		return err
	}

	if len(autots.Tasks()) > 0 {
		snapstate.InjectTasks(task, autots)

		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic
	// write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}
  1135  
// doAutoDisconnect creates tasks for disconnecting all interfaces of a snap and running its interface hooks.
func (m *InterfaceManager) doAutoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	snapName := snapsup.InstanceName()
	connections, err := m.repo.Connections(snapName)
	if err != nil {
		return err
	}

	// check for conflicts on all connections first before creating disconnect hooks
	for _, connRef := range connections {
		if err := checkDisconnectConflicts(st, snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if _, retry := err.(*state.Retry); retry {
				logger.Debugf("disconnecting interfaces of snap %q will be retried because of %q - %q conflict", snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap)
				task.Logf("Waiting for conflicting change in progress...")
				return err // will retry
			}
			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
		}
	}

	hookTasks := state.NewTaskSet()
	for _, connRef := range connections {
		conn, err := m.repo.Connection(connRef)
		if err != nil {
			// NOTE(review): an error here silently stops processing the
			// remaining connections (break, not continue or an error
			// return) — confirm this best-effort behavior is intended.
			break
		}
		// "auto-disconnect" flag indicates it's a disconnect triggered as part of snap removal, in which
		// case we want to skip the logic of marking auto-connections as 'undesired' and instead just remove
		// them so they can be automatically connected if the snap is installed again.
		ts, err := disconnectTasks(st, conn, disconnectOpts{AutoDisconnect: true})
		if err != nil {
			return err
		}
		hookTasks.AddAll(ts)
	}

	snapstate.InjectTasks(task, hookTasks)

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}
  1187  
  1188  func (m *InterfaceManager) undoAutoConnect(task *state.Task, _ *tomb.Tomb) error {
  1189  	// TODO Introduce disconnection hooks, and run them here as well to give a chance
  1190  	// for the snap to undo whatever it did when the connection was established.
  1191  	return nil
  1192  }
  1193  
  1194  // transitionConnectionsCoreMigration will transition all connections
  1195  // from oldName to newName. Note that this is only useful when you
  1196  // know that newName supports everything that oldName supports,
  1197  // otherwise you will be in a world of pain.
  1198  func (m *InterfaceManager) transitionConnectionsCoreMigration(st *state.State, oldName, newName string) error {
  1199  	// transition over, ubuntu-core has only slots
  1200  	conns, err := getConns(st)
  1201  	if err != nil {
  1202  		return err
  1203  	}
  1204  
  1205  	for id := range conns {
  1206  		connRef, err := interfaces.ParseConnRef(id)
  1207  		if err != nil {
  1208  			return err
  1209  		}
  1210  		if connRef.SlotRef.Snap == oldName {
  1211  			connRef.SlotRef.Snap = newName
  1212  			conns[connRef.ID()] = conns[id]
  1213  			delete(conns, id)
  1214  		}
  1215  	}
  1216  	setConns(st, conns)
  1217  
  1218  	// After migrating connections in state, remove them from repo so they stay in sync and we don't
  1219  	// attempt to run disconnects on when the old core gets removed as part of the transition.
  1220  	if err := m.removeConnections(oldName); err != nil {
  1221  		return err
  1222  	}
  1223  
  1224  	// The reloadConnections() just modifies the repository object, it
  1225  	// has no effect on the running system, i.e. no security profiles
  1226  	// on disk are rewritten. This is ok because core/ubuntu-core have
  1227  	// exactly the same profiles and nothing in the generated policies
  1228  	// has the core snap-name encoded.
  1229  	if _, err := m.reloadConnections(newName); err != nil {
  1230  		return err
  1231  	}
  1232  
  1233  	return nil
  1234  }
  1235  
  1236  func (m *InterfaceManager) doTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1237  	st := t.State()
  1238  	st.Lock()
  1239  	defer st.Unlock()
  1240  
  1241  	var oldName, newName string
  1242  	if err := t.Get("old-name", &oldName); err != nil {
  1243  		return err
  1244  	}
  1245  	if err := t.Get("new-name", &newName); err != nil {
  1246  		return err
  1247  	}
  1248  
  1249  	return m.transitionConnectionsCoreMigration(st, oldName, newName)
  1250  }
  1251  
  1252  func (m *InterfaceManager) undoTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1253  	st := t.State()
  1254  	st.Lock()
  1255  	defer st.Unlock()
  1256  
  1257  	// symmetrical to the "do" method, just reverse them again
  1258  	var oldName, newName string
  1259  	if err := t.Get("old-name", &oldName); err != nil {
  1260  		return err
  1261  	}
  1262  	if err := t.Get("new-name", &newName); err != nil {
  1263  		return err
  1264  	}
  1265  
  1266  	return m.transitionConnectionsCoreMigration(st, newName, oldName)
  1267  }
  1268  
// doGadgetConnect creates task(s) to follow the interface connection instructions from the gadget.
// Missing plugs/slots and already-known connections are logged and skipped;
// conflicts with in-flight tasks cause the whole handler to be retried.
func (m *InterfaceManager) doGadgetConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	gconns, err := snapstate.GadgetConnections(st, deviceCtx)
	if err != nil {
		return err
	}

	gconnts := state.NewTaskSet()
	var newconns []*interfaces.ConnRef

	// consider the gadget connect instructions
	for _, gconn := range gconns {
		plugSnapName, err := resolveSnapIDToName(st, gconn.Plug.SnapID)
		if err != nil {
			return err
		}
		plug := m.repo.Plug(plugSnapName, gconn.Plug.Plug)
		if plug == nil {
			// a missing plug is not an error, just log and skip it
			task.Logf("gadget connect: ignoring missing plug %s:%s", gconn.Plug.SnapID, gconn.Plug.Plug)
			continue
		}

		slotSnapName, err := resolveSnapIDToName(st, gconn.Slot.SnapID)
		if err != nil {
			return err
		}
		slot := m.repo.Slot(slotSnapName, gconn.Slot.Slot)
		if slot == nil {
			// likewise for a missing slot
			task.Logf("gadget connect: ignoring missing slot %s:%s", gconn.Slot.SnapID, gconn.Slot.Slot)
			continue
		}

		connRef := interfaces.NewConnRef(plug, slot)
		key := connRef.ID()
		if _, ok := conns[key]; ok {
			// Gadget connection already exist (or has Undesired flag set) so don't clobber it.
			continue
		}

		if err := checkAutoconnectConflicts(st, task, plug.Snap.InstanceName(), slot.Snap.InstanceName()); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("gadget connect will be retried: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("gadget connect conflict check failed: %s", err)
		}
		newconns = append(newconns, connRef)
	}

	// Create connect tasks and interface hooks
	for _, conn := range newconns {
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: true, ByGadget: true})
		if err != nil {
			return fmt.Errorf("internal error: connect of %q failed: %s", conn, err)
		}
		gconnts.AddAll(ts)
	}

	if len(gconnts.Tasks()) > 0 {
		snapstate.InjectTasks(task, gconnts)

		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic
	// write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}
  1350  
// doHotplugConnect creates task(s) to (re)create old connections or auto-connect viable slots in response to hotplug "add" event.
func (m *InterfaceManager) doHotplugConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	if slot == nil {
		return fmt.Errorf("cannot find hotplug slot for interface %s and hotplug key %q", ifaceName, hotplugKey)
	}

	// find old connections for slots of this device - note we can't ask the repository since we need
	// to recreate old connections that are only remembered in the state.
	connsForDevice := findConnsForHotplugKey(conns, ifaceName, hotplugKey)

	// find old connections to recreate
	var recreate []*interfaces.ConnRef
	for _, id := range connsForDevice {
		conn := conns[id]
		// device was not unplugged, this is the case if snapd is restarted and we enumerate devices.
		// note, the situation where device was not unplugged but has changed is handled
		// by hotplugDeviceAdded handler - updateDevice.
		if !conn.HotplugGone || conn.Undesired {
			continue
		}

		// the device was unplugged while connected, so it had disconnect hooks run; recreate the connection
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}

		if err := checkAutoconnectConflicts(st, task, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("hotplug connect will be retried: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("hotplug connect conflict check failed: %s", err)
		}
		recreate = append(recreate, connRef)
	}

	// find new auto-connections
	autochecker, err := newAutoConnectChecker(st, deviceCtx)
	if err != nil {
		return err
	}

	instanceName := slot.Snap.InstanceName()
	candidates := m.repo.AutoConnectCandidatePlugs(instanceName, slot.Name, autochecker.check)

	var newconns []*interfaces.ConnRef
	// Auto-connect the slots
	for _, plug := range candidates {
		// make sure slot is the only viable
		// connection for plug, same check as if we were
		// considering auto-connections from plug
		candSlots := m.repo.AutoConnectCandidateSlots(plug.Snap.InstanceName(), plug.Name, autochecker.check)
		if len(candSlots) != 1 || candSlots[0].String() != slot.String() {
			crefs := make([]string, len(candSlots))
			for i, candidate := range candSlots {
				crefs[i] = candidate.String()
			}
			task.Logf("cannot auto-connect slot %s to %s, candidates found: %s", slot, plug, strings.Join(crefs, ", "))
			continue
		}

		connRef := interfaces.NewConnRef(plug, slot)
		key := connRef.ID()
		if _, ok := conns[key]; ok {
			// existing connection, already considered by connsForDevice loop
			continue
		}

		if err := checkAutoconnectConflicts(st, task, plug.Snap.InstanceName(), slot.Snap.InstanceName()); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("hotplug connect will be retried: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("hotplug connect conflict check failed: %s", err)
		}
		newconns = append(newconns, connRef)
	}

	// nothing to recreate and nothing new to connect - we are done
	if len(recreate) == 0 && len(newconns) == 0 {
		return nil
	}

	// Create connect tasks and interface hooks for old connections
	connectTs := state.NewTaskSet()
	for _, conn := range recreate {
		// preserve the auto-connect flag the old connection had
		wasAutoconnected := conns[conn.ID()].Auto
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: wasAutoconnected})
		if err != nil {
			return fmt.Errorf("internal error: connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}
	// Create connect tasks and interface hooks for new auto-connections
	for _, conn := range newconns {
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: true})
		if err != nil {
			return fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}

	if len(connectTs.Tasks()) > 0 {
		snapstate.InjectTasks(task, connectTs)
		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}
  1486  
  1487  // doHotplugUpdateSlot updates static attributes of a hotplug slot for given device.
  1488  func (m *InterfaceManager) doHotplugUpdateSlot(task *state.Task, _ *tomb.Tomb) error {
  1489  	st := task.State()
  1490  	st.Lock()
  1491  	defer st.Unlock()
  1492  
  1493  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1494  	if err != nil {
  1495  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1496  	}
  1497  
  1498  	var attrs map[string]interface{}
  1499  	if err := task.Get("slot-attrs", &attrs); err != nil {
  1500  		return fmt.Errorf("internal error: cannot get slot-attrs attribute for device %s, interface %s: %s", hotplugKey, ifaceName, err)
  1501  	}
  1502  
  1503  	stateSlots, err := getHotplugSlots(st)
  1504  	if err != nil {
  1505  		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
  1506  	}
  1507  
  1508  	slot, err := m.repo.UpdateHotplugSlotAttrs(ifaceName, hotplugKey, attrs)
  1509  	if err != nil {
  1510  		return err
  1511  	}
  1512  
  1513  	if slotSpec, ok := stateSlots[slot.Name]; ok {
  1514  		slotSpec.StaticAttrs = attrs
  1515  		stateSlots[slot.Name] = slotSpec
  1516  		setHotplugSlots(st, stateSlots)
  1517  	} else {
  1518  		return fmt.Errorf("internal error: cannot find slot %s for device %q", slot.Name, hotplugKey)
  1519  	}
  1520  
  1521  	return nil
  1522  }
  1523  
  1524  // doHotplugRemoveSlot removes hotplug slot for given device from the repository in response to udev "remove" event.
  1525  // This task must necessarily be run after all affected slot gets disconnected in the repo.
  1526  func (m *InterfaceManager) doHotplugRemoveSlot(task *state.Task, _ *tomb.Tomb) error {
  1527  	st := task.State()
  1528  	st.Lock()
  1529  	defer st.Unlock()
  1530  
  1531  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1532  	if err != nil {
  1533  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1534  	}
  1535  
  1536  	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
  1537  	if err != nil {
  1538  		return fmt.Errorf("internal error: cannot determine slots: %v", err)
  1539  	}
  1540  	if slot != nil {
  1541  		if err := m.repo.RemoveSlot(slot.Snap.InstanceName(), slot.Name); err != nil {
  1542  			return fmt.Errorf("cannot remove hotplug slot: %v", err)
  1543  		}
  1544  	}
  1545  
  1546  	stateSlots, err := getHotplugSlots(st)
  1547  	if err != nil {
  1548  		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
  1549  	}
  1550  
  1551  	// remove the slot from hotplug-slots in the state as long as there are no connections referencing it,
  1552  	// including connection with hotplug-gone=true.
  1553  	slotDef := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
  1554  	if slotDef == nil {
  1555  		return fmt.Errorf("internal error: cannot find hotplug slot for interface %s, hotplug key %q", ifaceName, hotplugKey)
  1556  	}
  1557  	conns, err := getConns(st)
  1558  	if err != nil {
  1559  		return err
  1560  	}
  1561  	for _, conn := range conns {
  1562  		if conn.Interface == slotDef.Interface && conn.HotplugKey == slotDef.HotplugKey {
  1563  			// there is a connection referencing this slot, do not remove it, only mark as "gone"
  1564  			slotDef.HotplugGone = true
  1565  			stateSlots[slotDef.Name] = slotDef
  1566  			setHotplugSlots(st, stateSlots)
  1567  			return nil
  1568  		}
  1569  	}
  1570  	delete(stateSlots, slotDef.Name)
  1571  	setHotplugSlots(st, stateSlots)
  1572  
  1573  	return nil
  1574  }
  1575  
  1576  // doHotplugDisconnect creates task(s) to disconnect connections and remove slots in response to hotplug "remove" event.
  1577  func (m *InterfaceManager) doHotplugDisconnect(task *state.Task, _ *tomb.Tomb) error {
  1578  	st := task.State()
  1579  	st.Lock()
  1580  	defer st.Unlock()
  1581  
  1582  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1583  	if err != nil {
  1584  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1585  	}
  1586  
  1587  	connections, err := m.repo.ConnectionsForHotplugKey(ifaceName, hotplugKey)
  1588  	if err != nil {
  1589  		return err
  1590  	}
  1591  	if len(connections) == 0 {
  1592  		return nil
  1593  	}
  1594  
  1595  	// check for conflicts on all connections first before creating disconnect hooks
  1596  	for _, connRef := range connections {
  1597  		if err := checkHotplugDisconnectConflicts(st, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
  1598  			if retry, ok := err.(*state.Retry); ok {
  1599  				task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
  1600  				return err // will retry
  1601  			}
  1602  			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
  1603  		}
  1604  	}
  1605  
  1606  	dts := state.NewTaskSet()
  1607  	for _, connRef := range connections {
  1608  		conn, err := m.repo.Connection(connRef)
  1609  		if err != nil {
  1610  			// this should never happen since we get all connections from the repo
  1611  			return fmt.Errorf("internal error: cannot get connection %q: %s", connRef, err)
  1612  		}
  1613  		// "by-hotplug" flag indicates it's a disconnect triggered as part of hotplug removal.
  1614  		ts, err := disconnectTasks(st, conn, disconnectOpts{ByHotplug: true})
  1615  		if err != nil {
  1616  			return fmt.Errorf("internal error: cannot create disconnect tasks: %s", err)
  1617  		}
  1618  		dts.AddAll(ts)
  1619  	}
  1620  
  1621  	snapstate.InjectTasks(task, dts)
  1622  	st.EnsureBefore(0)
  1623  
  1624  	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
  1625  	task.SetStatus(state.DoneStatus)
  1626  
  1627  	return nil
  1628  }
  1629  
  1630  func (m *InterfaceManager) doHotplugAddSlot(task *state.Task, _ *tomb.Tomb) error {
  1631  	st := task.State()
  1632  	st.Lock()
  1633  	defer st.Unlock()
  1634  
  1635  	systemSnap, err := systemSnapInfo(st)
  1636  	if err != nil {
  1637  		return fmt.Errorf("system snap not available")
  1638  	}
  1639  
  1640  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1641  	if err != nil {
  1642  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1643  	}
  1644  
  1645  	var proposedSlot hotplug.ProposedSlot
  1646  	if err := task.Get("proposed-slot", &proposedSlot); err != nil {
  1647  		return fmt.Errorf("internal error: cannot get proposed hotplug slot from task attributes: %s", err)
  1648  	}
  1649  	var devinfo hotplug.HotplugDeviceInfo
  1650  	if err := task.Get("device-info", &devinfo); err != nil {
  1651  		return fmt.Errorf("internal error: cannot get hotplug device info from task attributes: %s", err)
  1652  	}
  1653  
  1654  	stateSlots, err := getHotplugSlots(st)
  1655  	if err != nil {
  1656  		return fmt.Errorf("internal error obtaining hotplug slots: %v", err.Error())
  1657  	}
  1658  
  1659  	iface := m.repo.Interface(ifaceName)
  1660  	if iface == nil {
  1661  		return fmt.Errorf("internal error: cannot find interface %s", ifaceName)
  1662  	}
  1663  
  1664  	slot := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
  1665  
  1666  	// if we know this slot already, restore / update it.
  1667  	if slot != nil {
  1668  		if slot.HotplugGone {
  1669  			// hotplugGone means the device was unplugged, so its disconnect hooks were run and can now
  1670  			// simply recreate the slot with potentially new attributes, and old connections will be re-created
  1671  			newSlot := &snap.SlotInfo{
  1672  				Name:       slot.Name,
  1673  				Label:      proposedSlot.Label,
  1674  				Snap:       systemSnap,
  1675  				Interface:  ifaceName,
  1676  				Attrs:      proposedSlot.Attrs,
  1677  				HotplugKey: hotplugKey,
  1678  			}
  1679  			return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1680  		}
  1681  
  1682  		// else - not gone, restored already by reloadConnections, but may need updating.
  1683  		if !reflect.DeepEqual(proposedSlot.Attrs, slot.StaticAttrs) {
  1684  			ts := updateDevice(st, iface.Name(), hotplugKey, proposedSlot.Attrs)
  1685  			snapstate.InjectTasks(task, ts)
  1686  			st.EnsureBefore(0)
  1687  			task.SetStatus(state.DoneStatus)
  1688  		} // else - nothing to do
  1689  		return nil
  1690  	}
  1691  
  1692  	// New slot.
  1693  	slotName := hotplugSlotName(hotplugKey, systemSnap.InstanceName(), proposedSlot.Name, iface.Name(), &devinfo, m.repo, stateSlots)
  1694  	newSlot := &snap.SlotInfo{
  1695  		Name:       slotName,
  1696  		Label:      proposedSlot.Label,
  1697  		Snap:       systemSnap,
  1698  		Interface:  iface.Name(),
  1699  		Attrs:      proposedSlot.Attrs,
  1700  		HotplugKey: hotplugKey,
  1701  	}
  1702  	return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1703  }
  1704  
  1705  // doHotplugSeqWait returns Retry error if there is another change for same hotplug key and a lower sequence number.
  1706  // Sequence numbers control the order of execution of hotplug-related changes, which would otherwise be executed in
  1707  // arbitrary order by task runner, leading to unexpected results if multiple events for same device are in flight
  1708  // (e.g. plugging, followed by immediate unplugging, or snapd restart with pending hotplug changes).
  1709  // The handler expects "hotplug-key" and "hotplug-seq" values set on own and other hotplug-related changes.
  1710  func (m *InterfaceManager) doHotplugSeqWait(task *state.Task, _ *tomb.Tomb) error {
  1711  	st := task.State()
  1712  	st.Lock()
  1713  	defer st.Unlock()
  1714  
  1715  	chg := task.Change()
  1716  	if chg == nil || !isHotplugChange(chg) {
  1717  		return fmt.Errorf("internal error: task %q not in a hotplug change", task.Kind())
  1718  	}
  1719  
  1720  	seq, hotplugKey, err := getHotplugChangeAttrs(chg)
  1721  	if err != nil {
  1722  		return err
  1723  	}
  1724  
  1725  	for _, otherChg := range st.Changes() {
  1726  		if otherChg.Status().Ready() || otherChg.ID() == chg.ID() {
  1727  			continue
  1728  		}
  1729  
  1730  		// only inspect hotplug changes
  1731  		if !isHotplugChange(otherChg) {
  1732  			continue
  1733  		}
  1734  
  1735  		otherSeq, otherKey, err := getHotplugChangeAttrs(otherChg)
  1736  		if err != nil {
  1737  			return err
  1738  		}
  1739  
  1740  		// conflict with retry if there another change affecting same device and has lower sequence number
  1741  		if hotplugKey == otherKey && otherSeq < seq {
  1742  			task.Logf("Waiting processing of earlier hotplug event change %q affecting device with hotplug key %q", otherChg.Kind(), hotplugKey)
  1743  			// TODO: consider introducing a new task that runs last and does EnsureBefore(0) for hotplug changes
  1744  			return &state.Retry{After: hotplugRetryTimeout}
  1745  		}
  1746  	}
  1747  
  1748  	// no conflicting change for same hotplug key found
  1749  	return nil
  1750  }