gopkg.in/ubuntu-core/snappy.v0@v0.0.0-20210902073436-25a8614f10a6/overlord/ifacestate/handlers.go (about)

     1  // -*- Mode: Go; indent-tabs-mode: t -*-
     2  
     3  /*
     4   * Copyright (C) 2016 Canonical Ltd
     5   *
     6   * This program is free software: you can redistribute it and/or modify
     7   * it under the terms of the GNU General Public License version 3 as
     8   * published by the Free Software Foundation.
     9   *
    10   * This program is distributed in the hope that it will be useful,
    11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13   * GNU General Public License for more details.
    14   *
    15   * You should have received a copy of the GNU General Public License
    16   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17   *
    18   */
    19  
    20  package ifacestate
    21  
    22  import (
    23  	"fmt"
    24  	"reflect"
    25  	"sort"
    26  	"strings"
    27  	"time"
    28  
    29  	"gopkg.in/tomb.v2"
    30  
    31  	"github.com/snapcore/snapd/i18n"
    32  	"github.com/snapcore/snapd/interfaces"
    33  	"github.com/snapcore/snapd/interfaces/hotplug"
    34  	"github.com/snapcore/snapd/logger"
    35  	"github.com/snapcore/snapd/overlord/hookstate"
    36  	"github.com/snapcore/snapd/overlord/snapstate"
    37  	"github.com/snapcore/snapd/overlord/state"
    38  	"github.com/snapcore/snapd/snap"
    39  	"github.com/snapcore/snapd/timings"
    40  )
    41  
    42  // confinementOptions returns interfaces.ConfinementOptions from snapstate.Flags.
    43  func confinementOptions(flags snapstate.Flags) interfaces.ConfinementOptions {
    44  	return interfaces.ConfinementOptions{
    45  		DevMode:  flags.DevMode,
    46  		JailMode: flags.JailMode,
    47  		Classic:  flags.Classic,
    48  	}
    49  }
    50  
    51  func (m *InterfaceManager) setupAffectedSnaps(task *state.Task, affectingSnap string, affectedSnaps []string, tm timings.Measurer) error {
    52  	st := task.State()
    53  
    54  	// Setup security of the affected snaps.
    55  	for _, affectedInstanceName := range affectedSnaps {
    56  		// the snap that triggered the change needs to be skipped
    57  		if affectedInstanceName == affectingSnap {
    58  			continue
    59  		}
    60  		var snapst snapstate.SnapState
    61  		if err := snapstate.Get(st, affectedInstanceName, &snapst); err != nil {
    62  			task.Errorf("skipping security profiles setup for snap %q when handling snap %q: %v", affectedInstanceName, affectingSnap, err)
    63  			continue
    64  		}
    65  		affectedSnapInfo, err := snapst.CurrentInfo()
    66  		if err != nil {
    67  			return err
    68  		}
    69  		if err := addImplicitSlots(st, affectedSnapInfo); err != nil {
    70  			return err
    71  		}
    72  		opts := confinementOptions(snapst.Flags)
    73  		if err := m.setupSnapSecurity(task, affectedSnapInfo, opts, tm); err != nil {
    74  			return err
    75  		}
    76  	}
    77  	return nil
    78  }
    79  
// doSetupProfiles is the task handler that (re)generates the security
// profiles of the snap described by the task's SnapSetup.
func (m *InterfaceManager) doSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
	task.State().Lock()
	defer task.State().Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(task.State())

	// Get snap.Info from bits handed by the snap manager.
	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	snapInfo, err := snap.ReadInfo(snapsup.InstanceName(), snapsup.SideInfo)
	if err != nil {
		return err
	}

	// Surface any bad-interface problems as a state warning.
	if len(snapInfo.BadInterfaces) > 0 {
		task.State().Warnf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// We no longer do/need core-phase-2, see
	//   https://github.com/snapcore/snapd/pull/5301
	// This code is just here to deal with old state that may still
	// have the 2nd setup-profiles with this flag set.
	var corePhase2 bool
	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
		return err
	}
	if corePhase2 {
		// nothing to do
		return nil
	}

	opts := confinementOptions(snapsup.Flags)
	return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
}
   118  
// setupProfilesForSnap refreshes the interface repository entry for the
// given snap and regenerates the security profiles of the snap and of
// every snap affected by its (re)connections.
func (m *InterfaceManager) setupProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapInfo *snap.Info, opts interfaces.ConfinementOptions, tm timings.Measurer) error {
	st := task.State()

	if err := addImplicitSlots(task.State(), snapInfo); err != nil {
		return err
	}

	snapName := snapInfo.InstanceName()

	// The snap may have been updated so perform the following operation to
	// ensure that we are always working on the correct state:
	//
	// - disconnect all connections to/from the given snap
	//   - remembering the snaps that were affected by this operation
	// - remove the (old) snap from the interfaces repository
	// - add the (new) snap to the interfaces repository
	// - restore connections based on what is kept in the state
	//   - if a connection cannot be restored then remove it from the state
	// - setup the security of all the affected snaps
	disconnectedSnaps, err := m.repo.DisconnectSnap(snapName)
	if err != nil {
		return err
	}
	// XXX: what about snap renames? We should remove the old name (or switch
	// to IDs in the interfaces repository)
	if err := m.repo.RemoveSnap(snapName); err != nil {
		return err
	}
	if err := m.repo.AddSnap(snapInfo); err != nil {
		return err
	}
	if len(snapInfo.BadInterfaces) > 0 {
		task.Logf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// Reload the connections and compute the set of affected snaps. The set
	// affectedSet contains the names of all the affected snap instances. The
	// arrays affectedNames and affectedSnaps contain arrays of snap names and
	// snapInfo's, respectively. The arrays are sorted by name with the special
	// exception that the snap being setup is always first. The affectedSnaps
	// array may be shorter than the set of affected snaps in case any of the
	// snaps cannot be found in the state.
	reconnectedSnaps, err := m.reloadConnections(snapName)
	if err != nil {
		return err
	}
	affectedSet := make(map[string]bool)
	for _, name := range disconnectedSnaps {
		affectedSet[name] = true
	}
	for _, name := range reconnectedSnaps {
		affectedSet[name] = true
	}

	// Sort the set of affected names, ensuring that the snap being setup
	// is first regardless of the name it has.
	affectedNames := make([]string, 0, len(affectedSet))
	for name := range affectedSet {
		if name != snapName {
			affectedNames = append(affectedNames, name)
		}
	}
	sort.Strings(affectedNames)
	affectedNames = append([]string{snapName}, affectedNames...)

	// Obtain snap.Info for each affected snap, skipping those that cannot be
	// found and compute the confinement options that apply to it.
	affectedSnaps := make([]*snap.Info, 0, len(affectedSet))
	confinementOpts := make([]interfaces.ConfinementOptions, 0, len(affectedSet))
	// For the snap being setup we know exactly what was requested.
	affectedSnaps = append(affectedSnaps, snapInfo)
	confinementOpts = append(confinementOpts, opts)
	// For remaining snaps we need to interrogate the state.
	for _, name := range affectedNames[1:] {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, name, &snapst); err != nil {
			task.Errorf("cannot obtain state of snap %s: %s", name, err)
			continue
		}
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		if err := addImplicitSlots(st, snapInfo); err != nil {
			return err
		}
		affectedSnaps = append(affectedSnaps, snapInfo)
		confinementOpts = append(confinementOpts, confinementOptions(snapst.Flags))
	}

	return m.setupSecurityByBackend(task, affectedSnaps, confinementOpts, tm)
}
   211  
   212  func (m *InterfaceManager) doRemoveProfiles(task *state.Task, tomb *tomb.Tomb) error {
   213  	st := task.State()
   214  	st.Lock()
   215  	defer st.Unlock()
   216  
   217  	perfTimings := state.TimingsForTask(task)
   218  	defer perfTimings.Save(st)
   219  
   220  	// Get SnapSetup for this snap. This is gives us the name of the snap.
   221  	snapSetup, err := snapstate.TaskSnapSetup(task)
   222  	if err != nil {
   223  		return err
   224  	}
   225  	snapName := snapSetup.InstanceName()
   226  
   227  	return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   228  }
   229  
   230  func (m *InterfaceManager) removeProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapName string, tm timings.Measurer) error {
   231  	// Disconnect the snap entirely.
   232  	// This is required to remove the snap from the interface repository.
   233  	// The returned list of affected snaps will need to have its security setup
   234  	// to reflect the change.
   235  	affectedSnaps, err := m.repo.DisconnectSnap(snapName)
   236  	if err != nil {
   237  		return err
   238  	}
   239  	if err := m.setupAffectedSnaps(task, snapName, affectedSnaps, tm); err != nil {
   240  		return err
   241  	}
   242  
   243  	// Remove the snap from the interface repository.
   244  	// This discards all the plugs and slots belonging to that snap.
   245  	if err := m.repo.RemoveSnap(snapName); err != nil {
   246  		return err
   247  	}
   248  
   249  	// Remove security artefacts of the snap.
   250  	if err := m.removeSnapSecurity(task, snapName); err != nil {
   251  		return err
   252  	}
   253  
   254  	return nil
   255  }
   256  
   257  func (m *InterfaceManager) undoSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
   258  	st := task.State()
   259  	st.Lock()
   260  	defer st.Unlock()
   261  
   262  	perfTimings := state.TimingsForTask(task)
   263  	defer perfTimings.Save(st)
   264  
   265  	var corePhase2 bool
   266  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   267  		return err
   268  	}
   269  	if corePhase2 {
   270  		// let the first setup-profiles deal with this
   271  		return nil
   272  	}
   273  
   274  	snapsup, err := snapstate.TaskSnapSetup(task)
   275  	if err != nil {
   276  		return err
   277  	}
   278  	snapName := snapsup.InstanceName()
   279  
   280  	// Get the name from SnapSetup and use it to find the current SideInfo
   281  	// about the snap, if there is one.
   282  	var snapst snapstate.SnapState
   283  	err = snapstate.Get(st, snapName, &snapst)
   284  	if err != nil && err != state.ErrNoState {
   285  		return err
   286  	}
   287  	sideInfo := snapst.CurrentSideInfo()
   288  	if sideInfo == nil {
   289  		// The snap was not installed before so undo should remove security profiles.
   290  		return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   291  	} else {
   292  		// The snap was installed before so undo should setup the old security profiles.
   293  		snapInfo, err := snap.ReadInfo(snapName, sideInfo)
   294  		if err != nil {
   295  			return err
   296  		}
   297  		opts := confinementOptions(snapst.Flags)
   298  		return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
   299  	}
   300  }
   301  
   302  func (m *InterfaceManager) doDiscardConns(task *state.Task, _ *tomb.Tomb) error {
   303  	st := task.State()
   304  	st.Lock()
   305  	defer st.Unlock()
   306  
   307  	snapSetup, err := snapstate.TaskSnapSetup(task)
   308  	if err != nil {
   309  		return err
   310  	}
   311  
   312  	instanceName := snapSetup.InstanceName()
   313  
   314  	var snapst snapstate.SnapState
   315  	err = snapstate.Get(st, instanceName, &snapst)
   316  	if err != nil && err != state.ErrNoState {
   317  		return err
   318  	}
   319  
   320  	if err == nil && len(snapst.Sequence) != 0 {
   321  		return fmt.Errorf("cannot discard connections for snap %q while it is present", instanceName)
   322  	}
   323  	conns, err := getConns(st)
   324  	if err != nil {
   325  		return err
   326  	}
   327  	removed := make(map[string]*connState)
   328  	for id := range conns {
   329  		connRef, err := interfaces.ParseConnRef(id)
   330  		if err != nil {
   331  			return err
   332  		}
   333  		if connRef.PlugRef.Snap == instanceName || connRef.SlotRef.Snap == instanceName {
   334  			removed[id] = conns[id]
   335  			delete(conns, id)
   336  		}
   337  	}
   338  	task.Set("removed", removed)
   339  	setConns(st, conns)
   340  	return nil
   341  }
   342  
   343  func (m *InterfaceManager) undoDiscardConns(task *state.Task, _ *tomb.Tomb) error {
   344  	st := task.State()
   345  	st.Lock()
   346  	defer st.Unlock()
   347  
   348  	var removed map[string]*connState
   349  	err := task.Get("removed", &removed)
   350  	if err != nil && err != state.ErrNoState {
   351  		return err
   352  	}
   353  
   354  	conns, err := getConns(st)
   355  	if err != nil {
   356  		return err
   357  	}
   358  
   359  	for id, connState := range removed {
   360  		conns[id] = connState
   361  	}
   362  	setConns(st, conns)
   363  	task.Set("removed", nil)
   364  	return nil
   365  }
   366  
   367  func getDynamicHookAttributes(task *state.Task) (plugAttrs, slotAttrs map[string]interface{}, err error) {
   368  	if err = task.Get("plug-dynamic", &plugAttrs); err != nil && err != state.ErrNoState {
   369  		return nil, nil, err
   370  	}
   371  	if err = task.Get("slot-dynamic", &slotAttrs); err != nil && err != state.ErrNoState {
   372  		return nil, nil, err
   373  	}
   374  	if plugAttrs == nil {
   375  		plugAttrs = make(map[string]interface{})
   376  	}
   377  	if slotAttrs == nil {
   378  		slotAttrs = make(map[string]interface{})
   379  	}
   380  
   381  	return plugAttrs, slotAttrs, nil
   382  }
   383  
   384  func setDynamicHookAttributes(task *state.Task, plugAttrs, slotAttrs map[string]interface{}) {
   385  	task.Set("plug-dynamic", plugAttrs)
   386  	task.Set("slot-dynamic", slotAttrs)
   387  }
   388  
// doConnect is the task handler establishing a single plug/slot
// connection read from the task. It checks the applicable policy
// ("connection" rules for manual and gadget connections,
// "auto-connection" rules for other auto-connects), connects in the
// repository, regenerates security profiles unless
// "delayed-setup-profiles" is set, and records the connection in the
// conns state.
func (m *InterfaceManager) doConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	// optional flags set by whoever created this task
	var autoConnect bool
	if err := task.Get("auto", &autoConnect); err != nil && err != state.ErrNoState {
		return err
	}
	var byGadget bool
	if err := task.Get("by-gadget", &byGadget); err != nil && err != state.ErrNoState {
		return err
	}
	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", plugRef.Snap)
		}
		return err
	}

	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", slotRef.Snap)
		}
		return err
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}

	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// attributes are always present, even if there are no hooks (they're initialized by Connect).
	plugDynamicAttrs, slotDynamicAttrs, err := getDynamicHookAttributes(task)
	if err != nil {
		return fmt.Errorf("failed to get hook attributes: %s", err)
	}

	var policyChecker interfaces.PolicyFunc

	// manual connections and connections by the gadget obey the
	// policy "connection" rules, other auto-connections obey the
	// "auto-connection" rules
	if autoConnect && !byGadget {
		autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = func(plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) (bool, error) {
			ok, _, err := autochecker.check(plug, slot)
			return ok, err
		}
	} else {
		policyCheck, err := newConnectChecker(st, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = policyCheck.check
	}

	// static attributes of the plug and slot not provided, the ones from snap infos will be used
	conn, err := m.repo.Connect(connRef, nil, plugDynamicAttrs, nil, slotDynamicAttrs, policyChecker)
	if err != nil || conn == nil {
		// a nil conn with nil error means the policy rejected the connection
		return err
	}

	if !delayedSetupProfiles {
		slotOpts := confinementOptions(slotSnapst.Flags)
		if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
			return err
		}

		plugOpts := confinementOptions(plugSnapst.Flags)
		if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
			return err
		}
	} else {
		logger.Debugf("Connect handler: skipping setupSnapSecurity for snaps %q and %q", plug.Snap.InstanceName(), slot.Snap.InstanceName())
	}

	// For undo handler. We need to remember old state of the connection only
	// if undesired flag is set because that means there was a remembered
	// inactive connection already and we should restore its properties
	// in case of undo. Otherwise we don't have to keep old-conn because undo
	// can simply delete any trace of the connection.
	if old, ok := conns[connRef.ID()]; ok && old.Undesired {
		task.Set("old-conn", old)
	}

	conns[connRef.ID()] = &connState{
		Interface:        conn.Interface(),
		StaticPlugAttrs:  conn.Plug.StaticAttrs(),
		DynamicPlugAttrs: conn.Plug.DynamicAttrs(),
		StaticSlotAttrs:  conn.Slot.StaticAttrs(),
		DynamicSlotAttrs: conn.Slot.DynamicAttrs(),
		Auto:             autoConnect,
		ByGadget:         byGadget,
		HotplugKey:       slot.HotplugKey,
	}
	setConns(st, conns)

	// the dynamic attributes might have been updated by the interface's BeforeConnectPlug/Slot code,
	// so we need to update the task for connect-plug- and connect-slot- hooks to see new values.
	setDynamicHookAttributes(task, conn.Plug.DynamicAttrs(), conn.Slot.DynamicAttrs())
	return nil
}
   531  
// doDisconnect is the task handler severing the plug/slot connection
// read from the task. Depending on task flags the connection is
// forgotten entirely ("forget"), kept but marked hotplug-gone
// ("by-hotplug"), marked undesired (manual disconnect of an
// auto-connection) or simply removed from the conns state.
func (m *InterfaceManager) doDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	cref := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// forget flag can be passed with snap disconnect --forget
	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	// Collect the states of the plug and slot snaps (when present) so
	// their security profiles can be refreshed after disconnecting.
	var snapStates []snapstate.SnapState
	for _, instanceName := range []string{plugRef.Snap, slotRef.Snap} {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, instanceName, &snapst); err != nil {
			if err == state.ErrNoState {
				task.Logf("skipping disconnect operation for connection %s %s, snap %q doesn't exist", plugRef, slotRef, instanceName)
				return nil
			}
			task.Errorf("skipping security profiles setup for snap %q when disconnecting %s from %s: %v", instanceName, plugRef, slotRef, err)
		} else {
			snapStates = append(snapStates, snapst)
		}
	}

	conn, ok := conns[cref.ID()]
	if !ok {
		return fmt.Errorf("internal error: connection %q not found in state", cref.ID())
	}

	// store old connection for undo
	task.Set("old-conn", conn)

	err = m.repo.Disconnect(plugRef.Snap, plugRef.Name, slotRef.Snap, slotRef.Name)
	if err != nil {
		_, notConnected := err.(*interfaces.NotConnectedError)
		_, noPlugOrSlot := err.(*interfaces.NoPlugOrSlotError)
		// not connected, just forget it.
		if forget && (notConnected || noPlugOrSlot) {
			delete(conns, cref.ID())
			setConns(st, conns)
			return nil
		}
		return fmt.Errorf("snapd changed, please retry the operation: %v", err)
	}

	// Regenerate the security profiles of the snaps on both ends.
	for _, snapst := range snapStates {
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		if err := m.setupSnapSecurity(task, snapInfo, opts, perfTimings); err != nil {
			return err
		}
	}

	// "auto-disconnect" flag indicates it's a disconnect triggered automatically as part of snap removal;
	// such disconnects should not set undesired flag and instead just remove the connection.
	var autoDisconnect bool
	if err := task.Get("auto-disconnect", &autoDisconnect); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: failed to read 'auto-disconnect' flag: %s", err)
	}

	// "by-hotplug" flag indicates it's a disconnect triggered by hotplug remove event;
	// we want to keep information of the connection and just mark it as hotplug-gone.
	var byHotplug bool
	if err := task.Get("by-hotplug", &byHotplug); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'by-hotplug' flag: %s", err)
	}

	switch {
	case forget:
		delete(conns, cref.ID())
	case byHotplug:
		conn.HotplugGone = true
		conns[cref.ID()] = conn
	case conn.Auto && !autoDisconnect:
		// manual disconnect of an auto-connection: remember it as
		// undesired so it is not automatically re-established
		conn.Undesired = true
		conn.DynamicPlugAttrs = nil
		conn.DynamicSlotAttrs = nil
		conn.StaticPlugAttrs = nil
		conn.StaticSlotAttrs = nil
		conns[cref.ID()] = conn
	default:
		delete(conns, cref.ID())
	}
	setConns(st, conns)

	return nil
}
   638  
// undoDisconnect reverts doDisconnect using the "old-conn" data saved
// by the do handler: it reconnects in the repository with the old
// dynamic attributes, regenerates the security profiles of both snaps
// and restores the old connection state into conns.
func (m *InterfaceManager) undoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	var oldconn connState
	err := task.Get("old-conn", &oldconn)
	if err == state.ErrNoState {
		// nothing was disconnected, nothing to restore
		return nil
	}
	if err != nil {
		return err
	}

	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if forget && (plug == nil || slot == nil) {
		// we were trying to forget an inactive connection that was
		// referring to a non-existing plug or slot; just restore it
		// in the conns state but do not reconnect via repository.
		conns[connRef.ID()] = &oldconn
		setConns(st, conns)
		return nil
	}
	if plug == nil {
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	if slot == nil {
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// Reconnect with the remembered dynamic attributes; nil policy
	// checker as the connection existed before.
	_, err = m.repo.Connect(connRef, nil, oldconn.DynamicPlugAttrs, nil, oldconn.DynamicSlotAttrs, nil)
	if err != nil {
		return err
	}

	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	conns[connRef.ID()] = &oldconn
	setConns(st, conns)

	return nil
}
   718  
// undoConnect reverts doConnect: it restores the previous connection
// state saved as "old-conn" (or drops the conns entry entirely),
// disconnects in the repository and, unless "delayed-setup-profiles"
// was set, regenerates the security profiles of both snaps.
func (m *InterfaceManager) undoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}
	connRef := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// restore the old connection state if the do handler saved one,
	// otherwise remove the connection from the state entirely
	var old connState
	err = task.Get("old-conn", &old)
	if err != nil && err != state.ErrNoState {
		return err
	}
	if err == nil {
		conns[connRef.ID()] = &old
	} else {
		delete(conns, connRef.ID())
	}
	setConns(st, conns)

	if err := m.repo.Disconnect(connRef.PlugRef.Snap, connRef.PlugRef.Name, connRef.SlotRef.Snap, connRef.SlotRef.Name); err != nil {
		return err
	}

	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}
	if delayedSetupProfiles {
		logger.Debugf("Connect undo handler: skipping setupSnapSecurity for snaps %q and %q", connRef.PlugRef.Snap, connRef.SlotRef.Snap)
		return nil
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		return fmt.Errorf("internal error: snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		return fmt.Errorf("internal error: snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	var plugSnapst snapstate.SnapState
	err = snapstate.Get(st, plugRef.Snap, &plugSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", plugRef.Snap)
	}
	if err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	err = snapstate.Get(st, slotRef.Snap, &slotSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", slotRef.Snap)
	}
	if err != nil {
		return err
	}
	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	return nil
}
   798  
// contentLinkRetryTimeout is the timeout used when retrying tasks
// related to shared content.
var contentLinkRetryTimeout = 30 * time.Second

// hotplugRetryTimeout is the timeout used when retrying hotplug-related
// tasks.
var hotplugRetryTimeout = 300 * time.Millisecond
   804  
   805  func obsoleteCorePhase2SetupProfiles(kind string, task *state.Task) (bool, error) {
   806  	if kind != "setup-profiles" {
   807  		return false, nil
   808  	}
   809  
   810  	var corePhase2 bool
   811  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   812  		return false, err
   813  	}
   814  	return corePhase2, nil
   815  }
   816  
   817  func checkAutoconnectConflicts(st *state.State, autoconnectTask *state.Task, plugSnap, slotSnap string) error {
   818  	for _, task := range st.Tasks() {
   819  		if task.Status().Ready() {
   820  			continue
   821  		}
   822  
   823  		k := task.Kind()
   824  		if k == "connect" || k == "disconnect" {
   825  			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
   826  			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
   827  			// are rejected by conflict check logic in snapstate.
   828  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   829  			if err != nil {
   830  				return err
   831  			}
   832  			if plugRef.Snap == plugSnap {
   833  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
   834  			}
   835  			if slotRef.Snap == slotSnap {
   836  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
   837  			}
   838  			continue
   839  		}
   840  
   841  		snapsup, err := snapstate.TaskSnapSetup(task)
   842  		// e.g. hook tasks don't have task snap setup
   843  		if err != nil {
   844  			continue
   845  		}
   846  
   847  		otherSnapName := snapsup.InstanceName()
   848  
   849  		// different snaps - no conflict
   850  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   851  			continue
   852  		}
   853  
   854  		// setup-profiles core-phase-2 is now no-op, we shouldn't
   855  		// conflict on it; note, old snapd would create this task even
   856  		// for regular snaps if installed with the dangerous flag.
   857  		obsoleteCorePhase2, err := obsoleteCorePhase2SetupProfiles(k, task)
   858  		if err != nil {
   859  			return err
   860  		}
   861  		if obsoleteCorePhase2 {
   862  			continue
   863  		}
   864  
   865  		// other snap that affects us because of plug or slot
   866  		if k == "unlink-snap" || k == "link-snap" || k == "setup-profiles" || k == "discard-snap" {
   867  			// discard-snap is scheduled as part of garbage collection during refresh, if multiple revsions are already installed.
   868  			// this revision check avoids conflict with own discard tasks created as part of install/refresh.
   869  			if k == "discard-snap" && autoconnectTask.Change() != nil && autoconnectTask.Change().ID() == task.Change().ID() {
   870  				continue
   871  			}
   872  			// if snap is getting removed, we will retry but the snap will be gone and auto-connect becomes no-op
   873  			// if snap is getting installed/refreshed - temporary conflict, retry later
   874  			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
   875  		}
   876  	}
   877  	return nil
   878  }
   879  
   880  func checkDisconnectConflicts(st *state.State, disconnectingSnap, plugSnap, slotSnap string) error {
   881  	for _, task := range st.Tasks() {
   882  		if task.Status().Ready() {
   883  			continue
   884  		}
   885  
   886  		k := task.Kind()
   887  		if k == "connect" || k == "disconnect" {
   888  			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
   889  			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
   890  			// are rejected by conflict check logic in snapstate.
   891  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   892  			if err != nil {
   893  				return err
   894  			}
   895  			if plugRef.Snap == plugSnap || slotRef.Snap == slotSnap {
   896  				return &state.Retry{After: connectRetryTimeout}
   897  			}
   898  			continue
   899  		}
   900  
   901  		snapsup, err := snapstate.TaskSnapSetup(task)
   902  		// e.g. hook tasks don't have task snap setup
   903  		if err != nil {
   904  			continue
   905  		}
   906  
   907  		otherSnapName := snapsup.InstanceName()
   908  
   909  		// different snaps - no conflict
   910  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   911  			continue
   912  		}
   913  
   914  		// another task related to same snap op (unrelated op would be blocked by snapstate conflict logic)
   915  		if otherSnapName == disconnectingSnap {
   916  			continue
   917  		}
   918  
   919  		// note, don't care about unlink-snap for the opposite end. This relies
   920  		// on the fact that auto-disconnect will create conflicting "disconnect" tasks that
   921  		// we will retry with the logic above.
   922  		if k == "link-snap" || k == "setup-profiles" {
   923  			// other snap is getting installed/refreshed - temporary conflict
   924  			return &state.Retry{After: connectRetryTimeout}
   925  		}
   926  	}
   927  	return nil
   928  }
   929  
   930  func checkHotplugDisconnectConflicts(st *state.State, plugSnap, slotSnap string) error {
   931  	for _, task := range st.Tasks() {
   932  		if task.Status().Ready() {
   933  			continue
   934  		}
   935  
   936  		k := task.Kind()
   937  		if k == "connect" || k == "disconnect" {
   938  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   939  			if err != nil {
   940  				return err
   941  			}
   942  			if plugRef.Snap == plugSnap {
   943  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
   944  			}
   945  			if slotRef.Snap == slotSnap {
   946  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
   947  			}
   948  			continue
   949  		}
   950  
   951  		snapsup, err := snapstate.TaskSnapSetup(task)
   952  		// e.g. hook tasks don't have task snap setup
   953  		if err != nil {
   954  			continue
   955  		}
   956  		otherSnapName := snapsup.InstanceName()
   957  
   958  		// different snaps - no conflict
   959  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   960  			continue
   961  		}
   962  
   963  		if k == "link-snap" || k == "setup-profiles" || k == "unlink-snap" {
   964  			// other snap is getting installed/refreshed/removed - temporary conflict
   965  			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
   966  		}
   967  	}
   968  	return nil
   969  }
   970  
   971  // inSameChangeWaitChains returns true if there is a wait chain so
   972  // that `startT` is run before `searchT` in the same state.Change.
   973  func inSameChangeWaitChain(startT, searchT *state.Task) bool {
   974  	// Trivial case, tasks in different changes (they could in theory
   975  	// still have cross-change waits but we don't do these today).
   976  	// In this case, return quickly.
   977  	if startT.Change() != searchT.Change() {
   978  		return false
   979  	}
   980  	seenTasks := make(map[string]bool)
   981  	// Do a recursive check if its in the same change
   982  	return waitChainSearch(startT, searchT, seenTasks)
   983  }
   984  
   985  func waitChainSearch(startT, searchT *state.Task, seenTasks map[string]bool) bool {
   986  	if seenTasks[startT.ID()] {
   987  		return false
   988  	}
   989  	seenTasks[startT.ID()] = true
   990  	for _, cand := range startT.HaltTasks() {
   991  		if cand == searchT {
   992  			return true
   993  		}
   994  		if waitChainSearch(cand, searchT, seenTasks) {
   995  			return true
   996  		}
   997  	}
   998  
   999  	return false
  1000  }
  1001  
  1002  // batchConnectTasks creates connect tasks and interface hooks for
  1003  // conns and sets their wait chain with regard to the setupProfiles
  1004  // task.
  1005  //
  1006  // The tasks are chained so that: - prepare-plug-, prepare-slot- and
  1007  // connect tasks are all executed before setup-profiles -
  1008  // connect-plug-, connect-slot- are all executed after setup-profiles.
  1009  // The "delayed-setup-profiles" flag is set on the connect tasks to
  1010  // indicate that doConnect handler should not set security backends up
  1011  // because this will be done later by the setup-profiles task.
  1012  func batchConnectTasks(st *state.State, snapsup *snapstate.SnapSetup, conns map[string]*interfaces.ConnRef, connOpts map[string]*connectOpts) (ts *state.TaskSet, hasInterfaceHooks bool, err error) {
  1013  	setupProfiles := st.NewTask("setup-profiles", fmt.Sprintf(i18n.G("Setup snap %q (%s) security profiles for auto-connections"), snapsup.InstanceName(), snapsup.Revision()))
  1014  	setupProfiles.Set("snap-setup", snapsup)
  1015  
  1016  	ts = state.NewTaskSet()
  1017  	for connID, conn := range conns {
  1018  		var opts connectOpts
  1019  		if providedOpts := connOpts[connID]; providedOpts != nil {
  1020  			opts = *providedOpts
  1021  		} else {
  1022  			// default
  1023  			opts.AutoConnect = true
  1024  		}
  1025  		opts.DelayedSetupProfiles = true
  1026  		connectTs, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, opts)
  1027  		if err != nil {
  1028  			return nil, false, fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
  1029  		}
  1030  
  1031  		if len(connectTs.Tasks()) > 1 {
  1032  			hasInterfaceHooks = true
  1033  		}
  1034  
  1035  		// setup-profiles needs to wait for the main "connect" task
  1036  		connectTask, _ := connectTs.Edge(ConnectTaskEdge)
  1037  		if connectTask == nil {
  1038  			return nil, false, fmt.Errorf("internal error: no 'connect' task found for %q", conn)
  1039  		}
  1040  		setupProfiles.WaitFor(connectTask)
  1041  
  1042  		// setup-profiles must be run before the task that marks the end of connect-plug- and connect-slot- hooks
  1043  		afterConnectTask, _ := connectTs.Edge(AfterConnectHooksEdge)
  1044  		if afterConnectTask != nil {
  1045  			afterConnectTask.WaitFor(setupProfiles)
  1046  		}
  1047  		ts.AddAll(connectTs)
  1048  	}
  1049  	if len(ts.Tasks()) > 0 {
  1050  		ts.AddTask(setupProfiles)
  1051  	}
  1052  	return ts, hasInterfaceHooks, nil
  1053  }
  1054  
  1055  // firstTaskAfterBootWhenPreseeding finds the first task to be run for thisSnap
  1056  // on first boot after mark-preseeded task, this is always the install hook.
  1057  // It is an internal error if install hook for thisSnap cannot be found.
  1058  func firstTaskAfterBootWhenPreseeding(thisSnap string, markPreseeded *state.Task) (*state.Task, error) {
  1059  	if markPreseeded.Change() == nil {
  1060  		return nil, fmt.Errorf("internal error: %s task not in change", markPreseeded.Kind())
  1061  	}
  1062  	for _, ht := range markPreseeded.HaltTasks() {
  1063  		if ht.Kind() == "run-hook" {
  1064  			var hs hookstate.HookSetup
  1065  			if err := ht.Get("hook-setup", &hs); err != nil {
  1066  				return nil, fmt.Errorf("internal error: cannot get hook setup: %v", err)
  1067  			}
  1068  			if hs.Hook == "install" && hs.Snap == thisSnap {
  1069  				return ht, nil
  1070  			}
  1071  		}
  1072  	}
  1073  	return nil, fmt.Errorf("internal error: cannot find install hook for snap %q", thisSnap)
  1074  }
  1075  
  1076  func filterForSlot(slot *snap.SlotInfo) func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
  1077  	return func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
  1078  		for _, candSlot := range candSlots {
  1079  			if candSlot.String() == slot.String() {
  1080  				return []*snap.SlotInfo{slot}
  1081  			}
  1082  		}
  1083  		return nil
  1084  	}
  1085  }
  1086  
  1087  // doAutoConnect creates task(s) to connect the given snap to viable candidates.
  1088  func (m *InterfaceManager) doAutoConnect(task *state.Task, _ *tomb.Tomb) error {
  1089  	st := task.State()
  1090  	st.Lock()
  1091  	defer st.Unlock()
  1092  
  1093  	snapsup, err := snapstate.TaskSnapSetup(task)
  1094  	if err != nil {
  1095  		return err
  1096  	}
  1097  
  1098  	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
  1099  	if err != nil {
  1100  		return err
  1101  	}
  1102  
  1103  	conns, err := getConns(st)
  1104  	if err != nil {
  1105  		return err
  1106  	}
  1107  
  1108  	// The previous task (link-snap) may have triggered a restart,
  1109  	// if this is the case we can only proceed once the restart
  1110  	// has happened or we may not have all the interfaces of the
  1111  	// new core/base snap.
  1112  	if err := snapstate.FinishRestart(task, snapsup); err != nil {
  1113  		return err
  1114  	}
  1115  
  1116  	snapName := snapsup.InstanceName()
  1117  
  1118  	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
  1119  	if err != nil {
  1120  		return err
  1121  	}
  1122  
  1123  	gadgectConnect := newGadgetConnect(st, task, m.repo, snapName, deviceCtx)
  1124  
  1125  	// wait for auto-install, started by prerequisites code, for
  1126  	// the default-providers of content ifaces so we can
  1127  	// auto-connect to them; snapstate prerequisites does a bit
  1128  	// more filtering than this so defaultProviders here can
  1129  	// contain some more snaps; should not be an issue in practice
  1130  	// given the check below checks for same chain and we don't
  1131  	// forcefully wait for defaultProviders; we just retry for
  1132  	// things in the intersection between defaultProviders here and
  1133  	// snaps with not ready link-snap|setup-profiles tasks
  1134  	defaultProviders := snap.DefaultContentProviders(m.repo.Plugs(snapName))
  1135  	for _, chg := range st.Changes() {
  1136  		if chg.Status().Ready() {
  1137  			continue
  1138  		}
  1139  		for _, t := range chg.Tasks() {
  1140  			if t.Status().Ready() {
  1141  				continue
  1142  			}
  1143  			if t.Kind() != "link-snap" && t.Kind() != "setup-profiles" {
  1144  				continue
  1145  			}
  1146  			if snapsup, err := snapstate.TaskSnapSetup(t); err == nil {
  1147  				// Only retry if the task that installs the
  1148  				// content provider is not waiting for us
  1149  				// (or this will just hang forever).
  1150  				_, ok := defaultProviders[snapsup.InstanceName()]
  1151  				if ok && !inSameChangeWaitChain(task, t) {
  1152  					return &state.Retry{After: contentLinkRetryTimeout}
  1153  				}
  1154  			}
  1155  		}
  1156  	}
  1157  
  1158  	plugs := m.repo.Plugs(snapName)
  1159  	slots := m.repo.Slots(snapName)
  1160  	newconns := make(map[string]*interfaces.ConnRef, len(plugs)+len(slots))
  1161  	var connOpts map[string]*connectOpts
  1162  
  1163  	conflictError := func(retry *state.Retry, err error) error {
  1164  		if retry != nil {
  1165  			task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
  1166  			return retry // will retry
  1167  		}
  1168  		return fmt.Errorf("auto-connect conflict check failed: %v", err)
  1169  	}
  1170  
  1171  	// Consider gadget connections, we want to remember them in
  1172  	// any case with "by-gadget" set, so they should be processed
  1173  	// before the auto-connection ones.
  1174  	if err := gadgectConnect.addGadgetConnections(newconns, conns, conflictError); err != nil {
  1175  		return err
  1176  	}
  1177  	if len(newconns) > 0 {
  1178  		connOpts = make(map[string]*connectOpts, len(newconns))
  1179  		byGadgetOpts := &connectOpts{AutoConnect: true, ByGadget: true}
  1180  		for key := range newconns {
  1181  			connOpts[key] = byGadgetOpts
  1182  		}
  1183  	}
  1184  
  1185  	// Auto-connect all the plugs
  1186  	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
  1187  		return fmt.Sprintf("cannot auto-connect plug %s, candidates found: %s", plug, strings.Join(candRefs, ", "))
  1188  	}
  1189  	if err := autochecker.addAutoConnections(newconns, plugs, nil, conns, cannotAutoConnectLog, conflictError); err != nil {
  1190  		return err
  1191  	}
  1192  	// Auto-connect all the slots
  1193  	for _, slot := range slots {
  1194  		candidates := m.repo.AutoConnectCandidatePlugs(snapName, slot.Name, autochecker.check)
  1195  		if len(candidates) == 0 {
  1196  			continue
  1197  		}
  1198  
  1199  		cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
  1200  			return fmt.Sprintf("cannot auto-connect slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
  1201  		}
  1202  		if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
  1203  			return err
  1204  		}
  1205  	}
  1206  
  1207  	autots, hasInterfaceHooks, err := batchConnectTasks(st, snapsup, newconns, connOpts)
  1208  	if err != nil {
  1209  		return err
  1210  	}
  1211  
  1212  	// If interface hooks are not present then connects can be executed during
  1213  	// preseeding.
  1214  	// Otherwise we will run all connects, their hooks and setup-profiles after
  1215  	// preseeding (on first boot). Note, we may be facing multiple connections
  1216  	// here where only some have hooks; however there is no point in running
  1217  	// those without hooks before mark-preseeded, because only setup-profiles is
  1218  	// performance-critical and it still needs to run after those with hooks.
  1219  	if m.preseed && hasInterfaceHooks {
  1220  		for _, t := range st.Tasks() {
  1221  			if t.Kind() == "mark-preseeded" {
  1222  				markPreseeded := t
  1223  				// consistency check
  1224  				if markPreseeded.Status() != state.DoStatus {
  1225  					return fmt.Errorf("internal error: unexpected state of mark-preseeded task: %s", markPreseeded.Status())
  1226  				}
  1227  
  1228  				firstTaskAfterBoot, err := firstTaskAfterBootWhenPreseeding(snapsup.InstanceName(), markPreseeded)
  1229  				if err != nil {
  1230  					return err
  1231  				}
  1232  				// first task of the snap that normally runs on first boot
  1233  				// needs to wait on connects & interface hooks.
  1234  				firstTaskAfterBoot.WaitAll(autots)
  1235  
  1236  				// connect tasks and interface hooks need to wait for end of preseeding
  1237  				// (they need to run on first boot, not during preseeding).
  1238  				autots.WaitFor(markPreseeded)
  1239  				t.Change().AddAll(autots)
  1240  				task.SetStatus(state.DoneStatus)
  1241  				st.EnsureBefore(0)
  1242  				return nil
  1243  			}
  1244  		}
  1245  		return fmt.Errorf("internal error: mark-preseeded task not found in preseeding mode")
  1246  	}
  1247  
  1248  	if len(autots.Tasks()) > 0 {
  1249  		snapstate.InjectTasks(task, autots)
  1250  
  1251  		st.EnsureBefore(0)
  1252  	}
  1253  
  1254  	task.SetStatus(state.DoneStatus)
  1255  	return nil
  1256  }
  1257  
  1258  // doAutoDisconnect creates tasks for disconnecting all interfaces of a snap and running its interface hooks.
  1259  func (m *InterfaceManager) doAutoDisconnect(task *state.Task, _ *tomb.Tomb) error {
  1260  	st := task.State()
  1261  	st.Lock()
  1262  	defer st.Unlock()
  1263  
  1264  	snapsup, err := snapstate.TaskSnapSetup(task)
  1265  	if err != nil {
  1266  		return err
  1267  	}
  1268  
  1269  	snapName := snapsup.InstanceName()
  1270  	connections, err := m.repo.Connections(snapName)
  1271  	if err != nil {
  1272  		return err
  1273  	}
  1274  
  1275  	// check for conflicts on all connections first before creating disconnect hooks
  1276  	for _, connRef := range connections {
  1277  		if err := checkDisconnectConflicts(st, snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
  1278  			if _, retry := err.(*state.Retry); retry {
  1279  				logger.Debugf("disconnecting interfaces of snap %q will be retried because of %q - %q conflict", snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap)
  1280  				task.Logf("Waiting for conflicting change in progress...")
  1281  				return err // will retry
  1282  			}
  1283  			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
  1284  		}
  1285  	}
  1286  
  1287  	hookTasks := state.NewTaskSet()
  1288  	for _, connRef := range connections {
  1289  		conn, err := m.repo.Connection(connRef)
  1290  		if err != nil {
  1291  			break
  1292  		}
  1293  		// "auto-disconnect" flag indicates it's a disconnect triggered as part of snap removal, in which
  1294  		// case we want to skip the logic of marking auto-connections as 'undesired' and instead just remove
  1295  		// them so they can be automatically connected if the snap is installed again.
  1296  		ts, err := disconnectTasks(st, conn, disconnectOpts{AutoDisconnect: true})
  1297  		if err != nil {
  1298  			return err
  1299  		}
  1300  		hookTasks.AddAll(ts)
  1301  	}
  1302  
  1303  	snapstate.InjectTasks(task, hookTasks)
  1304  
  1305  	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
  1306  	task.SetStatus(state.DoneStatus)
  1307  	return nil
  1308  }
  1309  
// undoAutoConnect is the undo handler for the auto-connect task; it is
// currently a no-op.
func (m *InterfaceManager) undoAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	// TODO Introduce disconnection hooks, and run them here as well to give a chance
	// for the snap to undo whatever it did when the connection was established.
	return nil
}
  1315  
  1316  // transitionConnectionsCoreMigration will transition all connections
  1317  // from oldName to newName. Note that this is only useful when you
  1318  // know that newName supports everything that oldName supports,
  1319  // otherwise you will be in a world of pain.
  1320  func (m *InterfaceManager) transitionConnectionsCoreMigration(st *state.State, oldName, newName string) error {
  1321  	// transition over, ubuntu-core has only slots
  1322  	conns, err := getConns(st)
  1323  	if err != nil {
  1324  		return err
  1325  	}
  1326  
  1327  	for id := range conns {
  1328  		connRef, err := interfaces.ParseConnRef(id)
  1329  		if err != nil {
  1330  			return err
  1331  		}
  1332  		if connRef.SlotRef.Snap == oldName {
  1333  			connRef.SlotRef.Snap = newName
  1334  			conns[connRef.ID()] = conns[id]
  1335  			delete(conns, id)
  1336  		}
  1337  	}
  1338  	setConns(st, conns)
  1339  
  1340  	// After migrating connections in state, remove them from repo so they stay in sync and we don't
  1341  	// attempt to run disconnects on when the old core gets removed as part of the transition.
  1342  	if err := m.removeConnections(oldName); err != nil {
  1343  		return err
  1344  	}
  1345  
  1346  	// The reloadConnections() just modifies the repository object, it
  1347  	// has no effect on the running system, i.e. no security profiles
  1348  	// on disk are rewritten. This is ok because core/ubuntu-core have
  1349  	// exactly the same profiles and nothing in the generated policies
  1350  	// has the core snap-name encoded.
  1351  	if _, err := m.reloadConnections(newName); err != nil {
  1352  		return err
  1353  	}
  1354  
  1355  	return nil
  1356  }
  1357  
  1358  func (m *InterfaceManager) doTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1359  	st := t.State()
  1360  	st.Lock()
  1361  	defer st.Unlock()
  1362  
  1363  	var oldName, newName string
  1364  	if err := t.Get("old-name", &oldName); err != nil {
  1365  		return err
  1366  	}
  1367  	if err := t.Get("new-name", &newName); err != nil {
  1368  		return err
  1369  	}
  1370  
  1371  	return m.transitionConnectionsCoreMigration(st, oldName, newName)
  1372  }
  1373  
  1374  func (m *InterfaceManager) undoTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1375  	st := t.State()
  1376  	st.Lock()
  1377  	defer st.Unlock()
  1378  
  1379  	// symmetrical to the "do" method, just reverse them again
  1380  	var oldName, newName string
  1381  	if err := t.Get("old-name", &oldName); err != nil {
  1382  		return err
  1383  	}
  1384  	if err := t.Get("new-name", &newName); err != nil {
  1385  		return err
  1386  	}
  1387  
  1388  	return m.transitionConnectionsCoreMigration(st, newName, oldName)
  1389  }
  1390  
// doHotplugConnect creates task(s) to (re)create old connections or auto-connect viable slots in response to hotplug "add" event.
func (m *InterfaceManager) doHotplugConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// the task carries the interface name and hotplug key identifying the device
	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	if slot == nil {
		return fmt.Errorf("cannot find hotplug slot for interface %s and hotplug key %q", ifaceName, hotplugKey)
	}

	// find old connections for slots of this device - note we can't ask the repository since we need
	// to recreate old connections that are only remembered in the state.
	connsForDevice := findConnsForHotplugKey(conns, ifaceName, hotplugKey)

	// conflictError returns the retry verbatim (so this task is retried
	// later) or wraps any other conflict-check failure.
	conflictError := func(retry *state.Retry, err error) error {
		if retry != nil {
			task.Logf("hotplug connect will be retried: %s", retry.Reason)
			return retry // will retry
		}
		return fmt.Errorf("hotplug-connect conflict check failed: %v", err)
	}

	// find old connections to recreate
	var recreate []*interfaces.ConnRef
	for _, id := range connsForDevice {
		conn := conns[id]
		// device was not unplugged, this is the case if snapd is restarted and we enumerate devices.
		// note, the situation where device was not unplugged but has changed is handled
		// by hotplugDeviceAdded handler - updateDevice.
		if !conn.HotplugGone || conn.Undesired {
			continue
		}

		// the device was unplugged while connected, so it had disconnect hooks run; recreate the connection
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}

		if err := checkAutoconnectConflicts(st, task, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			retry, _ := err.(*state.Retry)
			return conflictError(retry, err)
		}
		recreate = append(recreate, connRef)
	}

	// find new auto-connections
	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
	if err != nil {
		return err
	}

	instanceName := slot.Snap.InstanceName()
	candidates := m.repo.AutoConnectCandidatePlugs(instanceName, slot.Name, autochecker.check)

	newconns := make(map[string]*interfaces.ConnRef, len(candidates))
	// Auto-connect the plugs
	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
		return fmt.Sprintf("cannot auto-connect hotplug slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
	}
	if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
		return err
	}

	// nothing to restore and no new auto-connections - nothing to do
	if len(recreate) == 0 && len(newconns) == 0 {
		return nil
	}

	// Create connect tasks and interface hooks for old connections
	connectTs := state.NewTaskSet()
	for _, conn := range recreate {
		// preserve whether the original connection was an auto-connection
		wasAutoconnected := conns[conn.ID()].Auto
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: wasAutoconnected})
		if err != nil {
			return fmt.Errorf("internal error: connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}
	// Create connect tasks and interface hooks for new auto-connections
	for _, conn := range newconns {
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: true})
		if err != nil {
			return fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}

	if len(connectTs.Tasks()) > 0 {
		snapstate.InjectTasks(task, connectTs)
		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}
  1507  
  1508  // doHotplugUpdateSlot updates static attributes of a hotplug slot for given device.
  1509  func (m *InterfaceManager) doHotplugUpdateSlot(task *state.Task, _ *tomb.Tomb) error {
  1510  	st := task.State()
  1511  	st.Lock()
  1512  	defer st.Unlock()
  1513  
  1514  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1515  	if err != nil {
  1516  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1517  	}
  1518  
  1519  	var attrs map[string]interface{}
  1520  	if err := task.Get("slot-attrs", &attrs); err != nil {
  1521  		return fmt.Errorf("internal error: cannot get slot-attrs attribute for device %s, interface %s: %s", hotplugKey, ifaceName, err)
  1522  	}
  1523  
  1524  	stateSlots, err := getHotplugSlots(st)
  1525  	if err != nil {
  1526  		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
  1527  	}
  1528  
  1529  	slot, err := m.repo.UpdateHotplugSlotAttrs(ifaceName, hotplugKey, attrs)
  1530  	if err != nil {
  1531  		return err
  1532  	}
  1533  
  1534  	if slotSpec, ok := stateSlots[slot.Name]; ok {
  1535  		slotSpec.StaticAttrs = attrs
  1536  		stateSlots[slot.Name] = slotSpec
  1537  		setHotplugSlots(st, stateSlots)
  1538  	} else {
  1539  		return fmt.Errorf("internal error: cannot find slot %s for device %q", slot.Name, hotplugKey)
  1540  	}
  1541  
  1542  	return nil
  1543  }
  1544  
  1545  // doHotplugRemoveSlot removes hotplug slot for given device from the repository in response to udev "remove" event.
  1546  // This task must necessarily be run after all affected slot gets disconnected in the repo.
  1547  func (m *InterfaceManager) doHotplugRemoveSlot(task *state.Task, _ *tomb.Tomb) error {
  1548  	st := task.State()
  1549  	st.Lock()
  1550  	defer st.Unlock()
  1551  
  1552  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1553  	if err != nil {
  1554  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1555  	}
  1556  
  1557  	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
  1558  	if err != nil {
  1559  		return fmt.Errorf("internal error: cannot determine slots: %v", err)
  1560  	}
  1561  	if slot != nil {
  1562  		if err := m.repo.RemoveSlot(slot.Snap.InstanceName(), slot.Name); err != nil {
  1563  			return fmt.Errorf("cannot remove hotplug slot: %v", err)
  1564  		}
  1565  	}
  1566  
  1567  	stateSlots, err := getHotplugSlots(st)
  1568  	if err != nil {
  1569  		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
  1570  	}
  1571  
  1572  	// remove the slot from hotplug-slots in the state as long as there are no connections referencing it,
  1573  	// including connection with hotplug-gone=true.
  1574  	slotDef := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
  1575  	if slotDef == nil {
  1576  		return fmt.Errorf("internal error: cannot find hotplug slot for interface %s, hotplug key %q", ifaceName, hotplugKey)
  1577  	}
  1578  	conns, err := getConns(st)
  1579  	if err != nil {
  1580  		return err
  1581  	}
  1582  	for _, conn := range conns {
  1583  		if conn.Interface == slotDef.Interface && conn.HotplugKey == slotDef.HotplugKey {
  1584  			// there is a connection referencing this slot, do not remove it, only mark as "gone"
  1585  			slotDef.HotplugGone = true
  1586  			stateSlots[slotDef.Name] = slotDef
  1587  			setHotplugSlots(st, stateSlots)
  1588  			return nil
  1589  		}
  1590  	}
  1591  	delete(stateSlots, slotDef.Name)
  1592  	setHotplugSlots(st, stateSlots)
  1593  
  1594  	return nil
  1595  }
  1596  
  1597  // doHotplugDisconnect creates task(s) to disconnect connections and remove slots in response to hotplug "remove" event.
  1598  func (m *InterfaceManager) doHotplugDisconnect(task *state.Task, _ *tomb.Tomb) error {
  1599  	st := task.State()
  1600  	st.Lock()
  1601  	defer st.Unlock()
  1602  
  1603  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1604  	if err != nil {
  1605  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1606  	}
  1607  
  1608  	connections, err := m.repo.ConnectionsForHotplugKey(ifaceName, hotplugKey)
  1609  	if err != nil {
  1610  		return err
  1611  	}
  1612  	if len(connections) == 0 {
  1613  		return nil
  1614  	}
  1615  
  1616  	// check for conflicts on all connections first before creating disconnect hooks
  1617  	for _, connRef := range connections {
  1618  		if err := checkHotplugDisconnectConflicts(st, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
  1619  			if retry, ok := err.(*state.Retry); ok {
  1620  				task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
  1621  				return err // will retry
  1622  			}
  1623  			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
  1624  		}
  1625  	}
  1626  
  1627  	dts := state.NewTaskSet()
  1628  	for _, connRef := range connections {
  1629  		conn, err := m.repo.Connection(connRef)
  1630  		if err != nil {
  1631  			// this should never happen since we get all connections from the repo
  1632  			return fmt.Errorf("internal error: cannot get connection %q: %s", connRef, err)
  1633  		}
  1634  		// "by-hotplug" flag indicates it's a disconnect triggered as part of hotplug removal.
  1635  		ts, err := disconnectTasks(st, conn, disconnectOpts{ByHotplug: true})
  1636  		if err != nil {
  1637  			return fmt.Errorf("internal error: cannot create disconnect tasks: %s", err)
  1638  		}
  1639  		dts.AddAll(ts)
  1640  	}
  1641  
  1642  	snapstate.InjectTasks(task, dts)
  1643  	st.EnsureBefore(0)
  1644  
  1645  	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
  1646  	task.SetStatus(state.DoneStatus)
  1647  
  1648  	return nil
  1649  }
  1650  
  1651  func (m *InterfaceManager) doHotplugAddSlot(task *state.Task, _ *tomb.Tomb) error {
  1652  	st := task.State()
  1653  	st.Lock()
  1654  	defer st.Unlock()
  1655  
  1656  	systemSnap, err := systemSnapInfo(st)
  1657  	if err != nil {
  1658  		return fmt.Errorf("system snap not available")
  1659  	}
  1660  
  1661  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1662  	if err != nil {
  1663  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1664  	}
  1665  
  1666  	var proposedSlot hotplug.ProposedSlot
  1667  	if err := task.Get("proposed-slot", &proposedSlot); err != nil {
  1668  		return fmt.Errorf("internal error: cannot get proposed hotplug slot from task attributes: %s", err)
  1669  	}
  1670  	var devinfo hotplug.HotplugDeviceInfo
  1671  	if err := task.Get("device-info", &devinfo); err != nil {
  1672  		return fmt.Errorf("internal error: cannot get hotplug device info from task attributes: %s", err)
  1673  	}
  1674  
  1675  	stateSlots, err := getHotplugSlots(st)
  1676  	if err != nil {
  1677  		return fmt.Errorf("internal error obtaining hotplug slots: %v", err.Error())
  1678  	}
  1679  
  1680  	iface := m.repo.Interface(ifaceName)
  1681  	if iface == nil {
  1682  		return fmt.Errorf("internal error: cannot find interface %s", ifaceName)
  1683  	}
  1684  
  1685  	slot := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
  1686  
  1687  	// if we know this slot already, restore / update it.
  1688  	if slot != nil {
  1689  		if slot.HotplugGone {
  1690  			// hotplugGone means the device was unplugged, so its disconnect hooks were run and can now
  1691  			// simply recreate the slot with potentially new attributes, and old connections will be re-created
  1692  			newSlot := &snap.SlotInfo{
  1693  				Name:       slot.Name,
  1694  				Label:      proposedSlot.Label,
  1695  				Snap:       systemSnap,
  1696  				Interface:  ifaceName,
  1697  				Attrs:      proposedSlot.Attrs,
  1698  				HotplugKey: hotplugKey,
  1699  			}
  1700  			return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1701  		}
  1702  
  1703  		// else - not gone, restored already by reloadConnections, but may need updating.
  1704  		if !reflect.DeepEqual(proposedSlot.Attrs, slot.StaticAttrs) {
  1705  			ts := updateDevice(st, iface.Name(), hotplugKey, proposedSlot.Attrs)
  1706  			snapstate.InjectTasks(task, ts)
  1707  			st.EnsureBefore(0)
  1708  			task.SetStatus(state.DoneStatus)
  1709  		} // else - nothing to do
  1710  		return nil
  1711  	}
  1712  
  1713  	// New slot.
  1714  	slotName := hotplugSlotName(hotplugKey, systemSnap.InstanceName(), proposedSlot.Name, iface.Name(), &devinfo, m.repo, stateSlots)
  1715  	newSlot := &snap.SlotInfo{
  1716  		Name:       slotName,
  1717  		Label:      proposedSlot.Label,
  1718  		Snap:       systemSnap,
  1719  		Interface:  iface.Name(),
  1720  		Attrs:      proposedSlot.Attrs,
  1721  		HotplugKey: hotplugKey,
  1722  	}
  1723  	return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1724  }
  1725  
  1726  // doHotplugSeqWait returns Retry error if there is another change for same hotplug key and a lower sequence number.
  1727  // Sequence numbers control the order of execution of hotplug-related changes, which would otherwise be executed in
  1728  // arbitrary order by task runner, leading to unexpected results if multiple events for same device are in flight
  1729  // (e.g. plugging, followed by immediate unplugging, or snapd restart with pending hotplug changes).
  1730  // The handler expects "hotplug-key" and "hotplug-seq" values set on own and other hotplug-related changes.
  1731  func (m *InterfaceManager) doHotplugSeqWait(task *state.Task, _ *tomb.Tomb) error {
  1732  	st := task.State()
  1733  	st.Lock()
  1734  	defer st.Unlock()
  1735  
  1736  	chg := task.Change()
  1737  	if chg == nil || !isHotplugChange(chg) {
  1738  		return fmt.Errorf("internal error: task %q not in a hotplug change", task.Kind())
  1739  	}
  1740  
  1741  	seq, hotplugKey, err := getHotplugChangeAttrs(chg)
  1742  	if err != nil {
  1743  		return err
  1744  	}
  1745  
  1746  	for _, otherChg := range st.Changes() {
  1747  		if otherChg.Status().Ready() || otherChg.ID() == chg.ID() {
  1748  			continue
  1749  		}
  1750  
  1751  		// only inspect hotplug changes
  1752  		if !isHotplugChange(otherChg) {
  1753  			continue
  1754  		}
  1755  
  1756  		otherSeq, otherKey, err := getHotplugChangeAttrs(otherChg)
  1757  		if err != nil {
  1758  			return err
  1759  		}
  1760  
  1761  		// conflict with retry if there another change affecting same device and has lower sequence number
  1762  		if hotplugKey == otherKey && otherSeq < seq {
  1763  			task.Logf("Waiting processing of earlier hotplug event change %q affecting device with hotplug key %q", otherChg.Kind(), hotplugKey)
  1764  			// TODO: consider introducing a new task that runs last and does EnsureBefore(0) for hotplug changes
  1765  			return &state.Retry{After: hotplugRetryTimeout}
  1766  		}
  1767  	}
  1768  
  1769  	// no conflicting change for same hotplug key found
  1770  	return nil
  1771  }