github.com/hugh712/snapd@v0.0.0-20200910133618-1a99902bd583/overlord/ifacestate/handlers.go (about)

     1  // -*- Mode: Go; indent-tabs-mode: t -*-
     2  
     3  /*
     4   * Copyright (C) 2016 Canonical Ltd
     5   *
     6   * This program is free software: you can redistribute it and/or modify
     7   * it under the terms of the GNU General Public License version 3 as
     8   * published by the Free Software Foundation.
     9   *
    10   * This program is distributed in the hope that it will be useful,
    11   * but WITHOUT ANY WARRANTY; without even the implied warranty of
    12   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13   * GNU General Public License for more details.
    14   *
    15   * You should have received a copy of the GNU General Public License
    16   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17   *
    18   */
    19  
    20  package ifacestate
    21  
    22  import (
    23  	"fmt"
    24  	"reflect"
    25  	"sort"
    26  	"strings"
    27  	"time"
    28  
    29  	"gopkg.in/tomb.v2"
    30  
    31  	"github.com/snapcore/snapd/i18n"
    32  	"github.com/snapcore/snapd/interfaces"
    33  	"github.com/snapcore/snapd/interfaces/hotplug"
    34  	"github.com/snapcore/snapd/logger"
    35  	"github.com/snapcore/snapd/overlord/snapstate"
    36  	"github.com/snapcore/snapd/overlord/state"
    37  	"github.com/snapcore/snapd/snap"
    38  	"github.com/snapcore/snapd/timings"
    39  )
    40  
    41  // confinementOptions returns interfaces.ConfinementOptions from snapstate.Flags.
    42  func confinementOptions(flags snapstate.Flags) interfaces.ConfinementOptions {
    43  	return interfaces.ConfinementOptions{
    44  		DevMode:  flags.DevMode,
    45  		JailMode: flags.JailMode,
    46  		Classic:  flags.Classic,
    47  	}
    48  }
    49  
    50  func (m *InterfaceManager) setupAffectedSnaps(task *state.Task, affectingSnap string, affectedSnaps []string, tm timings.Measurer) error {
    51  	st := task.State()
    52  
    53  	// Setup security of the affected snaps.
    54  	for _, affectedInstanceName := range affectedSnaps {
    55  		// the snap that triggered the change needs to be skipped
    56  		if affectedInstanceName == affectingSnap {
    57  			continue
    58  		}
    59  		var snapst snapstate.SnapState
    60  		if err := snapstate.Get(st, affectedInstanceName, &snapst); err != nil {
    61  			task.Errorf("skipping security profiles setup for snap %q when handling snap %q: %v", affectedInstanceName, affectingSnap, err)
    62  			continue
    63  		}
    64  		affectedSnapInfo, err := snapst.CurrentInfo()
    65  		if err != nil {
    66  			return err
    67  		}
    68  		if err := addImplicitSlots(st, affectedSnapInfo); err != nil {
    69  			return err
    70  		}
    71  		opts := confinementOptions(snapst.Flags)
    72  		if err := m.setupSnapSecurity(task, affectedSnapInfo, opts, tm); err != nil {
    73  			return err
    74  		}
    75  	}
    76  	return nil
    77  }
    78  
    79  func (m *InterfaceManager) doSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
    80  	task.State().Lock()
    81  	defer task.State().Unlock()
    82  
    83  	perfTimings := state.TimingsForTask(task)
    84  	defer perfTimings.Save(task.State())
    85  
    86  	// Get snap.Info from bits handed by the snap manager.
    87  	snapsup, err := snapstate.TaskSnapSetup(task)
    88  	if err != nil {
    89  		return err
    90  	}
    91  
    92  	snapInfo, err := snap.ReadInfo(snapsup.InstanceName(), snapsup.SideInfo)
    93  	if err != nil {
    94  		return err
    95  	}
    96  
    97  	if len(snapInfo.BadInterfaces) > 0 {
    98  		task.State().Warnf("%s", snap.BadInterfacesSummary(snapInfo))
    99  	}
   100  
   101  	// We no longer do/need core-phase-2, see
   102  	//   https://github.com/snapcore/snapd/pull/5301
   103  	// This code is just here to deal with old state that may still
   104  	// have the 2nd setup-profiles with this flag set.
   105  	var corePhase2 bool
   106  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   107  		return err
   108  	}
   109  	if corePhase2 {
   110  		// nothing to do
   111  		return nil
   112  	}
   113  
   114  	opts := confinementOptions(snapsup.Flags)
   115  	return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
   116  }
   117  
// setupProfilesForSnap re-adds snapInfo to the interfaces repository,
// reloads its connections from the state, and regenerates the security
// profiles of snapInfo plus every snap affected by the disconnect/reconnect
// cycle. opts are the confinement options for snapInfo itself; the options
// for other affected snaps are derived from their state.
func (m *InterfaceManager) setupProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapInfo *snap.Info, opts interfaces.ConfinementOptions, tm timings.Measurer) error {
	st := task.State()

	if err := addImplicitSlots(task.State(), snapInfo); err != nil {
		return err
	}

	snapName := snapInfo.InstanceName()

	// The snap may have been updated so perform the following operation to
	// ensure that we are always working on the correct state:
	//
	// - disconnect all connections to/from the given snap
	//   - remembering the snaps that were affected by this operation
	// - remove the (old) snap from the interfaces repository
	// - add the (new) snap to the interfaces repository
	// - restore connections based on what is kept in the state
	//   - if a connection cannot be restored then remove it from the state
	// - setup the security of all the affected snaps
	disconnectedSnaps, err := m.repo.DisconnectSnap(snapName)
	if err != nil {
		return err
	}
	// XXX: what about snap renames? We should remove the old name (or switch
	// to IDs in the interfaces repository)
	if err := m.repo.RemoveSnap(snapName); err != nil {
		return err
	}
	if err := m.repo.AddSnap(snapInfo); err != nil {
		return err
	}
	if len(snapInfo.BadInterfaces) > 0 {
		// Surface plugs/slots that could not be added to the repository.
		task.Logf("%s", snap.BadInterfacesSummary(snapInfo))
	}

	// Reload the connections and compute the set of affected snaps. The set
	// affectedSet set contains name of all the affected snap instances.  The
	// arrays affectedNames and affectedSnaps contain, arrays of snap names and
	// snapInfo's, respectively. The arrays are sorted by name with the special
	// exception that the snap being setup is always first. The affectedSnaps
	// array may be shorter than the set of affected snaps in case any of the
	// snaps cannot be found in the state.
	reconnectedSnaps, err := m.reloadConnections(snapName)
	if err != nil {
		return err
	}
	affectedSet := make(map[string]bool)
	for _, name := range disconnectedSnaps {
		affectedSet[name] = true
	}
	for _, name := range reconnectedSnaps {
		affectedSet[name] = true
	}

	// Sort the set of affected names, ensuring that the snap being setup
	// is first regardless of the name it has.
	affectedNames := make([]string, 0, len(affectedSet))
	for name := range affectedSet {
		if name != snapName {
			affectedNames = append(affectedNames, name)
		}
	}
	sort.Strings(affectedNames)
	affectedNames = append([]string{snapName}, affectedNames...)

	// Obtain snap.Info for each affected snap, skipping those that cannot be
	// found and compute the confinement options that apply to it.
	affectedSnaps := make([]*snap.Info, 0, len(affectedSet))
	confinementOpts := make([]interfaces.ConfinementOptions, 0, len(affectedSet))
	// For the snap being setup we know exactly what was requested.
	affectedSnaps = append(affectedSnaps, snapInfo)
	confinementOpts = append(confinementOpts, opts)
	// For remaining snaps we need to interrogate the state.
	for _, name := range affectedNames[1:] {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, name, &snapst); err != nil {
			// A missing snap is logged and skipped, matching the
			// "may be shorter" note above.
			task.Errorf("cannot obtain state of snap %s: %s", name, err)
			continue
		}
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		if err := addImplicitSlots(st, snapInfo); err != nil {
			return err
		}
		affectedSnaps = append(affectedSnaps, snapInfo)
		confinementOpts = append(confinementOpts, confinementOptions(snapst.Flags))
	}

	return m.setupSecurityByBackend(task, affectedSnaps, confinementOpts, tm)
}
   210  
   211  func (m *InterfaceManager) doRemoveProfiles(task *state.Task, tomb *tomb.Tomb) error {
   212  	st := task.State()
   213  	st.Lock()
   214  	defer st.Unlock()
   215  
   216  	perfTimings := state.TimingsForTask(task)
   217  	defer perfTimings.Save(st)
   218  
   219  	// Get SnapSetup for this snap. This is gives us the name of the snap.
   220  	snapSetup, err := snapstate.TaskSnapSetup(task)
   221  	if err != nil {
   222  		return err
   223  	}
   224  	snapName := snapSetup.InstanceName()
   225  
   226  	return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   227  }
   228  
   229  func (m *InterfaceManager) removeProfilesForSnap(task *state.Task, _ *tomb.Tomb, snapName string, tm timings.Measurer) error {
   230  	// Disconnect the snap entirely.
   231  	// This is required to remove the snap from the interface repository.
   232  	// The returned list of affected snaps will need to have its security setup
   233  	// to reflect the change.
   234  	affectedSnaps, err := m.repo.DisconnectSnap(snapName)
   235  	if err != nil {
   236  		return err
   237  	}
   238  	if err := m.setupAffectedSnaps(task, snapName, affectedSnaps, tm); err != nil {
   239  		return err
   240  	}
   241  
   242  	// Remove the snap from the interface repository.
   243  	// This discards all the plugs and slots belonging to that snap.
   244  	if err := m.repo.RemoveSnap(snapName); err != nil {
   245  		return err
   246  	}
   247  
   248  	// Remove security artefacts of the snap.
   249  	if err := m.removeSnapSecurity(task, snapName); err != nil {
   250  		return err
   251  	}
   252  
   253  	return nil
   254  }
   255  
   256  func (m *InterfaceManager) undoSetupProfiles(task *state.Task, tomb *tomb.Tomb) error {
   257  	st := task.State()
   258  	st.Lock()
   259  	defer st.Unlock()
   260  
   261  	perfTimings := state.TimingsForTask(task)
   262  	defer perfTimings.Save(st)
   263  
   264  	var corePhase2 bool
   265  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   266  		return err
   267  	}
   268  	if corePhase2 {
   269  		// let the first setup-profiles deal with this
   270  		return nil
   271  	}
   272  
   273  	snapsup, err := snapstate.TaskSnapSetup(task)
   274  	if err != nil {
   275  		return err
   276  	}
   277  	snapName := snapsup.InstanceName()
   278  
   279  	// Get the name from SnapSetup and use it to find the current SideInfo
   280  	// about the snap, if there is one.
   281  	var snapst snapstate.SnapState
   282  	err = snapstate.Get(st, snapName, &snapst)
   283  	if err != nil && err != state.ErrNoState {
   284  		return err
   285  	}
   286  	sideInfo := snapst.CurrentSideInfo()
   287  	if sideInfo == nil {
   288  		// The snap was not installed before so undo should remove security profiles.
   289  		return m.removeProfilesForSnap(task, tomb, snapName, perfTimings)
   290  	} else {
   291  		// The snap was installed before so undo should setup the old security profiles.
   292  		snapInfo, err := snap.ReadInfo(snapName, sideInfo)
   293  		if err != nil {
   294  			return err
   295  		}
   296  		opts := confinementOptions(snapst.Flags)
   297  		return m.setupProfilesForSnap(task, tomb, snapInfo, opts, perfTimings)
   298  	}
   299  }
   300  
   301  func (m *InterfaceManager) doDiscardConns(task *state.Task, _ *tomb.Tomb) error {
   302  	st := task.State()
   303  	st.Lock()
   304  	defer st.Unlock()
   305  
   306  	snapSetup, err := snapstate.TaskSnapSetup(task)
   307  	if err != nil {
   308  		return err
   309  	}
   310  
   311  	instanceName := snapSetup.InstanceName()
   312  
   313  	var snapst snapstate.SnapState
   314  	err = snapstate.Get(st, instanceName, &snapst)
   315  	if err != nil && err != state.ErrNoState {
   316  		return err
   317  	}
   318  
   319  	if err == nil && len(snapst.Sequence) != 0 {
   320  		return fmt.Errorf("cannot discard connections for snap %q while it is present", instanceName)
   321  	}
   322  	conns, err := getConns(st)
   323  	if err != nil {
   324  		return err
   325  	}
   326  	removed := make(map[string]*connState)
   327  	for id := range conns {
   328  		connRef, err := interfaces.ParseConnRef(id)
   329  		if err != nil {
   330  			return err
   331  		}
   332  		if connRef.PlugRef.Snap == instanceName || connRef.SlotRef.Snap == instanceName {
   333  			removed[id] = conns[id]
   334  			delete(conns, id)
   335  		}
   336  	}
   337  	task.Set("removed", removed)
   338  	setConns(st, conns)
   339  	return nil
   340  }
   341  
   342  func (m *InterfaceManager) undoDiscardConns(task *state.Task, _ *tomb.Tomb) error {
   343  	st := task.State()
   344  	st.Lock()
   345  	defer st.Unlock()
   346  
   347  	var removed map[string]*connState
   348  	err := task.Get("removed", &removed)
   349  	if err != nil && err != state.ErrNoState {
   350  		return err
   351  	}
   352  
   353  	conns, err := getConns(st)
   354  	if err != nil {
   355  		return err
   356  	}
   357  
   358  	for id, connState := range removed {
   359  		conns[id] = connState
   360  	}
   361  	setConns(st, conns)
   362  	task.Set("removed", nil)
   363  	return nil
   364  }
   365  
   366  func getDynamicHookAttributes(task *state.Task) (plugAttrs, slotAttrs map[string]interface{}, err error) {
   367  	if err = task.Get("plug-dynamic", &plugAttrs); err != nil && err != state.ErrNoState {
   368  		return nil, nil, err
   369  	}
   370  	if err = task.Get("slot-dynamic", &slotAttrs); err != nil && err != state.ErrNoState {
   371  		return nil, nil, err
   372  	}
   373  	if plugAttrs == nil {
   374  		plugAttrs = make(map[string]interface{})
   375  	}
   376  	if slotAttrs == nil {
   377  		slotAttrs = make(map[string]interface{})
   378  	}
   379  
   380  	return plugAttrs, slotAttrs, nil
   381  }
   382  
// setDynamicHookAttributes stores the dynamic plug and slot attributes on
// the task (keys "plug-dynamic" and "slot-dynamic"), mirroring what
// getDynamicHookAttributes reads.
func setDynamicHookAttributes(task *state.Task, plugAttrs, slotAttrs map[string]interface{}) {
	task.Set("plug-dynamic", plugAttrs)
	task.Set("slot-dynamic", slotAttrs)
}
   387  
// doConnect establishes the connection identified by the task's plug and
// slot references: it runs the policy check appropriate for the kind of
// connection (auto-connections use the "auto-connection" rules, manual and
// by-gadget connections the "connection" rules), connects via the
// repository, regenerates the security profiles of both snaps unless
// delayed-setup-profiles is set, and records the connection in the state.
func (m *InterfaceManager) doConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	// "auto" marks connections created by auto-connect logic, "by-gadget"
	// those instructed by the gadget snap; both flags select the policy
	// check below and are persisted in the conns state.
	var autoConnect bool
	if err := task.Get("auto", &autoConnect); err != nil && err != state.ErrNoState {
		return err
	}
	var byGadget bool
	if err := task.Get("by-gadget", &byGadget); err != nil && err != state.ErrNoState {
		return err
	}
	// When set, profile regeneration is left to a later task instead of
	// being done here.
	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", plugRef.Snap)
		}
		return err
	}

	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		if autoConnect && err == state.ErrNoState {
			// conflict logic should prevent this
			return fmt.Errorf("internal error: snap %q is no longer available for auto-connecting", slotRef.Snap)
		}
		return err
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}

	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		// conflict logic should prevent this
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// attributes are always present, even if there are no hooks (they're initialized by Connect).
	plugDynamicAttrs, slotDynamicAttrs, err := getDynamicHookAttributes(task)
	if err != nil {
		return fmt.Errorf("failed to get hook attributes: %s", err)
	}

	var policyChecker interfaces.PolicyFunc

	// manual connections and connections by the gadget obey the
	// policy "connection" rules, other auto-connections obey the
	// "auto-connection" rules
	if autoConnect && !byGadget {
		autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = func(plug *interfaces.ConnectedPlug, slot *interfaces.ConnectedSlot) (bool, error) {
			// The autochecker's extra return value is not needed here.
			ok, _, err := autochecker.check(plug, slot)
			return ok, err
		}
	} else {
		policyCheck, err := newConnectChecker(st, deviceCtx)
		if err != nil {
			return err
		}
		policyChecker = policyCheck.check
	}

	// static attributes of the plug and slot not provided, the ones from snap infos will be used
	conn, err := m.repo.Connect(connRef, nil, plugDynamicAttrs, nil, slotDynamicAttrs, policyChecker)
	if err != nil || conn == nil {
		// conn == nil with a nil err: nothing was connected, nothing
		// more to do here.
		return err
	}

	if !delayedSetupProfiles {
		slotOpts := confinementOptions(slotSnapst.Flags)
		if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
			return err
		}

		plugOpts := confinementOptions(plugSnapst.Flags)
		if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
			return err
		}
	} else {
		logger.Debugf("Connect handler: skipping setupSnapSecurity for snaps %q and %q", plug.Snap.InstanceName(), slot.Snap.InstanceName())
	}

	// For undo handler. We need to remember old state of the connection only
	// if undesired flag is set because that means there was a remembered
	// inactive connection already and we should restore its properties
	// in case of undo. Otherwise we don't have to keep old-conn because undo
	// can simply delete any trace of the connection.
	if old, ok := conns[connRef.ID()]; ok && old.Undesired {
		task.Set("old-conn", old)
	}

	conns[connRef.ID()] = &connState{
		Interface:        conn.Interface(),
		StaticPlugAttrs:  conn.Plug.StaticAttrs(),
		DynamicPlugAttrs: conn.Plug.DynamicAttrs(),
		StaticSlotAttrs:  conn.Slot.StaticAttrs(),
		DynamicSlotAttrs: conn.Slot.DynamicAttrs(),
		Auto:             autoConnect,
		ByGadget:         byGadget,
		HotplugKey:       slot.HotplugKey,
	}
	setConns(st, conns)

	// the dynamic attributes might have been updated by the interface's BeforeConnectPlug/Slot code,
	// so we need to update the task for connect-plug- and connect-slot- hooks to see new values.
	setDynamicHookAttributes(task, conn.Plug.DynamicAttrs(), conn.Slot.DynamicAttrs())
	return nil
}
   530  
// doDisconnect breaks the connection identified by the task's plug and slot
// references, refreshes the security profiles of both snaps, and updates the
// conns state. Depending on task flags the connection is forgotten entirely
// ("forget"), kept but marked hotplug-gone ("by-hotplug"), or — for an
// auto-connection disconnected manually — flagged Undesired so it is not
// re-established automatically.
func (m *InterfaceManager) doDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	cref := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// forget flag can be passed with snap disconnect --forget
	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	// Collect the states of both ends of the connection. A snap missing
	// from the state makes the whole disconnect a no-op; other lookup
	// errors merely skip the profile refresh for that snap.
	var snapStates []snapstate.SnapState
	for _, instanceName := range []string{plugRef.Snap, slotRef.Snap} {
		var snapst snapstate.SnapState
		if err := snapstate.Get(st, instanceName, &snapst); err != nil {
			if err == state.ErrNoState {
				task.Logf("skipping disconnect operation for connection %s %s, snap %q doesn't exist", plugRef, slotRef, instanceName)
				return nil
			}
			task.Errorf("skipping security profiles setup for snap %q when disconnecting %s from %s: %v", instanceName, plugRef, slotRef, err)
		} else {
			snapStates = append(snapStates, snapst)
		}
	}

	conn, ok := conns[cref.ID()]
	if !ok {
		return fmt.Errorf("internal error: connection %q not found in state", cref.ID())
	}

	// store old connection for undo
	task.Set("old-conn", conn)

	err = m.repo.Disconnect(plugRef.Snap, plugRef.Name, slotRef.Snap, slotRef.Name)
	if err != nil {
		_, notConnected := err.(*interfaces.NotConnectedError)
		_, noPlugOrSlot := err.(*interfaces.NoPlugOrSlotError)
		// not connected, just forget it.
		if forget && (notConnected || noPlugOrSlot) {
			delete(conns, cref.ID())
			setConns(st, conns)
			return nil
		}
		return fmt.Errorf("snapd changed, please retry the operation: %v", err)
	}

	// Refresh the security profiles of the snaps we found in the state.
	for _, snapst := range snapStates {
		snapInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		opts := confinementOptions(snapst.Flags)
		if err := m.setupSnapSecurity(task, snapInfo, opts, perfTimings); err != nil {
			return err
		}
	}

	// "auto-disconnect" flag indicates it's a disconnect triggered automatically as part of snap removal;
	// such disconnects should not set undesired flag and instead just remove the connection.
	var autoDisconnect bool
	if err := task.Get("auto-disconnect", &autoDisconnect); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: failed to read 'auto-disconnect' flag: %s", err)
	}

	// "by-hotplug" flag indicates it's a disconnect triggered by hotplug remove event;
	// we want to keep information of the connection and just mark it as hotplug-gone.
	var byHotplug bool
	if err := task.Get("by-hotplug", &byHotplug); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'by-hotplug' flag: %s", err)
	}

	switch {
	case forget:
		delete(conns, cref.ID())
	case byHotplug:
		conn.HotplugGone = true
		conns[cref.ID()] = conn
	case conn.Auto && !autoDisconnect:
		// A manually disconnected auto-connection is remembered as
		// undesired; its attributes are cleared since they would be
		// recomputed on a future connect.
		conn.Undesired = true
		conn.DynamicPlugAttrs = nil
		conn.DynamicSlotAttrs = nil
		conn.StaticPlugAttrs = nil
		conn.StaticSlotAttrs = nil
		conns[cref.ID()] = conn
	default:
		delete(conns, cref.ID())
	}
	setConns(st, conns)

	return nil
}
   637  
// undoDisconnect reverts a disconnect by re-establishing the connection
// saved on the task as "old-conn" and regenerating the security profiles of
// both snaps. When the disconnect was a --forget of an inactive connection
// whose plug or slot no longer exists, the connection is only restored in
// the conns state without touching the repository.
func (m *InterfaceManager) undoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	var oldconn connState
	err := task.Get("old-conn", &oldconn)
	if err == state.ErrNoState {
		// The forward handler did not record anything; nothing to undo.
		return nil
	}
	if err != nil {
		return err
	}

	var forget bool
	if err := task.Get("forget", &forget); err != nil && err != state.ErrNoState {
		return fmt.Errorf("internal error: cannot read 'forget' flag: %s", err)
	}

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}

	conns, err := getConns(st)
	if err != nil {
		return err
	}

	var plugSnapst snapstate.SnapState
	if err := snapstate.Get(st, plugRef.Snap, &plugSnapst); err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	if err := snapstate.Get(st, slotRef.Snap, &slotSnapst); err != nil {
		return err
	}

	connRef := &interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if forget && (plug == nil || slot == nil) {
		// we were trying to forget an inactive connection that was
		// referring to a non-existing plug or slot; just restore it
		// in the conns state but do not reconnect via repository.
		conns[connRef.ID()] = &oldconn
		setConns(st, conns)
		return nil
	}
	if plug == nil {
		return fmt.Errorf("snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	if slot == nil {
		return fmt.Errorf("snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	// Reconnect with the dynamic attributes saved before the disconnect;
	// no policy checker is passed when undoing.
	_, err = m.repo.Connect(connRef, nil, oldconn.DynamicPlugAttrs, nil, oldconn.DynamicSlotAttrs, nil)
	if err != nil {
		return err
	}

	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	conns[connRef.ID()] = &oldconn
	setConns(st, conns)

	return nil
}
   717  
// undoConnect reverts a connect: the conns state entry is restored to the
// remembered "old-conn" value (or deleted when none was saved), the
// connection is removed from the repository, and — unless
// delayed-setup-profiles is set — the security profiles of both snaps are
// regenerated.
func (m *InterfaceManager) undoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(task)
	defer perfTimings.Save(st)

	plugRef, slotRef, err := getPlugAndSlotRefs(task)
	if err != nil {
		return err
	}
	connRef := interfaces.ConnRef{PlugRef: plugRef, SlotRef: slotRef}
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// doConnect saved "old-conn" only when it replaced a remembered
	// undesired connection; otherwise the entry is simply deleted.
	var old connState
	err = task.Get("old-conn", &old)
	if err != nil && err != state.ErrNoState {
		return err
	}
	if err == nil {
		conns[connRef.ID()] = &old
	} else {
		delete(conns, connRef.ID())
	}
	setConns(st, conns)

	if err := m.repo.Disconnect(connRef.PlugRef.Snap, connRef.PlugRef.Name, connRef.SlotRef.Snap, connRef.SlotRef.Name); err != nil {
		return err
	}

	var delayedSetupProfiles bool
	if err := task.Get("delayed-setup-profiles", &delayedSetupProfiles); err != nil && err != state.ErrNoState {
		return err
	}
	if delayedSetupProfiles {
		// Profile regeneration is handled elsewhere in this mode.
		logger.Debugf("Connect undo handler: skipping setupSnapSecurity for snaps %q and %q", connRef.PlugRef.Snap, connRef.SlotRef.Snap)
		return nil
	}

	plug := m.repo.Plug(connRef.PlugRef.Snap, connRef.PlugRef.Name)
	if plug == nil {
		return fmt.Errorf("internal error: snap %q has no %q plug", connRef.PlugRef.Snap, connRef.PlugRef.Name)
	}
	slot := m.repo.Slot(connRef.SlotRef.Snap, connRef.SlotRef.Name)
	if slot == nil {
		return fmt.Errorf("internal error: snap %q has no %q slot", connRef.SlotRef.Snap, connRef.SlotRef.Name)
	}

	var plugSnapst snapstate.SnapState
	err = snapstate.Get(st, plugRef.Snap, &plugSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", plugRef.Snap)
	}
	if err != nil {
		return err
	}
	var slotSnapst snapstate.SnapState
	err = snapstate.Get(st, slotRef.Snap, &slotSnapst)
	if err == state.ErrNoState {
		return fmt.Errorf("internal error: snap %q is no longer available", slotRef.Snap)
	}
	if err != nil {
		return err
	}
	slotOpts := confinementOptions(slotSnapst.Flags)
	if err := m.setupSnapSecurity(task, slot.Snap, slotOpts, perfTimings); err != nil {
		return err
	}
	plugOpts := confinementOptions(plugSnapst.Flags)
	if err := m.setupSnapSecurity(task, plug.Snap, plugOpts, perfTimings); err != nil {
		return err
	}

	return nil
}
   797  
// contentLinkRetryTimeout is the retry delay used for shared content
// (content interface) connection attempts.
var contentLinkRetryTimeout = 30 * time.Second

// hotplugRetryTimeout is the retry delay used for hotplug-related tasks.
var hotplugRetryTimeout = 300 * time.Millisecond
   803  
   804  func obsoleteCorePhase2SetupProfiles(kind string, task *state.Task) (bool, error) {
   805  	if kind != "setup-profiles" {
   806  		return false, nil
   807  	}
   808  
   809  	var corePhase2 bool
   810  	if err := task.Get("core-phase-2", &corePhase2); err != nil && err != state.ErrNoState {
   811  		return false, err
   812  	}
   813  	return corePhase2, nil
   814  }
   815  
// checkAutoconnectConflicts scans all non-ready tasks in the state and
// returns a *state.Retry error when any of them could interfere with
// auto-connecting plugSnap to slotSnap (other connect/disconnect tasks on
// the same snaps, or link/unlink/setup-profiles/discard tasks operating on
// either snap). A nil return means there is no conflict.
func checkAutoconnectConflicts(st *state.State, autoconnectTask *state.Task, plugSnap, slotSnap string) error {
	for _, task := range st.Tasks() {
		if task.Status().Ready() {
			continue
		}

		k := task.Kind()
		if k == "connect" || k == "disconnect" {
			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
			// are rejected by conflict check logic in snapstate.
			plugRef, slotRef, err := getPlugAndSlotRefs(task)
			if err != nil {
				return err
			}
			if plugRef.Snap == plugSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
			}
			if slotRef.Snap == slotSnap {
				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
			}
			continue
		}

		snapsup, err := snapstate.TaskSnapSetup(task)
		// e.g. hook tasks don't have task snap setup
		if err != nil {
			continue
		}

		otherSnapName := snapsup.InstanceName()

		// different snaps - no conflict
		if otherSnapName != plugSnap && otherSnapName != slotSnap {
			continue
		}

		// setup-profiles core-phase-2 is now no-op, we shouldn't
		// conflict on it; note, old snapd would create this task even
		// for regular snaps if installed with the dangerous flag.
		obsoleteCorePhase2, err := obsoleteCorePhase2SetupProfiles(k, task)
		if err != nil {
			return err
		}
		if obsoleteCorePhase2 {
			continue
		}

		// other snap that affects us because of plug or slot
		if k == "unlink-snap" || k == "link-snap" || k == "setup-profiles" || k == "discard-snap" {
			// discard-snap is scheduled as part of garbage collection during refresh, if multiple revsions are already installed.
			// this revision check avoids conflict with own discard tasks created as part of install/refresh.
			if k == "discard-snap" && autoconnectTask.Change() != nil && autoconnectTask.Change().ID() == task.Change().ID() {
				continue
			}
			// if snap is getting removed, we will retry but the snap will be gone and auto-connect becomes no-op
			// if snap is getting installed/refreshed - temporary conflict, retry later
			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
		}
	}
	return nil
}
   878  
   879  func checkDisconnectConflicts(st *state.State, disconnectingSnap, plugSnap, slotSnap string) error {
   880  	for _, task := range st.Tasks() {
   881  		if task.Status().Ready() {
   882  			continue
   883  		}
   884  
   885  		k := task.Kind()
   886  		if k == "connect" || k == "disconnect" {
   887  			// retry if we found another connect/disconnect affecting same snap; note we can only encounter
   888  			// connects/disconnects created by doAutoDisconnect / doAutoConnect here as manual interface ops
   889  			// are rejected by conflict check logic in snapstate.
   890  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   891  			if err != nil {
   892  				return err
   893  			}
   894  			if plugRef.Snap == plugSnap || slotRef.Snap == slotSnap {
   895  				return &state.Retry{After: connectRetryTimeout}
   896  			}
   897  			continue
   898  		}
   899  
   900  		snapsup, err := snapstate.TaskSnapSetup(task)
   901  		// e.g. hook tasks don't have task snap setup
   902  		if err != nil {
   903  			continue
   904  		}
   905  
   906  		otherSnapName := snapsup.InstanceName()
   907  
   908  		// different snaps - no conflict
   909  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   910  			continue
   911  		}
   912  
   913  		// another task related to same snap op (unrelated op would be blocked by snapstate conflict logic)
   914  		if otherSnapName == disconnectingSnap {
   915  			continue
   916  		}
   917  
   918  		// note, don't care about unlink-snap for the opposite end. This relies
   919  		// on the fact that auto-disconnect will create conflicting "disconnect" tasks that
   920  		// we will retry with the logic above.
   921  		if k == "link-snap" || k == "setup-profiles" {
   922  			// other snap is getting installed/refreshed - temporary conflict
   923  			return &state.Retry{After: connectRetryTimeout}
   924  		}
   925  	}
   926  	return nil
   927  }
   928  
   929  func checkHotplugDisconnectConflicts(st *state.State, plugSnap, slotSnap string) error {
   930  	for _, task := range st.Tasks() {
   931  		if task.Status().Ready() {
   932  			continue
   933  		}
   934  
   935  		k := task.Kind()
   936  		if k == "connect" || k == "disconnect" {
   937  			plugRef, slotRef, err := getPlugAndSlotRefs(task)
   938  			if err != nil {
   939  				return err
   940  			}
   941  			if plugRef.Snap == plugSnap {
   942  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting plug snap %s, task %q", plugSnap, k)}
   943  			}
   944  			if slotRef.Snap == slotSnap {
   945  				return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting slot snap %s, task %q", slotSnap, k)}
   946  			}
   947  			continue
   948  		}
   949  
   950  		snapsup, err := snapstate.TaskSnapSetup(task)
   951  		// e.g. hook tasks don't have task snap setup
   952  		if err != nil {
   953  			continue
   954  		}
   955  		otherSnapName := snapsup.InstanceName()
   956  
   957  		// different snaps - no conflict
   958  		if otherSnapName != plugSnap && otherSnapName != slotSnap {
   959  			continue
   960  		}
   961  
   962  		if k == "link-snap" || k == "setup-profiles" || k == "unlink-snap" {
   963  			// other snap is getting installed/refreshed/removed - temporary conflict
   964  			return &state.Retry{After: connectRetryTimeout, Reason: fmt.Sprintf("conflicting snap %s with task %q", otherSnapName, k)}
   965  		}
   966  	}
   967  	return nil
   968  }
   969  
   970  // inSameChangeWaitChains returns true if there is a wait chain so
   971  // that `startT` is run before `searchT` in the same state.Change.
   972  func inSameChangeWaitChain(startT, searchT *state.Task) bool {
   973  	// Trivial case, tasks in different changes (they could in theory
   974  	// still have cross-change waits but we don't do these today).
   975  	// In this case, return quickly.
   976  	if startT.Change() != searchT.Change() {
   977  		return false
   978  	}
   979  	// Do a recursive check if its in the same change
   980  	return waitChainSearch(startT, searchT)
   981  }
   982  
   983  func waitChainSearch(startT, searchT *state.Task) bool {
   984  	for _, cand := range startT.HaltTasks() {
   985  		if cand == searchT {
   986  			return true
   987  		}
   988  		if waitChainSearch(cand, searchT) {
   989  			return true
   990  		}
   991  	}
   992  
   993  	return false
   994  }
   995  
   996  // batchConnectTasks creates connect tasks and interface hooks for
   997  // conns and sets their wait chain with regard to the setupProfiles
   998  // task.
   999  //
  1000  // The tasks are chained so that: - prepare-plug-, prepare-slot- and
  1001  // connect tasks are all executed before setup-profiles -
  1002  // connect-plug-, connect-slot- are all executed after setup-profiles.
  1003  // The "delayed-setup-profiles" flag is set on the connect tasks to
  1004  // indicate that doConnect handler should not set security backends up
  1005  // because this will be done later by the setup-profiles task.
  1006  func batchConnectTasks(st *state.State, snapsup *snapstate.SnapSetup, conns map[string]*interfaces.ConnRef, connOpts map[string]*connectOpts) (*state.TaskSet, error) {
  1007  	setupProfiles := st.NewTask("setup-profiles", fmt.Sprintf(i18n.G("Setup snap %q (%s) security profiles for auto-connections"), snapsup.InstanceName(), snapsup.Revision()))
  1008  	setupProfiles.Set("snap-setup", snapsup)
  1009  
  1010  	ts := state.NewTaskSet()
  1011  	for connID, conn := range conns {
  1012  		var opts connectOpts
  1013  		if providedOpts := connOpts[connID]; providedOpts != nil {
  1014  			opts = *providedOpts
  1015  		} else {
  1016  			// default
  1017  			opts.AutoConnect = true
  1018  		}
  1019  		opts.DelayedSetupProfiles = true
  1020  		connectTs, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, opts)
  1021  		if err != nil {
  1022  			return nil, fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
  1023  		}
  1024  
  1025  		// setup-profiles needs to wait for the main "connect" task
  1026  		connectTask, _ := connectTs.Edge(ConnectTaskEdge)
  1027  		if connectTask == nil {
  1028  			return nil, fmt.Errorf("internal error: no 'connect' task found for %q", conn)
  1029  		}
  1030  		setupProfiles.WaitFor(connectTask)
  1031  
  1032  		// setup-profiles must be run before the task that marks the end of connect-plug- and connect-slot- hooks
  1033  		afterConnectTask, _ := connectTs.Edge(AfterConnectHooksEdge)
  1034  		if afterConnectTask != nil {
  1035  			afterConnectTask.WaitFor(setupProfiles)
  1036  		}
  1037  		ts.AddAll(connectTs)
  1038  	}
  1039  	if len(ts.Tasks()) > 0 {
  1040  		ts.AddTask(setupProfiles)
  1041  	}
  1042  	return ts, nil
  1043  }
  1044  
  1045  func filterForSlot(slot *snap.SlotInfo) func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
  1046  	return func(candSlots []*snap.SlotInfo) []*snap.SlotInfo {
  1047  		for _, candSlot := range candSlots {
  1048  			if candSlot.String() == slot.String() {
  1049  				return []*snap.SlotInfo{slot}
  1050  			}
  1051  		}
  1052  		return nil
  1053  	}
  1054  }
  1055  
// doAutoConnect creates task(s) to connect the given snap to viable candidates.
//
// Gadget-defined connections are considered first (and remembered with the
// "by-gadget" option), then auto-connections for the snap's plugs and slots;
// the resulting connect tasks are injected after this task in the same change.
func (m *InterfaceManager) doAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	// current connections as remembered in the state
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	// The previous task (link-snap) may have triggered a restart,
	// if this is the case we can only proceed once the restart
	// has happened or we may not have all the interfaces of the
	// new core/base snap.
	if err := snapstate.WaitRestart(task, snapsup); err != nil {
		return err
	}

	snapName := snapsup.InstanceName()

	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
	if err != nil {
		return err
	}

	// NOTE(review): "gadgect" is presumably a typo for "gadget"; consider
	// renaming this local in a follow-up.
	gadgectConnect := newGadgetConnect(st, task, m.repo, snapName, deviceCtx)

	// wait for auto-install, started by prerequisites code, for
	// the default-providers of content ifaces so we can
	// auto-connect to them; snapstate prerequisites does a bit
	// more filtering than this so defaultProviders here can
	// contain some more snaps; should not be an issue in practice
	// given the check below checks for same chain and we don't
	// forcefully wait for defaultProviders; we just retry for
	// things in the intersection between defaultProviders here and
	// snaps with not ready link-snap|setup-profiles tasks
	defaultProviders := snap.DefaultContentProviders(m.repo.Plugs(snapName))
	for _, chg := range st.Changes() {
		if chg.Status().Ready() {
			continue
		}
		for _, t := range chg.Tasks() {
			if t.Status().Ready() {
				continue
			}
			if t.Kind() != "link-snap" && t.Kind() != "setup-profiles" {
				continue
			}
			if snapsup, err := snapstate.TaskSnapSetup(t); err == nil {
				// Only retry if the task that installs the
				// content provider is not waiting for us
				// (or this will just hang forever).
				_, ok := defaultProviders[snapsup.InstanceName()]
				if ok && !inSameChangeWaitChain(task, t) {
					return &state.Retry{After: contentLinkRetryTimeout}
				}
			}
		}
	}

	plugs := m.repo.Plugs(snapName)
	slots := m.repo.Slots(snapName)
	newconns := make(map[string]*interfaces.ConnRef, len(plugs)+len(slots))
	var connOpts map[string]*connectOpts

	// conflictError turns a conflict-check result into either a retry (for
	// temporary conflicts) or a hard task error.
	conflictError := func(retry *state.Retry, err error) error {
		if retry != nil {
			task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
			return retry // will retry
		}
		return fmt.Errorf("auto-connect conflict check failed: %v", err)
	}

	// Consider gadget connections, we want to remember them in
	// any case with "by-gadget" set, so they should be processed
	// before the auto-connection ones.
	if err := gadgectConnect.addGadgetConnections(newconns, conns, conflictError); err != nil {
		return err
	}
	if len(newconns) > 0 {
		// mark all gadget-derived connections with by-gadget options
		connOpts = make(map[string]*connectOpts, len(newconns))
		byGadgetOpts := &connectOpts{AutoConnect: true, ByGadget: true}
		for key := range newconns {
			connOpts[key] = byGadgetOpts
		}
	}

	// Auto-connect all the plugs
	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
		return fmt.Sprintf("cannot auto-connect plug %s, candidates found: %s", plug, strings.Join(candRefs, ", "))
	}
	if err := autochecker.addAutoConnections(newconns, plugs, nil, conns, cannotAutoConnectLog, conflictError); err != nil {
		return err
	}
	// Auto-connect all the slots
	for _, slot := range slots {
		candidates := m.repo.AutoConnectCandidatePlugs(snapName, slot.Name, autochecker.check)
		if len(candidates) == 0 {
			continue
		}

		cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
			return fmt.Sprintf("cannot auto-connect slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
		}
		if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
			return err
		}
	}

	autots, err := batchConnectTasks(st, snapsup, newconns, connOpts)
	if err != nil {
		return err
	}

	if m.preseed && len(autots.Tasks()) > 2 { // connect task and setup-profiles tasks are 2 tasks, other tasks are hooks
		// TODO: in preseed mode make interface hooks wait for mark-preseeded task.
		for _, t := range autots.Tasks() {
			if t.Kind() == "run-hook" {
				return fmt.Errorf("interface hooks are not yet supported in preseed mode")
			}
		}
	}

	if len(autots.Tasks()) > 0 {
		snapstate.InjectTasks(task, autots)

		st.EnsureBefore(0)
	}

	// mark done in the same atomic state write that injects the new tasks,
	// otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}
  1199  
// doAutoDisconnect creates tasks for disconnecting all interfaces of a snap and running its interface hooks.
func (m *InterfaceManager) doAutoDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	snapsup, err := snapstate.TaskSnapSetup(task)
	if err != nil {
		return err
	}

	snapName := snapsup.InstanceName()
	connections, err := m.repo.Connections(snapName)
	if err != nil {
		return err
	}

	// check for conflicts on all connections first before creating disconnect hooks
	for _, connRef := range connections {
		if err := checkDisconnectConflicts(st, snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if _, retry := err.(*state.Retry); retry {
				logger.Debugf("disconnecting interfaces of snap %q will be retried because of %q - %q conflict", snapName, connRef.PlugRef.Snap, connRef.SlotRef.Snap)
				task.Logf("Waiting for conflicting change in progress...")
				return err // will retry
			}
			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
		}
	}

	hookTasks := state.NewTaskSet()
	for _, connRef := range connections {
		conn, err := m.repo.Connection(connRef)
		if err != nil {
			// NOTE(review): this silently stops processing the remaining
			// connections on the first lookup failure; confirm this
			// best-effort behavior is intended rather than `continue`
			// or returning the error.
			break
		}
		// "auto-disconnect" flag indicates it's a disconnect triggered as part of snap removal, in which
		// case we want to skip the logic of marking auto-connections as 'undesired' and instead just remove
		// them so they can be automatically connected if the snap is installed again.
		ts, err := disconnectTasks(st, conn, disconnectOpts{AutoDisconnect: true})
		if err != nil {
			return err
		}
		hookTasks.AddAll(ts)
	}

	snapstate.InjectTasks(task, hookTasks)

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)
	return nil
}
  1251  
// undoAutoConnect is the undo handler for the auto-connect task; it is
// currently a no-op.
func (m *InterfaceManager) undoAutoConnect(task *state.Task, _ *tomb.Tomb) error {
	// TODO Introduce disconnection hooks, and run them here as well to give a chance
	// for the snap to undo whatever it did when the connection was established.
	return nil
}
  1257  
  1258  // transitionConnectionsCoreMigration will transition all connections
  1259  // from oldName to newName. Note that this is only useful when you
  1260  // know that newName supports everything that oldName supports,
  1261  // otherwise you will be in a world of pain.
  1262  func (m *InterfaceManager) transitionConnectionsCoreMigration(st *state.State, oldName, newName string) error {
  1263  	// transition over, ubuntu-core has only slots
  1264  	conns, err := getConns(st)
  1265  	if err != nil {
  1266  		return err
  1267  	}
  1268  
  1269  	for id := range conns {
  1270  		connRef, err := interfaces.ParseConnRef(id)
  1271  		if err != nil {
  1272  			return err
  1273  		}
  1274  		if connRef.SlotRef.Snap == oldName {
  1275  			connRef.SlotRef.Snap = newName
  1276  			conns[connRef.ID()] = conns[id]
  1277  			delete(conns, id)
  1278  		}
  1279  	}
  1280  	setConns(st, conns)
  1281  
  1282  	// After migrating connections in state, remove them from repo so they stay in sync and we don't
  1283  	// attempt to run disconnects on when the old core gets removed as part of the transition.
  1284  	if err := m.removeConnections(oldName); err != nil {
  1285  		return err
  1286  	}
  1287  
  1288  	// The reloadConnections() just modifies the repository object, it
  1289  	// has no effect on the running system, i.e. no security profiles
  1290  	// on disk are rewritten. This is ok because core/ubuntu-core have
  1291  	// exactly the same profiles and nothing in the generated policies
  1292  	// has the core snap-name encoded.
  1293  	if _, err := m.reloadConnections(newName); err != nil {
  1294  		return err
  1295  	}
  1296  
  1297  	return nil
  1298  }
  1299  
  1300  func (m *InterfaceManager) doTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1301  	st := t.State()
  1302  	st.Lock()
  1303  	defer st.Unlock()
  1304  
  1305  	var oldName, newName string
  1306  	if err := t.Get("old-name", &oldName); err != nil {
  1307  		return err
  1308  	}
  1309  	if err := t.Get("new-name", &newName); err != nil {
  1310  		return err
  1311  	}
  1312  
  1313  	return m.transitionConnectionsCoreMigration(st, oldName, newName)
  1314  }
  1315  
  1316  func (m *InterfaceManager) undoTransitionUbuntuCore(t *state.Task, _ *tomb.Tomb) error {
  1317  	st := t.State()
  1318  	st.Lock()
  1319  	defer st.Unlock()
  1320  
  1321  	// symmetrical to the "do" method, just reverse them again
  1322  	var oldName, newName string
  1323  	if err := t.Get("old-name", &oldName); err != nil {
  1324  		return err
  1325  	}
  1326  	if err := t.Get("new-name", &newName); err != nil {
  1327  		return err
  1328  	}
  1329  
  1330  	return m.transitionConnectionsCoreMigration(st, newName, oldName)
  1331  }
  1332  
// doHotplugConnect creates task(s) to (re)create old connections or auto-connect viable slots in response to hotplug "add" event.
func (m *InterfaceManager) doHotplugConnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := snapstate.DeviceCtx(st, task, nil)
	if err != nil {
		return err
	}

	// all connections remembered in the state
	conns, err := getConns(st)
	if err != nil {
		return err
	}

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	if slot == nil {
		return fmt.Errorf("cannot find hotplug slot for interface %s and hotplug key %q", ifaceName, hotplugKey)
	}

	// find old connections for slots of this device - note we can't ask the repository since we need
	// to recreate old connections that are only remembered in the state.
	connsForDevice := findConnsForHotplugKey(conns, ifaceName, hotplugKey)

	// conflictError turns a conflict-check result into either a retry (for
	// temporary conflicts) or a hard task error.
	conflictError := func(retry *state.Retry, err error) error {
		if retry != nil {
			task.Logf("hotplug connect will be retried: %s", retry.Reason)
			return retry // will retry
		}
		return fmt.Errorf("hotplug-connect conflict check failed: %v", err)
	}

	// find old connections to recreate
	var recreate []*interfaces.ConnRef
	for _, id := range connsForDevice {
		conn := conns[id]
		// device was not unplugged, this is the case if snapd is restarted and we enumerate devices.
		// note, the situation where device was not unplugged but has changed is handled
		// by hotplugDeviceAdded handler - updateDevice.
		if !conn.HotplugGone || conn.Undesired {
			continue
		}

		// the device was unplugged while connected, so it had disconnect hooks run; recreate the connection
		connRef, err := interfaces.ParseConnRef(id)
		if err != nil {
			return err
		}

		if err := checkAutoconnectConflicts(st, task, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			retry, _ := err.(*state.Retry)
			return conflictError(retry, err)
		}
		recreate = append(recreate, connRef)
	}

	// find new auto-connections
	autochecker, err := newAutoConnectChecker(st, task, m.repo, deviceCtx)
	if err != nil {
		return err
	}

	instanceName := slot.Snap.InstanceName()
	candidates := m.repo.AutoConnectCandidatePlugs(instanceName, slot.Name, autochecker.check)

	newconns := make(map[string]*interfaces.ConnRef, len(candidates))
	// Auto-connect the plugs
	cannotAutoConnectLog := func(plug *snap.PlugInfo, candRefs []string) string {
		return fmt.Sprintf("cannot auto-connect hotplug slot %s to plug %s, candidates found: %s", slot, plug, strings.Join(candRefs, ", "))
	}
	if err := autochecker.addAutoConnections(newconns, candidates, filterForSlot(slot), conns, cannotAutoConnectLog, conflictError); err != nil {
		return err
	}

	// nothing to recreate or auto-connect - done
	if len(recreate) == 0 && len(newconns) == 0 {
		return nil
	}

	// Create connect tasks and interface hooks for old connections
	connectTs := state.NewTaskSet()
	for _, conn := range recreate {
		// preserve the original auto-connect flag of the old connection
		wasAutoconnected := conns[conn.ID()].Auto
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: wasAutoconnected})
		if err != nil {
			return fmt.Errorf("internal error: connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}
	// Create connect tasks and interface hooks for new auto-connections
	for _, conn := range newconns {
		ts, err := connect(st, conn.PlugRef.Snap, conn.PlugRef.Name, conn.SlotRef.Snap, conn.SlotRef.Name, connectOpts{AutoConnect: true})
		if err != nil {
			return fmt.Errorf("internal error: auto-connect of %q failed: %s", conn, err)
		}
		connectTs.AddAll(ts)
	}

	if len(connectTs.Tasks()) > 0 {
		snapstate.InjectTasks(task, connectTs)
		st.EnsureBefore(0)
	}

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}
  1449  
  1450  // doHotplugUpdateSlot updates static attributes of a hotplug slot for given device.
  1451  func (m *InterfaceManager) doHotplugUpdateSlot(task *state.Task, _ *tomb.Tomb) error {
  1452  	st := task.State()
  1453  	st.Lock()
  1454  	defer st.Unlock()
  1455  
  1456  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1457  	if err != nil {
  1458  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1459  	}
  1460  
  1461  	var attrs map[string]interface{}
  1462  	if err := task.Get("slot-attrs", &attrs); err != nil {
  1463  		return fmt.Errorf("internal error: cannot get slot-attrs attribute for device %s, interface %s: %s", hotplugKey, ifaceName, err)
  1464  	}
  1465  
  1466  	stateSlots, err := getHotplugSlots(st)
  1467  	if err != nil {
  1468  		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
  1469  	}
  1470  
  1471  	slot, err := m.repo.UpdateHotplugSlotAttrs(ifaceName, hotplugKey, attrs)
  1472  	if err != nil {
  1473  		return err
  1474  	}
  1475  
  1476  	if slotSpec, ok := stateSlots[slot.Name]; ok {
  1477  		slotSpec.StaticAttrs = attrs
  1478  		stateSlots[slot.Name] = slotSpec
  1479  		setHotplugSlots(st, stateSlots)
  1480  	} else {
  1481  		return fmt.Errorf("internal error: cannot find slot %s for device %q", slot.Name, hotplugKey)
  1482  	}
  1483  
  1484  	return nil
  1485  }
  1486  
// doHotplugRemoveSlot removes hotplug slot for given device from the repository in response to udev "remove" event.
// This task must necessarily be run after all affected slot gets disconnected in the repo.
func (m *InterfaceManager) doHotplugRemoveSlot(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	// drop the slot from the repository, if it is still there
	slot, err := m.repo.SlotForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return fmt.Errorf("internal error: cannot determine slots: %v", err)
	}
	if slot != nil {
		if err := m.repo.RemoveSlot(slot.Snap.InstanceName(), slot.Name); err != nil {
			return fmt.Errorf("cannot remove hotplug slot: %v", err)
		}
	}

	stateSlots, err := getHotplugSlots(st)
	if err != nil {
		return fmt.Errorf("internal error: cannot obtain hotplug slots: %v", err)
	}

	// remove the slot from hotplug-slots in the state as long as there are no connections referencing it,
	// including connection with hotplug-gone=true.
	slotDef := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
	if slotDef == nil {
		return fmt.Errorf("internal error: cannot find hotplug slot for interface %s, hotplug key %q", ifaceName, hotplugKey)
	}
	conns, err := getConns(st)
	if err != nil {
		return err
	}
	for _, conn := range conns {
		if conn.Interface == slotDef.Interface && conn.HotplugKey == slotDef.HotplugKey {
			// there is a connection referencing this slot, do not remove it, only mark as "gone"
			slotDef.HotplugGone = true
			stateSlots[slotDef.Name] = slotDef
			setHotplugSlots(st, stateSlots)
			return nil
		}
	}
	// no connection references the slot - forget it entirely
	delete(stateSlots, slotDef.Name)
	setHotplugSlots(st, stateSlots)

	return nil
}
  1538  
// doHotplugDisconnect creates task(s) to disconnect connections and remove slots in response to hotplug "remove" event.
func (m *InterfaceManager) doHotplugDisconnect(task *state.Task, _ *tomb.Tomb) error {
	st := task.State()
	st.Lock()
	defer st.Unlock()

	ifaceName, hotplugKey, err := getHotplugAttrs(task)
	if err != nil {
		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
	}

	connections, err := m.repo.ConnectionsForHotplugKey(ifaceName, hotplugKey)
	if err != nil {
		return err
	}
	// nothing connected to this device - nothing to do
	if len(connections) == 0 {
		return nil
	}

	// check for conflicts on all connections first before creating disconnect hooks
	for _, connRef := range connections {
		if err := checkHotplugDisconnectConflicts(st, connRef.PlugRef.Snap, connRef.SlotRef.Snap); err != nil {
			if retry, ok := err.(*state.Retry); ok {
				task.Logf("Waiting for conflicting change in progress: %s", retry.Reason)
				return err // will retry
			}
			return fmt.Errorf("cannot check conflicts when disconnecting interfaces: %s", err)
		}
	}

	// create disconnect tasks for all connections of this device
	dts := state.NewTaskSet()
	for _, connRef := range connections {
		conn, err := m.repo.Connection(connRef)
		if err != nil {
			// this should never happen since we get all connections from the repo
			return fmt.Errorf("internal error: cannot get connection %q: %s", connRef, err)
		}
		// "by-hotplug" flag indicates it's a disconnect triggered as part of hotplug removal.
		ts, err := disconnectTasks(st, conn, disconnectOpts{ByHotplug: true})
		if err != nil {
			return fmt.Errorf("internal error: cannot create disconnect tasks: %s", err)
		}
		dts.AddAll(ts)
	}

	snapstate.InjectTasks(task, dts)
	st.EnsureBefore(0)

	// make sure that we add tasks and mark this task done in the same atomic write, otherwise there is a risk of re-adding tasks again
	task.SetStatus(state.DoneStatus)

	return nil
}
  1592  
  1593  func (m *InterfaceManager) doHotplugAddSlot(task *state.Task, _ *tomb.Tomb) error {
  1594  	st := task.State()
  1595  	st.Lock()
  1596  	defer st.Unlock()
  1597  
  1598  	systemSnap, err := systemSnapInfo(st)
  1599  	if err != nil {
  1600  		return fmt.Errorf("system snap not available")
  1601  	}
  1602  
  1603  	ifaceName, hotplugKey, err := getHotplugAttrs(task)
  1604  	if err != nil {
  1605  		return fmt.Errorf("internal error: cannot get hotplug task attributes: %s", err)
  1606  	}
  1607  
  1608  	var proposedSlot hotplug.ProposedSlot
  1609  	if err := task.Get("proposed-slot", &proposedSlot); err != nil {
  1610  		return fmt.Errorf("internal error: cannot get proposed hotplug slot from task attributes: %s", err)
  1611  	}
  1612  	var devinfo hotplug.HotplugDeviceInfo
  1613  	if err := task.Get("device-info", &devinfo); err != nil {
  1614  		return fmt.Errorf("internal error: cannot get hotplug device info from task attributes: %s", err)
  1615  	}
  1616  
  1617  	stateSlots, err := getHotplugSlots(st)
  1618  	if err != nil {
  1619  		return fmt.Errorf("internal error obtaining hotplug slots: %v", err.Error())
  1620  	}
  1621  
  1622  	iface := m.repo.Interface(ifaceName)
  1623  	if iface == nil {
  1624  		return fmt.Errorf("internal error: cannot find interface %s", ifaceName)
  1625  	}
  1626  
  1627  	slot := findHotplugSlot(stateSlots, ifaceName, hotplugKey)
  1628  
  1629  	// if we know this slot already, restore / update it.
  1630  	if slot != nil {
  1631  		if slot.HotplugGone {
  1632  			// hotplugGone means the device was unplugged, so its disconnect hooks were run and can now
  1633  			// simply recreate the slot with potentially new attributes, and old connections will be re-created
  1634  			newSlot := &snap.SlotInfo{
  1635  				Name:       slot.Name,
  1636  				Label:      proposedSlot.Label,
  1637  				Snap:       systemSnap,
  1638  				Interface:  ifaceName,
  1639  				Attrs:      proposedSlot.Attrs,
  1640  				HotplugKey: hotplugKey,
  1641  			}
  1642  			return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1643  		}
  1644  
  1645  		// else - not gone, restored already by reloadConnections, but may need updating.
  1646  		if !reflect.DeepEqual(proposedSlot.Attrs, slot.StaticAttrs) {
  1647  			ts := updateDevice(st, iface.Name(), hotplugKey, proposedSlot.Attrs)
  1648  			snapstate.InjectTasks(task, ts)
  1649  			st.EnsureBefore(0)
  1650  			task.SetStatus(state.DoneStatus)
  1651  		} // else - nothing to do
  1652  		return nil
  1653  	}
  1654  
  1655  	// New slot.
  1656  	slotName := hotplugSlotName(hotplugKey, systemSnap.InstanceName(), proposedSlot.Name, iface.Name(), &devinfo, m.repo, stateSlots)
  1657  	newSlot := &snap.SlotInfo{
  1658  		Name:       slotName,
  1659  		Label:      proposedSlot.Label,
  1660  		Snap:       systemSnap,
  1661  		Interface:  iface.Name(),
  1662  		Attrs:      proposedSlot.Attrs,
  1663  		HotplugKey: hotplugKey,
  1664  	}
  1665  	return addHotplugSlot(st, m.repo, stateSlots, iface, newSlot)
  1666  }
  1667  
  1668  // doHotplugSeqWait returns Retry error if there is another change for same hotplug key and a lower sequence number.
  1669  // Sequence numbers control the order of execution of hotplug-related changes, which would otherwise be executed in
  1670  // arbitrary order by task runner, leading to unexpected results if multiple events for same device are in flight
  1671  // (e.g. plugging, followed by immediate unplugging, or snapd restart with pending hotplug changes).
  1672  // The handler expects "hotplug-key" and "hotplug-seq" values set on own and other hotplug-related changes.
  1673  func (m *InterfaceManager) doHotplugSeqWait(task *state.Task, _ *tomb.Tomb) error {
  1674  	st := task.State()
  1675  	st.Lock()
  1676  	defer st.Unlock()
  1677  
  1678  	chg := task.Change()
  1679  	if chg == nil || !isHotplugChange(chg) {
  1680  		return fmt.Errorf("internal error: task %q not in a hotplug change", task.Kind())
  1681  	}
  1682  
  1683  	seq, hotplugKey, err := getHotplugChangeAttrs(chg)
  1684  	if err != nil {
  1685  		return err
  1686  	}
  1687  
  1688  	for _, otherChg := range st.Changes() {
  1689  		if otherChg.Status().Ready() || otherChg.ID() == chg.ID() {
  1690  			continue
  1691  		}
  1692  
  1693  		// only inspect hotplug changes
  1694  		if !isHotplugChange(otherChg) {
  1695  			continue
  1696  		}
  1697  
  1698  		otherSeq, otherKey, err := getHotplugChangeAttrs(otherChg)
  1699  		if err != nil {
  1700  			return err
  1701  		}
  1702  
  1703  		// conflict with retry if there another change affecting same device and has lower sequence number
  1704  		if hotplugKey == otherKey && otherSeq < seq {
  1705  			task.Logf("Waiting processing of earlier hotplug event change %q affecting device with hotplug key %q", otherChg.Kind(), hotplugKey)
  1706  			// TODO: consider introducing a new task that runs last and does EnsureBefore(0) for hotplug changes
  1707  			return &state.Retry{After: hotplugRetryTimeout}
  1708  		}
  1709  	}
  1710  
  1711  	// no conflicting change for same hotplug key found
  1712  	return nil
  1713  }