github.com/amnezia-vpn/amnezia-wg@v0.1.8/device/device.go (about)

     1  /* SPDX-License-Identifier: MIT
     2   *
     3   * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
     4   */
     5  
     6  package device
     7  
     8  import (
     9  	"runtime"
    10  	"sync"
    11  	"sync/atomic"
    12  	"time"
    13  
    14  	"github.com/amnezia-vpn/amnezia-wg/conn"
    15  	"github.com/amnezia-vpn/amnezia-wg/ipc"
    16  	"github.com/amnezia-vpn/amnezia-wg/ratelimiter"
    17  	"github.com/amnezia-vpn/amnezia-wg/rwcancel"
    18  	"github.com/amnezia-vpn/amnezia-wg/tun"
    19  	"github.com/tevino/abool/v2"
    20  )
    21  
// Device is a single WireGuard interface: its static identity keys, peer
// table, UDP bind, packet queues, and the worker goroutines that service
// them, plus the amnezia-specific "advanced security" (obfuscation) state.
type Device struct {
	state struct {
		// state holds the device's state. It is accessed atomically.
		// Use the device.deviceState method to read it.
		// device.deviceState does not acquire the mutex, so it captures only a snapshot.
		// During state transitions, the state variable is updated before the device itself.
		// The state is thus either the current state of the device or
		// the intended future state of the device.
		// For example, while executing a call to Up, state will be deviceStateUp.
		// There is no guarantee that that intended future state of the device
		// will become the actual state; Up can fail.
		// The device can also change state multiple times between time of check and time of use.
		// Unsynchronized uses of state must therefore be advisory/best-effort only.
		state atomic.Uint32 // actually a deviceState, but typed uint32 for convenience
		// stopping blocks until all inputs to Device have been closed.
		stopping sync.WaitGroup
		// mu protects state changes.
		sync.Mutex
	}

	// net bundles everything related to the UDP socket (the "bind").
	net struct {
		// stopping blocks until all receive goroutines for this bind have exited.
		stopping sync.WaitGroup
		sync.RWMutex
		bind          conn.Bind // bind interface
		netlinkCancel *rwcancel.RWCancel
		port          uint16 // listening port
		fwmark        uint32 // mark value (0 = disabled)
		brokenRoaming bool
	}

	// staticIdentity is the device's own Noise key pair.
	staticIdentity struct {
		sync.RWMutex
		privateKey NoisePrivateKey
		publicKey  NoisePublicKey
	}

	peers struct {
		sync.RWMutex // protects keyMap
		keyMap       map[NoisePublicKey]*Peer
	}

	// rate tracks handshake load for the cookie/ratelimiter machinery.
	rate struct {
		// underLoadUntil is a UnixNano deadline; the device is considered
		// under load until it passes (see IsUnderLoad).
		underLoadUntil atomic.Int64
		limiter        ratelimiter.Ratelimiter
	}

	allowedips    AllowedIPs
	indexTable    IndexTable
	cookieChecker CookieChecker

	// pool holds the reusable buffer/element pools created by PopulatePools.
	pool struct {
		inboundElementsContainer  *WaitPool
		outboundElementsContainer *WaitPool
		messageBuffers            *WaitPool
		inboundElements           *WaitPool
		outboundElements          *WaitPool
	}

	// queue carries work between the worker goroutines started in NewDevice.
	queue struct {
		encryption *outboundQueue
		decryption *inboundQueue
		handshake  *handshakeQueue
	}

	tun struct {
		device tun.Device
		mtu    atomic.Int32
	}

	// ipcMutex serializes UAPI configuration against state transitions.
	ipcMutex sync.RWMutex
	// closed is closed once Close has fully torn the device down (see Wait).
	closed chan struct{}
	log    *Logger

	// Amnezia "advanced security": whether obfuscation is active, the mutex
	// guarding the configuration, and the configuration itself
	// (validated and applied by handlePostConfig).
	isASecOn abool.AtomicBool
	aSecMux  sync.RWMutex
	aSecCfg  aSecCfgType
}
    99  
// aSecCfgType carries the amnezia "advanced security" (obfuscation)
// parameters accumulated from a UAPI set operation. isSet records whether
// any of the fields were supplied at all; handlePostConfig validates the
// values and applies them to the device and to the package-level message
// type variables.
type aSecCfgType struct {
	isSet bool
	// junkPacketCount must be non-negative; a non-zero value enables aSec.
	junkPacketCount int
	// junkPacketMinSize/MaxSize bound junk packet sizes; max must stay below
	// MaxSegmentSize and at or above min (see handlePostConfig).
	junkPacketMinSize int
	junkPacketMaxSize int
	// initPacketJunkSize/responsePacketJunkSize are extra bytes added to the
	// handshake initiation/response; header size + junk must stay below
	// MaxSegmentSize.
	initPacketJunkSize     int
	responsePacketJunkSize int
	// The magic headers replace the standard WireGuard message type values
	// 1-4 when set to a value greater than 4; all four must be distinct.
	initPacketMagicHeader      uint32
	responsePacketMagicHeader  uint32
	underloadPacketMagicHeader uint32
	transportPacketMagicHeader uint32
}
   112  
// deviceState represents the state of a Device.
// There are three states: down, up, closed.
// Transitions:
//
//	down -----+
//	  ↑↓      ↓
//	  up -> closed
type deviceState uint32

//go:generate go run golang.org/x/tools/cmd/stringer -type deviceState -trimprefix=deviceState
const (
	// deviceStateDown is the initial state (set in NewDevice) and the target of Down.
	deviceStateDown deviceState = iota
	// deviceStateUp is set while the device is up or attempting to come up.
	deviceStateUp
	// deviceStateClosed is terminal: once closed, always closed (see changeState).
	deviceStateClosed
)
   128  
   129  // deviceState returns device.state.state as a deviceState
   130  // See those docs for how to interpret this value.
   131  func (device *Device) deviceState() deviceState {
   132  	return deviceState(device.state.state.Load())
   133  }
   134  
   135  // isClosed reports whether the device is closed (or is closing).
   136  // See device.state.state comments for how to interpret this value.
   137  func (device *Device) isClosed() bool {
   138  	return device.deviceState() == deviceStateClosed
   139  }
   140  
   141  // isUp reports whether the device is up (or is attempting to come up).
   142  // See device.state.state comments for how to interpret this value.
   143  func (device *Device) isUp() bool {
   144  	return device.deviceState() == deviceStateUp
   145  }
   146  
   147  // Must hold device.peers.Lock()
   148  func removePeerLocked(device *Device, peer *Peer, key NoisePublicKey) {
   149  	// stop routing and processing of packets
   150  	device.allowedips.RemoveByPeer(peer)
   151  	peer.Stop()
   152  
   153  	// remove from peer map
   154  	delete(device.peers.keyMap, key)
   155  }
   156  
// changeState attempts to change the device state to match want.
// It returns nil when the device is already in (or already closed out of)
// the requested state; if bringing the device up fails, it is brought all
// the way back down and the up error is returned.
func (device *Device) changeState(want deviceState) (err error) {
	device.state.Lock()
	defer device.state.Unlock()
	old := device.deviceState()
	if old == deviceStateClosed {
		// once closed, always closed
		device.log.Verbosef("Interface closed, ignored requested state %s", want)
		return nil
	}
	switch want {
	case old:
		// Already in the requested state; nothing to do.
		return nil
	case deviceStateUp:
		// Store the intended state before acting on it, per the
		// device.state.state contract ("intended future state").
		device.state.state.Store(uint32(deviceStateUp))
		err = device.upLocked()
		if err == nil {
			break
		}
		fallthrough // up failed; bring the device all the way back down
	case deviceStateDown:
		device.state.state.Store(uint32(deviceStateDown))
		errDown := device.downLocked()
		// Preserve the original up error, if any; otherwise report the
		// down error.
		if err == nil {
			err = errDown
		}
	}
	device.log.Verbosef(
		"Interface state was %s, requested %s, now %s", old, want, device.deviceState())
	return
}
   188  
// upLocked attempts to bring the device up and reports whether it succeeded.
// It (re)opens the UDP bind and then starts every known peer, sending an
// immediate keepalive to peers configured with persistent keepalive.
// The caller must hold device.state.mu and is responsible for updating device.state.state.
func (device *Device) upLocked() error {
	// (Re)open the bind first; without a socket the peers cannot operate.
	if err := device.BindUpdate(); err != nil {
		device.log.Errorf("Unable to update bind: %v", err)
		return err
	}

	// The IPC set operation waits for peers to be created before calling Start() on them,
	// so if there's a concurrent IPC set request happening, we should wait for it to complete.
	device.ipcMutex.Lock()
	defer device.ipcMutex.Unlock()

	device.peers.RLock()
	for _, peer := range device.peers.keyMap {
		peer.Start()
		// Kick the tunnel open right away for peers that want periodic
		// keepalives, instead of waiting for the first timer tick.
		if peer.persistentKeepaliveInterval.Load() > 0 {
			peer.SendKeepalive()
		}
	}
	device.peers.RUnlock()
	return nil
}
   212  
   213  // downLocked attempts to bring the device down.
   214  // The caller must hold device.state.mu and is responsible for updating device.state.state.
   215  func (device *Device) downLocked() error {
   216  	err := device.BindClose()
   217  	if err != nil {
   218  		device.log.Errorf("Bind close failed: %v", err)
   219  	}
   220  
   221  	device.peers.RLock()
   222  	for _, peer := range device.peers.keyMap {
   223  		peer.Stop()
   224  	}
   225  	device.peers.RUnlock()
   226  	return err
   227  }
   228  
// Up brings the device up: via changeState it opens the UDP bind and starts
// all peers. If any step fails, the device is brought back down and the
// error is returned. Calling Up on a closed device is a no-op.
func (device *Device) Up() error {
	return device.changeState(deviceStateUp)
}
   232  
// Down brings the device down: via changeState it closes the UDP bind and
// stops all peers. Calling Down on a closed device is a no-op.
func (device *Device) Down() error {
	return device.changeState(deviceStateDown)
}
   236  
   237  func (device *Device) IsUnderLoad() bool {
   238  	// check if currently under load
   239  	now := time.Now()
   240  	underLoad := len(device.queue.handshake.c) >= QueueHandshakeSize/8
   241  	if underLoad {
   242  		device.rate.underLoadUntil.Store(now.Add(UnderLoadAfterTime).UnixNano())
   243  		return true
   244  	}
   245  	// check if recently under load
   246  	return device.rate.underLoadUntil.Load() > now.UnixNano()
   247  }
   248  
// SetPrivateKey replaces the device's static identity key and refreshes all
// state derived from it: the cached public key, the cookie checker, and each
// peer's precomputed static-static shared secret. Peers whose remote static
// key equals our new public key are removed. All peers' current keypairs are
// expired so that future traffic uses the new identity. A no-op if sk equals
// the current private key.
func (device *Device) SetPrivateKey(sk NoisePrivateKey) error {
	// lock required resources

	device.staticIdentity.Lock()
	defer device.staticIdentity.Unlock()

	if sk.Equals(device.staticIdentity.privateKey) {
		return nil
	}

	device.peers.Lock()
	defer device.peers.Unlock()

	// Hold every peer's handshake read-lock for the duration of the swap so
	// no handshake observes a mix of old and new static keys.
	lockedPeers := make([]*Peer, 0, len(device.peers.keyMap))
	for _, peer := range device.peers.keyMap {
		peer.handshake.mutex.RLock()
		lockedPeers = append(lockedPeers, peer)
	}

	// remove peers with matching public keys

	publicKey := sk.publicKey()
	for key, peer := range device.peers.keyMap {
		if peer.handshake.remoteStatic.Equals(publicKey) {
			// Temporarily release this peer's handshake lock around removal;
			// NOTE(review): presumably removal/Stop paths need the handshake
			// mutex — confirm against peer.go.
			peer.handshake.mutex.RUnlock()
			removePeerLocked(device, peer, key)
			peer.handshake.mutex.RLock()
		}
	}

	// update key material

	device.staticIdentity.privateKey = sk
	device.staticIdentity.publicKey = publicKey
	device.cookieChecker.Init(publicKey)

	// do static-static DH pre-computations

	expiredPeers := make([]*Peer, 0, len(device.peers.keyMap))
	for _, peer := range device.peers.keyMap {
		handshake := &peer.handshake
		handshake.precomputedStaticStatic, _ = device.staticIdentity.privateKey.sharedSecret(handshake.remoteStatic)
		expiredPeers = append(expiredPeers, peer)
	}

	// Release handshake locks before expiring keypairs.
	for _, peer := range lockedPeers {
		peer.handshake.mutex.RUnlock()
	}
	for _, peer := range expiredPeers {
		peer.ExpireCurrentKeypairs()
	}

	return nil
}
   303  
// NewDevice constructs a Device around the given TUN device and UDP bind,
// initializes its pools and queues, and starts the per-CPU encryption,
// decryption, and handshake workers plus the TUN read and event routines.
// The device starts in the down state; call Up to begin handling traffic.
func NewDevice(tunDevice tun.Device, bind conn.Bind, logger *Logger) *Device {
	device := new(Device)
	device.state.state.Store(uint32(deviceStateDown))
	device.closed = make(chan struct{})
	device.log = logger
	device.net.bind = bind
	device.tun.device = tunDevice
	mtu, err := device.tun.device.MTU()
	if err != nil {
		device.log.Errorf("Trouble determining MTU, assuming default: %v", err)
		mtu = DefaultMTU
	}
	device.tun.mtu.Store(int32(mtu))
	device.peers.keyMap = make(map[NoisePublicKey]*Peer)
	device.rate.limiter.Init()
	device.indexTable.Init()

	device.PopulatePools()

	// create queues

	device.queue.handshake = newHandshakeQueue()
	device.queue.encryption = newOutboundQueue()
	device.queue.decryption = newInboundQueue()

	// start workers

	cpus := runtime.NumCPU()
	// NOTE(review): the stopping counter is zero at this point, so this Wait
	// cannot block; kept as-is from upstream.
	device.state.stopping.Wait()
	// Each RoutineHandshake writes to the encryption queue, so each holds a
	// reference that Close releases when the routines exit.
	device.queue.encryption.wg.Add(cpus) // One for each RoutineHandshake
	for i := 0; i < cpus; i++ {
		go device.RoutineEncryption(i + 1)
		go device.RoutineDecryption(i + 1)
		go device.RoutineHandshake(i + 1)
	}

	device.state.stopping.Add(1)      // RoutineReadFromTUN
	device.queue.encryption.wg.Add(1) // RoutineReadFromTUN
	go device.RoutineReadFromTUN()
	go device.RoutineTUNEventReader()

	return device
}
   347  
   348  // BatchSize returns the BatchSize for the device as a whole which is the max of
   349  // the bind batch size and the tun batch size. The batch size reported by device
   350  // is the size used to construct memory pools, and is the allowed batch size for
   351  // the lifetime of the device.
   352  func (device *Device) BatchSize() int {
   353  	size := device.net.bind.BatchSize()
   354  	dSize := device.tun.device.BatchSize()
   355  	if size < dSize {
   356  		size = dSize
   357  	}
   358  	return size
   359  }
   360  
   361  func (device *Device) LookupPeer(pk NoisePublicKey) *Peer {
   362  	device.peers.RLock()
   363  	defer device.peers.RUnlock()
   364  
   365  	return device.peers.keyMap[pk]
   366  }
   367  
   368  func (device *Device) RemovePeer(key NoisePublicKey) {
   369  	device.peers.Lock()
   370  	defer device.peers.Unlock()
   371  	// stop peer and remove from routing
   372  
   373  	peer, ok := device.peers.keyMap[key]
   374  	if ok {
   375  		removePeerLocked(device, peer, key)
   376  	}
   377  }
   378  
   379  func (device *Device) RemoveAllPeers() {
   380  	device.peers.Lock()
   381  	defer device.peers.Unlock()
   382  
   383  	for key, peer := range device.peers.keyMap {
   384  		removePeerLocked(device, peer, key)
   385  	}
   386  
   387  	device.peers.keyMap = make(map[NoisePublicKey]*Peer)
   388  }
   389  
// Close permanently shuts the device down: it marks the state closed, closes
// the TUN device and UDP bind, stops and removes all peers, releases the
// queue references held on behalf of future peers, waits for all input
// goroutines to finish, and finally closes the channel returned by Wait.
// Subsequent calls are no-ops.
func (device *Device) Close() {
	device.state.Lock()
	defer device.state.Unlock()
	device.ipcMutex.Lock()
	defer device.ipcMutex.Unlock()
	if device.isClosed() {
		return
	}
	// Mark closed first: changeState treats closed as terminal, so no
	// concurrent Up/Down can proceed past this point.
	device.state.state.Store(uint32(deviceStateClosed))
	device.log.Verbosef("Device closing")

	device.tun.device.Close()
	device.downLocked()

	// Remove peers before closing queues,
	// because peers assume that queues are active.
	device.RemoveAllPeers()

	// We kept a reference to the encryption and decryption queues,
	// in case we started any new peers that might write to them.
	// No new peers are coming; we are done with these queues.
	device.queue.encryption.wg.Done()
	device.queue.decryption.wg.Done()
	device.queue.handshake.wg.Done()
	// Wait for all inputs to the device (e.g. RoutineReadFromTUN) to stop.
	device.state.stopping.Wait()

	device.rate.limiter.Close()

	device.log.Verbosef("Device closed")
	close(device.closed)
}
   421  
// Wait returns a channel that is closed once the device has been fully torn
// down by Close. It does not block by itself; callers should only receive
// from the returned channel.
func (device *Device) Wait() chan struct{} {
	return device.closed
}
   425  
   426  func (device *Device) SendKeepalivesToPeersWithCurrentKeypair() {
   427  	if !device.isUp() {
   428  		return
   429  	}
   430  
   431  	device.peers.RLock()
   432  	for _, peer := range device.peers.keyMap {
   433  		peer.keypairs.RLock()
   434  		sendKeepalive := peer.keypairs.current != nil && !peer.keypairs.current.created.Add(RejectAfterTime).Before(time.Now())
   435  		peer.keypairs.RUnlock()
   436  		if sendKeepalive {
   437  			peer.SendKeepalive()
   438  		}
   439  	}
   440  	device.peers.RUnlock()
   441  }
   442  
   443  // closeBindLocked closes the device's net.bind.
   444  // The caller must hold the net mutex.
   445  func closeBindLocked(device *Device) error {
   446  	var err error
   447  	netc := &device.net
   448  	if netc.netlinkCancel != nil {
   449  		netc.netlinkCancel.Cancel()
   450  	}
   451  	if netc.bind != nil {
   452  		err = netc.bind.Close()
   453  	}
   454  	netc.stopping.Wait()
   455  	return err
   456  }
   457  
   458  func (device *Device) Bind() conn.Bind {
   459  	device.net.Lock()
   460  	defer device.net.Unlock()
   461  	return device.net.bind
   462  }
   463  
   464  func (device *Device) BindSetMark(mark uint32) error {
   465  	device.net.Lock()
   466  	defer device.net.Unlock()
   467  
   468  	// check if modified
   469  	if device.net.fwmark == mark {
   470  		return nil
   471  	}
   472  
   473  	// update fwmark on existing bind
   474  	device.net.fwmark = mark
   475  	if device.isUp() && device.net.bind != nil {
   476  		if err := device.net.bind.SetMark(mark); err != nil {
   477  			return err
   478  		}
   479  	}
   480  
   481  	// clear cached source addresses
   482  	device.peers.RLock()
   483  	for _, peer := range device.peers.keyMap {
   484  		peer.markEndpointSrcForClearing()
   485  	}
   486  	device.peers.RUnlock()
   487  
   488  	return nil
   489  }
   490  
// BindUpdate closes the current UDP bind (if any) and, when the device is
// up, reopens it on the configured port, restarts the route listener,
// reapplies the fwmark, invalidates cached peer source addresses, and spawns
// one RoutineReceiveIncoming goroutine per receive function. On failure the
// bind is left closed with port reset to 0.
func (device *Device) BindUpdate() error {
	device.net.Lock()
	defer device.net.Unlock()

	// close existing sockets
	if err := closeBindLocked(device); err != nil {
		return err
	}

	// open new sockets
	if !device.isUp() {
		return nil
	}

	// bind to new port
	var err error
	var recvFns []conn.ReceiveFunc
	netc := &device.net

	// Open reports the actual listening port; store it back so later
	// BindUpdate calls reuse the same port.
	recvFns, netc.port, err = netc.bind.Open(netc.port)
	if err != nil {
		netc.port = 0
		return err
	}

	netc.netlinkCancel, err = device.startRouteListener(netc.bind)
	if err != nil {
		// Roll back: the bind was opened above but cannot be serviced.
		netc.bind.Close()
		netc.port = 0
		return err
	}

	// set fwmark
	if netc.fwmark != 0 {
		err = netc.bind.SetMark(netc.fwmark)
		if err != nil {
			return err
		}
	}

	// clear cached source addresses
	device.peers.RLock()
	for _, peer := range device.peers.keyMap {
		peer.markEndpointSrcForClearing()
	}
	device.peers.RUnlock()

	// start receiving routines
	device.net.stopping.Add(len(recvFns))
	device.queue.decryption.wg.Add(len(recvFns)) // each RoutineReceiveIncoming goroutine writes to device.queue.decryption
	device.queue.handshake.wg.Add(len(recvFns))  // each RoutineReceiveIncoming goroutine writes to device.queue.handshake
	batchSize := netc.bind.BatchSize()
	for _, fn := range recvFns {
		go device.RoutineReceiveIncoming(batchSize, fn)
	}

	device.log.Verbosef("UDP bind has been updated")
	return nil
}
   550  
   551  func (device *Device) BindClose() error {
   552  	device.net.Lock()
   553  	err := closeBindLocked(device)
   554  	device.net.Unlock()
   555  	return err
   556  }
// isAdvancedSecurityOn reports whether any amnezia "advanced security"
// (obfuscation) feature is currently enabled; the flag is maintained by
// handlePostConfig.
func (device *Device) isAdvancedSecurityOn() bool {
	return device.isASecOn.IsSet()
}
   560  
   561  func (device *Device) handlePostConfig(tempASecCfg *aSecCfgType) (err error) {
   562  
   563  	if !tempASecCfg.isSet {
   564  		return err
   565  	}
   566  
   567  	isASecOn := false
   568  	device.aSecMux.Lock()
   569  	if tempASecCfg.junkPacketCount < 0 {
   570  		err = ipcErrorf(
   571  			ipc.IpcErrorInvalid,
   572  			"JunkPacketCount should be non negative",
   573  		)
   574  	}
   575  	device.aSecCfg.junkPacketCount = tempASecCfg.junkPacketCount
   576  	if tempASecCfg.junkPacketCount != 0 {
   577  		isASecOn = true
   578  	}
   579  
   580  	device.aSecCfg.junkPacketMinSize = tempASecCfg.junkPacketMinSize
   581  	if tempASecCfg.junkPacketMinSize != 0 {
   582  		isASecOn = true
   583  	}
   584  
   585  	if device.aSecCfg.junkPacketCount > 0 &&
   586  		tempASecCfg.junkPacketMaxSize == tempASecCfg.junkPacketMinSize {
   587  
   588  		tempASecCfg.junkPacketMaxSize++ // to make rand gen work
   589  	}
   590  
   591  	if tempASecCfg.junkPacketMaxSize >= MaxSegmentSize {
   592  		device.aSecCfg.junkPacketMinSize = 0
   593  		device.aSecCfg.junkPacketMaxSize = 1
   594  		if err != nil {
   595  			err = ipcErrorf(
   596  				ipc.IpcErrorInvalid,
   597  				"JunkPacketMaxSize: %d; should be smaller than maxSegmentSize: %d; %w",
   598  				tempASecCfg.junkPacketMaxSize,
   599  				MaxSegmentSize,
   600  				err,
   601  			)
   602  		} else {
   603  			err = ipcErrorf(
   604  				ipc.IpcErrorInvalid,
   605  				"JunkPacketMaxSize: %d; should be smaller than maxSegmentSize: %d",
   606  				tempASecCfg.junkPacketMaxSize,
   607  				MaxSegmentSize,
   608  			)
   609  		}
   610  	} else if tempASecCfg.junkPacketMaxSize < tempASecCfg.junkPacketMinSize {
   611  		if err != nil {
   612  			err = ipcErrorf(
   613  				ipc.IpcErrorInvalid,
   614  				"maxSize: %d; should be greater than minSize: %d; %w",
   615  				tempASecCfg.junkPacketMaxSize,
   616  				tempASecCfg.junkPacketMinSize,
   617  				err,
   618  			)
   619  		} else {
   620  			err = ipcErrorf(
   621  				ipc.IpcErrorInvalid,
   622  				"maxSize: %d; should be greater than minSize: %d",
   623  				tempASecCfg.junkPacketMaxSize,
   624  				tempASecCfg.junkPacketMinSize,
   625  			)
   626  		}
   627  	} else {
   628  		device.aSecCfg.junkPacketMaxSize = tempASecCfg.junkPacketMaxSize
   629  	}
   630  
   631  	if tempASecCfg.junkPacketMaxSize != 0 {
   632  		isASecOn = true
   633  	}
   634  
   635  	if MessageInitiationSize+tempASecCfg.initPacketJunkSize >= MaxSegmentSize {
   636  		if err != nil {
   637  			err = ipcErrorf(
   638  				ipc.IpcErrorInvalid,
   639  				`init header size(148) + junkSize:%d; should be smaller than maxSegmentSize: %d; %w`,
   640  				tempASecCfg.initPacketJunkSize,
   641  				MaxSegmentSize,
   642  				err,
   643  			)
   644  		} else {
   645  			err = ipcErrorf(
   646  				ipc.IpcErrorInvalid,
   647  				`init header size(148) + junkSize:%d; should be smaller than maxSegmentSize: %d`,
   648  				tempASecCfg.initPacketJunkSize,
   649  				MaxSegmentSize,
   650  			)
   651  		}
   652  	} else {
   653  		device.aSecCfg.initPacketJunkSize = tempASecCfg.initPacketJunkSize
   654  	}
   655  
   656  	if tempASecCfg.initPacketJunkSize != 0 {
   657  		isASecOn = true
   658  	}
   659  
   660  	if MessageResponseSize+tempASecCfg.responsePacketJunkSize >= MaxSegmentSize {
   661  		if err != nil {
   662  			err = ipcErrorf(
   663  				ipc.IpcErrorInvalid,
   664  				`response header size(92) + junkSize:%d; should be smaller than maxSegmentSize: %d; %w`,
   665  				tempASecCfg.responsePacketJunkSize,
   666  				MaxSegmentSize,
   667  				err,
   668  			)
   669  		} else {
   670  			err = ipcErrorf(
   671  				ipc.IpcErrorInvalid,
   672  				`response header size(92) + junkSize:%d; should be smaller than maxSegmentSize: %d`,
   673  				tempASecCfg.responsePacketJunkSize,
   674  				MaxSegmentSize,
   675  			)
   676  		}
   677  	} else {
   678  		device.aSecCfg.responsePacketJunkSize = tempASecCfg.responsePacketJunkSize
   679  	}
   680  
   681  	if tempASecCfg.responsePacketJunkSize != 0 {
   682  		isASecOn = true
   683  	}
   684  
   685  	if tempASecCfg.initPacketMagicHeader > 4 {
   686  		isASecOn = true
   687  		device.log.Verbosef("UAPI: Updating init_packet_magic_header")
   688  		device.aSecCfg.initPacketMagicHeader = tempASecCfg.initPacketMagicHeader
   689  		MessageInitiationType = device.aSecCfg.initPacketMagicHeader
   690  	} else {
   691  		device.log.Verbosef("UAPI: Using default init type")
   692  		MessageInitiationType = 1
   693  	}
   694  
   695  	if tempASecCfg.responsePacketMagicHeader > 4 {
   696  		isASecOn = true
   697  		device.log.Verbosef("UAPI: Updating response_packet_magic_header")
   698  		device.aSecCfg.responsePacketMagicHeader = tempASecCfg.responsePacketMagicHeader
   699  		MessageResponseType = device.aSecCfg.responsePacketMagicHeader
   700  	} else {
   701  		device.log.Verbosef("UAPI: Using default response type")
   702  		MessageResponseType = 2
   703  	}
   704  
   705  	if tempASecCfg.underloadPacketMagicHeader > 4 {
   706  		isASecOn = true
   707  		device.log.Verbosef("UAPI: Updating underload_packet_magic_header")
   708  		device.aSecCfg.underloadPacketMagicHeader = tempASecCfg.underloadPacketMagicHeader
   709  		MessageCookieReplyType = device.aSecCfg.underloadPacketMagicHeader
   710  	} else {
   711  		device.log.Verbosef("UAPI: Using default underload type")
   712  		MessageCookieReplyType = 3
   713  	}
   714  
   715  	if tempASecCfg.transportPacketMagicHeader > 4 {
   716  		isASecOn = true
   717  		device.log.Verbosef("UAPI: Updating transport_packet_magic_header")
   718  		device.aSecCfg.transportPacketMagicHeader = tempASecCfg.transportPacketMagicHeader
   719  		MessageTransportType = device.aSecCfg.transportPacketMagicHeader
   720  	} else {
   721  		device.log.Verbosef("UAPI: Using default transport type")
   722  		MessageTransportType = 4
   723  	}
   724  
   725  	isSameMap := map[uint32]bool{}
   726  	isSameMap[MessageInitiationType] = true
   727  	isSameMap[MessageResponseType] = true
   728  	isSameMap[MessageCookieReplyType] = true
   729  	isSameMap[MessageTransportType] = true
   730  
   731  	// size will be different if same values
   732  	if len(isSameMap) != 4 {
   733  		if err != nil {
   734  			err = ipcErrorf(
   735  				ipc.IpcErrorInvalid,
   736  				`magic headers should differ; got: init:%d; recv:%d; unde:%d; tran:%d; %w`,
   737  				MessageInitiationType,
   738  				MessageResponseType,
   739  				MessageCookieReplyType,
   740  				MessageTransportType,
   741  				err,
   742  			)
   743  		} else {
   744  			err = ipcErrorf(
   745  				ipc.IpcErrorInvalid,
   746  				`magic headers should differ; got: init:%d; recv:%d; unde:%d; tran:%d`,
   747  				MessageInitiationType,
   748  				MessageResponseType,
   749  				MessageCookieReplyType,
   750  				MessageTransportType,
   751  			)
   752  		}
   753  	}
   754  
   755  	newInitSize := MessageInitiationSize + device.aSecCfg.initPacketJunkSize
   756  	newResponseSize := MessageResponseSize + device.aSecCfg.responsePacketJunkSize
   757  
   758  	if newInitSize == newResponseSize {
   759  		if err != nil {
   760  			err = ipcErrorf(
   761  				ipc.IpcErrorInvalid,
   762  				`new init size:%d; and new response size:%d; should differ; %w`,
   763  				newInitSize,
   764  				newResponseSize,
   765  				err,
   766  			)
   767  		} else {
   768  			err = ipcErrorf(
   769  				ipc.IpcErrorInvalid,
   770  				`new init size:%d; and new response size:%d; should differ`,
   771  				newInitSize,
   772  				newResponseSize,
   773  			)
   774  		}
   775  	} else {
   776  		packetSizeToMsgType = map[int]uint32{
   777  			newInitSize:            MessageInitiationType,
   778  			newResponseSize:        MessageResponseType,
   779  			MessageCookieReplySize: MessageCookieReplyType,
   780  			MessageTransportSize:   MessageTransportType,
   781  		}
   782  
   783  		msgTypeToJunkSize = map[uint32]int{
   784  			MessageInitiationType:  device.aSecCfg.initPacketJunkSize,
   785  			MessageResponseType:    device.aSecCfg.responsePacketJunkSize,
   786  			MessageCookieReplyType: 0,
   787  			MessageTransportType:   0,
   788  		}
   789  	}
   790  
   791  	device.isASecOn.SetTo(isASecOn)
   792  	device.aSecMux.Unlock()
   793  
   794  	return err
   795  }