github.com/amnezia-vpn/amneziawg-go@v0.2.8/device/device.go (about)

     1  /* SPDX-License-Identifier: MIT
     2   *
     3   * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
     4   */
     5  
     6  package device
     7  
     8  import (
     9  	"runtime"
    10  	"sync"
    11  	"sync/atomic"
    12  	"time"
    13  
    14  	"github.com/amnezia-vpn/amneziawg-go/conn"
    15  	"github.com/amnezia-vpn/amneziawg-go/ipc"
    16  	"github.com/amnezia-vpn/amneziawg-go/ratelimiter"
    17  	"github.com/amnezia-vpn/amneziawg-go/rwcancel"
    18  	"github.com/amnezia-vpn/amneziawg-go/tun"
    19  	"github.com/tevino/abool/v2"
    20  )
    21  
    22  type Device struct {
    23  	state struct {
    24  		// state holds the device's state. It is accessed atomically.
    25  		// Use the device.deviceState method to read it.
    26  		// device.deviceState does not acquire the mutex, so it captures only a snapshot.
    27  		// During state transitions, the state variable is updated before the device itself.
    28  		// The state is thus either the current state of the device or
    29  		// the intended future state of the device.
    30  		// For example, while executing a call to Up, state will be deviceStateUp.
     31  		// There is no guarantee that the intended future state of the device
    32  		// will become the actual state; Up can fail.
    33  		// The device can also change state multiple times between time of check and time of use.
    34  		// Unsynchronized uses of state must therefore be advisory/best-effort only.
    35  		state atomic.Uint32 // actually a deviceState, but typed uint32 for convenience
    36  		// stopping blocks until all inputs to Device have been closed.
    37  		stopping sync.WaitGroup
     38  		// Mutex protects state changes.
    39  		sync.Mutex
    40  	}
    41  
    42  	net struct {
    43  		stopping sync.WaitGroup
    44  		sync.RWMutex
    45  		bind          conn.Bind // bind interface
    46  		netlinkCancel *rwcancel.RWCancel
    47  		port          uint16 // listening port
    48  		fwmark        uint32 // mark value (0 = disabled)
    49  		brokenRoaming bool
    50  	}
    51  
    52  	staticIdentity struct {
    53  		sync.RWMutex
    54  		privateKey NoisePrivateKey
    55  		publicKey  NoisePublicKey
    56  	}
    57  
    58  	peers struct {
    59  		sync.RWMutex // protects keyMap
    60  		keyMap       map[NoisePublicKey]*Peer
    61  	}
    62  
    63  	rate struct {
    64  		underLoadUntil atomic.Int64
    65  		limiter        ratelimiter.Ratelimiter
    66  	}
    67  
    68  	allowedips    AllowedIPs
    69  	indexTable    IndexTable
    70  	cookieChecker CookieChecker
    71  
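	// pool holds the reusable WaitPool buffers shared by the TUN, crypto, and
	// network workers: fixed-size message buffers plus the inbound/outbound
	// element slices and their containers, recycled to limit allocations.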
    72  	pool struct {
    73  		inboundElementsContainer  *WaitPool
    74  		outboundElementsContainer *WaitPool
    75  		messageBuffers            *WaitPool
    76  		inboundElements           *WaitPool
    77  		outboundElements          *WaitPool
    78  	}
    79  
    80  	queue struct {
    81  		encryption *outboundQueue
    82  		decryption *inboundQueue
    83  		handshake  *handshakeQueue
    84  	}
    85  
    86  	tun struct {
    87  		device tun.Device
    88  		mtu    atomic.Int32
    89  	}
    90  
    91  	ipcMutex sync.RWMutex
    92  	closed   chan struct{}
    93  	log      *Logger
    94  
    95  	isASecOn abool.AtomicBool
    96  	aSecMux  sync.RWMutex
    97  	aSecCfg  aSecCfgType
    98  }
    99  
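// aSecCfgType collects the AmneziaWG advanced-security parameters that
// handlePostConfig validates and applies: the junk packet count and its
// min/max size bounds, the extra junk sizes added to init and response
// handshake packets, and custom magic headers for the four message types.
// isSet reports whether any of these values were supplied in the last
// configuration request.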
   100  type aSecCfgType struct {
   101  	isSet                      bool
   102  	junkPacketCount            int
   103  	junkPacketMinSize          int
   104  	junkPacketMaxSize          int
   105  	initPacketJunkSize         int
   106  	responsePacketJunkSize     int
   107  	initPacketMagicHeader      uint32
   108  	responsePacketMagicHeader  uint32
   109  	underloadPacketMagicHeader uint32
   110  	transportPacketMagicHeader uint32
   111  }
   112  
   113  // deviceState represents the state of a Device.
   114  // There are three states: down, up, closed.
   115  // Transitions:
   116  //
   117  //	down -----+
   118  //	  ↑↓      ↓
   119  //	  up -> closed
   120  type deviceState uint32
   121  
   122  //go:generate go run golang.org/x/tools/cmd/stringer -type deviceState -trimprefix=deviceState
   123  const (
   124  	deviceStateDown deviceState = iota
   125  	deviceStateUp
   126  	deviceStateClosed
   127  )
   128  
    129  // deviceState returns device.state.state as a deviceState.
    130  // See the device.state.state comments for how to interpret this value.
   131  func (device *Device) deviceState() deviceState {
   132  	return deviceState(device.state.state.Load())
   133  }
   134  
   135  // isClosed reports whether the device is closed (or is closing).
   136  // See device.state.state comments for how to interpret this value.
   137  func (device *Device) isClosed() bool {
   138  	return device.deviceState() == deviceStateClosed
   139  }
   140  
   141  // isUp reports whether the device is up (or is attempting to come up).
   142  // See device.state.state comments for how to interpret this value.
   143  func (device *Device) isUp() bool {
   144  	return device.deviceState() == deviceStateUp
   145  }
   146  
    147  // removePeerLocked stops the peer and removes it from routing and the peer map. The caller must hold device.peers.Lock().
   148  func removePeerLocked(device *Device, peer *Peer, key NoisePublicKey) {
   149  	// stop routing and processing of packets
   150  	device.allowedips.RemoveByPeer(peer)
   151  	peer.Stop()
   152  
   153  	// remove from peer map
   154  	delete(device.peers.keyMap, key)
   155  }
   156  
   157  // changeState attempts to change the device state to match want.
   158  func (device *Device) changeState(want deviceState) (err error) {
   159  	device.state.Lock()
   160  	defer device.state.Unlock()
   161  	old := device.deviceState()
   162  	if old == deviceStateClosed {
   163  		// once closed, always closed
   164  		device.log.Verbosef("Interface closed, ignored requested state %s", want)
   165  		return nil
   166  	}
   167  	switch want {
   168  	case old:
   169  		return nil
   170  	case deviceStateUp:
   171  		device.state.state.Store(uint32(deviceStateUp))
   172  		err = device.upLocked()
   173  		if err == nil {
   174  			break
   175  		}
   176  		fallthrough // up failed; bring the device all the way back down
   177  	case deviceStateDown:
   178  		device.state.state.Store(uint32(deviceStateDown))
   179  		errDown := device.downLocked()
   180  		if err == nil {
   181  			err = errDown
   182  		}
   183  	}
   184  	device.log.Verbosef(
   185  		"Interface state was %s, requested %s, now %s", old, want, device.deviceState())
   186  	return
   187  }
   188  
    189  // upLocked attempts to bring the device up, returning an error if it fails.
    190  // The caller must hold the device.state mutex and is responsible for updating device.state.state.
   191  func (device *Device) upLocked() error {
   192  	if err := device.BindUpdate(); err != nil {
   193  		device.log.Errorf("Unable to update bind: %v", err)
   194  		return err
   195  	}
   196  
   197  	// The IPC set operation waits for peers to be created before calling Start() on them,
   198  	// so if there's a concurrent IPC set request happening, we should wait for it to complete.
   199  	device.ipcMutex.Lock()
   200  	defer device.ipcMutex.Unlock()
   201  
   202  	device.peers.RLock()
   203  	for _, peer := range device.peers.keyMap {
   204  		peer.Start()
   205  		if peer.persistentKeepaliveInterval.Load() > 0 {
   206  			peer.SendKeepalive()
   207  		}
   208  	}
   209  	device.peers.RUnlock()
   210  	return nil
   211  }
   212  
   213  // downLocked attempts to bring the device down.
    214  // The caller must hold the device.state mutex and is responsible for updating device.state.state.
   215  func (device *Device) downLocked() error {
   216  	err := device.BindClose()
   217  	if err != nil {
   218  		device.log.Errorf("Bind close failed: %v", err)
   219  	}
   220  
   221  	device.peers.RLock()
   222  	for _, peer := range device.peers.keyMap {
   223  		peer.Stop()
   224  	}
   225  	device.peers.RUnlock()
   226  	return err
   227  }
   228  
   229  func (device *Device) Up() error {
   230  	return device.changeState(deviceStateUp)
   231  }
   232  
   233  func (device *Device) Down() error {
   234  	return device.changeState(deviceStateDown)
   235  }
   236  
   237  func (device *Device) IsUnderLoad() bool {
   238  	// check if currently under load
   239  	now := time.Now()
   240  	underLoad := len(device.queue.handshake.c) >= QueueHandshakeSize/8
   241  	if underLoad {
   242  		device.rate.underLoadUntil.Store(now.Add(UnderLoadAfterTime).UnixNano())
   243  		return true
   244  	}
   245  	// check if recently under load
   246  	return device.rate.underLoadUntil.Load() > now.UnixNano()
   247  }
   248  
   249  func (device *Device) SetPrivateKey(sk NoisePrivateKey) error {
   250  	// lock required resources
   251  
   252  	device.staticIdentity.Lock()
   253  	defer device.staticIdentity.Unlock()
   254  
   255  	if sk.Equals(device.staticIdentity.privateKey) {
   256  		return nil
   257  	}
   258  
   259  	device.peers.Lock()
   260  	defer device.peers.Unlock()
   261  
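	// Hold a read lock on every peer's handshake state so no handshake can
	// observe a mix of the old and new static keys; the locks are released
	// only after the static-static DH values are recomputed below.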
   262  	lockedPeers := make([]*Peer, 0, len(device.peers.keyMap))
   263  	for _, peer := range device.peers.keyMap {
   264  		peer.handshake.mutex.RLock()
   265  		lockedPeers = append(lockedPeers, peer)
   266  	}
   267  
   268  	// remove peers with matching public keys
   269  
   270  	publicKey := sk.publicKey()
   271  	for key, peer := range device.peers.keyMap {
   272  		if peer.handshake.remoteStatic.Equals(publicKey) {
   273  			peer.handshake.mutex.RUnlock()
   274  			removePeerLocked(device, peer, key)
   275  			peer.handshake.mutex.RLock()
   276  		}
   277  	}
   278  
   279  	// update key material
   280  
   281  	device.staticIdentity.privateKey = sk
   282  	device.staticIdentity.publicKey = publicKey
   283  	device.cookieChecker.Init(publicKey)
   284  
   285  	// do static-static DH pre-computations
   286  
   287  	expiredPeers := make([]*Peer, 0, len(device.peers.keyMap))
   288  	for _, peer := range device.peers.keyMap {
   289  		handshake := &peer.handshake
   290  		handshake.precomputedStaticStatic, _ = device.staticIdentity.privateKey.sharedSecret(handshake.remoteStatic)
   291  		expiredPeers = append(expiredPeers, peer)
   292  	}
   293  
   294  	for _, peer := range lockedPeers {
   295  		peer.handshake.mutex.RUnlock()
   296  	}
   297  	for _, peer := range expiredPeers {
   298  		peer.ExpireCurrentKeypairs()
   299  	}
   300  
   301  	return nil
   302  }
   303  
   304  func NewDevice(tunDevice tun.Device, bind conn.Bind, logger *Logger) *Device {
   305  	device := new(Device)
   306  	device.state.state.Store(uint32(deviceStateDown))
   307  	device.closed = make(chan struct{})
   308  	device.log = logger
   309  	device.net.bind = bind
   310  	device.tun.device = tunDevice
   311  	mtu, err := device.tun.device.MTU()
   312  	if err != nil {
   313  		device.log.Errorf("Trouble determining MTU, assuming default: %v", err)
   314  		mtu = DefaultMTU
   315  	}
   316  	device.tun.mtu.Store(int32(mtu))
   317  	device.peers.keyMap = make(map[NoisePublicKey]*Peer)
   318  	device.rate.limiter.Init()
   319  	device.indexTable.Init()
   320  
   321  	device.PopulatePools()
   322  
   323  	// create queues
   324  
   325  	device.queue.handshake = newHandshakeQueue()
   326  	device.queue.encryption = newOutboundQueue()
   327  	device.queue.decryption = newInboundQueue()
   328  
   329  	// start workers
   330  
   331  	cpus := runtime.NumCPU()
   332  	device.state.stopping.Wait()
   333  	device.queue.encryption.wg.Add(cpus) // One for each RoutineHandshake
   334  	for i := 0; i < cpus; i++ {
   335  		go device.RoutineEncryption(i + 1)
   336  		go device.RoutineDecryption(i + 1)
   337  		go device.RoutineHandshake(i + 1)
   338  	}
   339  
   340  	device.state.stopping.Add(1)      // RoutineReadFromTUN
   341  	device.queue.encryption.wg.Add(1) // RoutineReadFromTUN
   342  	go device.RoutineReadFromTUN()
   343  	go device.RoutineTUNEventReader()
   344  
   345  	return device
   346  }
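
// A minimal usage sketch (illustrative only; it assumes the tun.CreateTUN and
// conn.NewDefaultBind helpers from this module plus a Logger from NewLogger):
//
//	tunDev, err := tun.CreateTUN("awg0", DefaultMTU)
//	// handle err ...
//	dev := NewDevice(tunDev, conn.NewDefaultBind(), NewLogger(LogLevelVerbose, "(awg0) "))
//	// apply configuration via dev.IpcSet(...), then:
//	if err := dev.Up(); err != nil {
//		// handle err ...
//	}
//	// ...
//	dev.Close()
//	<-dev.Wait()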
   347  
    348  // BatchSize returns the batch size for the device as a whole, which is the max of
    349  // the bind batch size and the tun batch size. The batch size reported by the device
   350  // is the size used to construct memory pools, and is the allowed batch size for
   351  // the lifetime of the device.
   352  func (device *Device) BatchSize() int {
   353  	size := device.net.bind.BatchSize()
   354  	dSize := device.tun.device.BatchSize()
   355  	if size < dSize {
   356  		size = dSize
   357  	}
   358  	return size
   359  }
   360  
   361  func (device *Device) LookupPeer(pk NoisePublicKey) *Peer {
   362  	device.peers.RLock()
   363  	defer device.peers.RUnlock()
   364  
   365  	return device.peers.keyMap[pk]
   366  }
   367  
   368  func (device *Device) RemovePeer(key NoisePublicKey) {
   369  	device.peers.Lock()
   370  	defer device.peers.Unlock()
   371  	// stop peer and remove from routing
   372  
   373  	peer, ok := device.peers.keyMap[key]
   374  	if ok {
   375  		removePeerLocked(device, peer, key)
   376  	}
   377  }
   378  
   379  func (device *Device) RemoveAllPeers() {
   380  	device.peers.Lock()
   381  	defer device.peers.Unlock()
   382  
   383  	for key, peer := range device.peers.keyMap {
   384  		removePeerLocked(device, peer, key)
   385  	}
   386  
   387  	device.peers.keyMap = make(map[NoisePublicKey]*Peer)
   388  }
   389  
   390  func (device *Device) Close() {
   391  	device.state.Lock()
   392  	defer device.state.Unlock()
   393  	device.ipcMutex.Lock()
   394  	defer device.ipcMutex.Unlock()
   395  	if device.isClosed() {
   396  		return
   397  	}
   398  	device.state.state.Store(uint32(deviceStateClosed))
   399  	device.log.Verbosef("Device closing")
   400  
   401  	device.tun.device.Close()
   402  	device.downLocked()
   403  
   404  	// Remove peers before closing queues,
   405  	// because peers assume that queues are active.
   406  	device.RemoveAllPeers()
   407  
   408  	// We kept a reference to the encryption and decryption queues,
   409  	// in case we started any new peers that might write to them.
   410  	// No new peers are coming; we are done with these queues.
   411  	device.queue.encryption.wg.Done()
   412  	device.queue.decryption.wg.Done()
   413  	device.queue.handshake.wg.Done()
   414  	device.state.stopping.Wait()
   415  
   416  	device.rate.limiter.Close()
   417  
   418  	device.resetProtocol()
   419  
   420  	device.log.Verbosef("Device closed")
   421  	close(device.closed)
   422  }
   423  
   424  func (device *Device) Wait() chan struct{} {
   425  	return device.closed
   426  }
   427  
   428  func (device *Device) SendKeepalivesToPeersWithCurrentKeypair() {
   429  	if !device.isUp() {
   430  		return
   431  	}
   432  
   433  	device.peers.RLock()
   434  	for _, peer := range device.peers.keyMap {
   435  		peer.keypairs.RLock()
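		// Only ping peers whose current keypair exists and is younger than
		// RejectAfterTime, i.e. still valid for sending transport data.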
   436  		sendKeepalive := peer.keypairs.current != nil && !peer.keypairs.current.created.Add(RejectAfterTime).Before(time.Now())
   437  		peer.keypairs.RUnlock()
   438  		if sendKeepalive {
   439  			peer.SendKeepalive()
   440  		}
   441  	}
   442  	device.peers.RUnlock()
   443  }
   444  
   445  // closeBindLocked closes the device's net.bind.
   446  // The caller must hold the net mutex.
   447  func closeBindLocked(device *Device) error {
   448  	var err error
   449  	netc := &device.net
   450  	if netc.netlinkCancel != nil {
   451  		netc.netlinkCancel.Cancel()
   452  	}
   453  	if netc.bind != nil {
   454  		err = netc.bind.Close()
   455  	}
   456  	netc.stopping.Wait()
   457  	return err
   458  }
   459  
   460  func (device *Device) Bind() conn.Bind {
   461  	device.net.Lock()
   462  	defer device.net.Unlock()
   463  	return device.net.bind
   464  }
   465  
   466  func (device *Device) BindSetMark(mark uint32) error {
   467  	device.net.Lock()
   468  	defer device.net.Unlock()
   469  
   470  	// check if modified
   471  	if device.net.fwmark == mark {
   472  		return nil
   473  	}
   474  
   475  	// update fwmark on existing bind
   476  	device.net.fwmark = mark
   477  	if device.isUp() && device.net.bind != nil {
   478  		if err := device.net.bind.SetMark(mark); err != nil {
   479  			return err
   480  		}
   481  	}
   482  
   483  	// clear cached source addresses
   484  	device.peers.RLock()
   485  	for _, peer := range device.peers.keyMap {
   486  		peer.markEndpointSrcForClearing()
   487  	}
   488  	device.peers.RUnlock()
   489  
   490  	return nil
   491  }
   492  
   493  func (device *Device) BindUpdate() error {
   494  	device.net.Lock()
   495  	defer device.net.Unlock()
   496  
   497  	// close existing sockets
   498  	if err := closeBindLocked(device); err != nil {
   499  		return err
   500  	}
   501  
   502  	// open new sockets
   503  	if !device.isUp() {
   504  		return nil
   505  	}
   506  
   507  	// bind to new port
   508  	var err error
   509  	var recvFns []conn.ReceiveFunc
   510  	netc := &device.net
   511  
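	// Open new sockets on the configured port (0 selects an ephemeral port);
	// the bind returns one receive function per receive path, each of which
	// gets its own RoutineReceiveIncoming goroutine below.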
   512  	recvFns, netc.port, err = netc.bind.Open(netc.port)
   513  	if err != nil {
   514  		netc.port = 0
   515  		return err
   516  	}
   517  
   518  	netc.netlinkCancel, err = device.startRouteListener(netc.bind)
   519  	if err != nil {
   520  		netc.bind.Close()
   521  		netc.port = 0
   522  		return err
   523  	}
   524  
   525  	// set fwmark
   526  	if netc.fwmark != 0 {
   527  		err = netc.bind.SetMark(netc.fwmark)
   528  		if err != nil {
   529  			return err
   530  		}
   531  	}
   532  
   533  	// clear cached source addresses
   534  	device.peers.RLock()
   535  	for _, peer := range device.peers.keyMap {
   536  		peer.markEndpointSrcForClearing()
   537  	}
   538  	device.peers.RUnlock()
   539  
   540  	// start receiving routines
   541  	device.net.stopping.Add(len(recvFns))
   542  	device.queue.decryption.wg.Add(len(recvFns)) // each RoutineReceiveIncoming goroutine writes to device.queue.decryption
   543  	device.queue.handshake.wg.Add(len(recvFns))  // each RoutineReceiveIncoming goroutine writes to device.queue.handshake
   544  	batchSize := netc.bind.BatchSize()
   545  	for _, fn := range recvFns {
   546  		go device.RoutineReceiveIncoming(batchSize, fn)
   547  	}
   548  
   549  	device.log.Verbosef("UDP bind has been updated")
   550  	device.log.Verbosef(netc.bind.GetOffloadInfo())
   551  	return nil
   552  }
   553  
   554  func (device *Device) BindClose() error {
   555  	device.net.Lock()
   556  	err := closeBindLocked(device)
   557  	device.net.Unlock()
   558  	return err
   559  }

    560  func (device *Device) isAdvancedSecurityOn() bool {
   561  	return device.isASecOn.IsSet()
   562  }
   563  
   564  func (device *Device) resetProtocol() {
   565  	// restore default message type values
   566  	MessageInitiationType = 1
   567  	MessageResponseType = 2
   568  	MessageCookieReplyType = 3
   569  	MessageTransportType = 4
   570  }
   571  
   572  func (device *Device) handlePostConfig(tempASecCfg *aSecCfgType) (err error) {
   573  
   574  	if !tempASecCfg.isSet {
    575  		return nil
   576  	}
   577  
   578  	isASecOn := false
   579  	device.aSecMux.Lock()
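	// Validation below is best-effort: a failed check records an error (wrapping
	// any earlier one) but processing continues, so the remaining fields are
	// still inspected and applied where valid.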
   580  	if tempASecCfg.junkPacketCount < 0 {
   581  		err = ipcErrorf(
   582  			ipc.IpcErrorInvalid,
    583  			"JunkPacketCount should be non-negative",
   584  		)
   585  	}
   586  	device.aSecCfg.junkPacketCount = tempASecCfg.junkPacketCount
   587  	if tempASecCfg.junkPacketCount != 0 {
   588  		isASecOn = true
   589  	}
   590  
   591  	device.aSecCfg.junkPacketMinSize = tempASecCfg.junkPacketMinSize
   592  	if tempASecCfg.junkPacketMinSize != 0 {
   593  		isASecOn = true
   594  	}
   595  
   596  	if device.aSecCfg.junkPacketCount > 0 &&
   597  		tempASecCfg.junkPacketMaxSize == tempASecCfg.junkPacketMinSize {
   598  
    599  		tempASecCfg.junkPacketMaxSize++ // keep max > min so random junk sizes can be generated
   600  	}
   601  
   602  	if tempASecCfg.junkPacketMaxSize >= MaxSegmentSize {
   603  		device.aSecCfg.junkPacketMinSize = 0
   604  		device.aSecCfg.junkPacketMaxSize = 1
   605  		if err != nil {
   606  			err = ipcErrorf(
   607  				ipc.IpcErrorInvalid,
   608  				"JunkPacketMaxSize: %d; should be smaller than maxSegmentSize: %d; %w",
   609  				tempASecCfg.junkPacketMaxSize,
   610  				MaxSegmentSize,
   611  				err,
   612  			)
   613  		} else {
   614  			err = ipcErrorf(
   615  				ipc.IpcErrorInvalid,
   616  				"JunkPacketMaxSize: %d; should be smaller than maxSegmentSize: %d",
   617  				tempASecCfg.junkPacketMaxSize,
   618  				MaxSegmentSize,
   619  			)
   620  		}
   621  	} else if tempASecCfg.junkPacketMaxSize < tempASecCfg.junkPacketMinSize {
   622  		if err != nil {
   623  			err = ipcErrorf(
   624  				ipc.IpcErrorInvalid,
   625  				"maxSize: %d; should be greater than minSize: %d; %w",
   626  				tempASecCfg.junkPacketMaxSize,
   627  				tempASecCfg.junkPacketMinSize,
   628  				err,
   629  			)
   630  		} else {
   631  			err = ipcErrorf(
   632  				ipc.IpcErrorInvalid,
   633  				"maxSize: %d; should be greater than minSize: %d",
   634  				tempASecCfg.junkPacketMaxSize,
   635  				tempASecCfg.junkPacketMinSize,
   636  			)
   637  		}
   638  	} else {
   639  		device.aSecCfg.junkPacketMaxSize = tempASecCfg.junkPacketMaxSize
   640  	}
   641  
   642  	if tempASecCfg.junkPacketMaxSize != 0 {
   643  		isASecOn = true
   644  	}
   645  
   646  	if MessageInitiationSize+tempASecCfg.initPacketJunkSize >= MaxSegmentSize {
   647  		if err != nil {
   648  			err = ipcErrorf(
   649  				ipc.IpcErrorInvalid,
   650  				`init header size(148) + junkSize:%d; should be smaller than maxSegmentSize: %d; %w`,
   651  				tempASecCfg.initPacketJunkSize,
   652  				MaxSegmentSize,
   653  				err,
   654  			)
   655  		} else {
   656  			err = ipcErrorf(
   657  				ipc.IpcErrorInvalid,
   658  				`init header size(148) + junkSize:%d; should be smaller than maxSegmentSize: %d`,
   659  				tempASecCfg.initPacketJunkSize,
   660  				MaxSegmentSize,
   661  			)
   662  		}
   663  	} else {
   664  		device.aSecCfg.initPacketJunkSize = tempASecCfg.initPacketJunkSize
   665  	}
   666  
   667  	if tempASecCfg.initPacketJunkSize != 0 {
   668  		isASecOn = true
   669  	}
   670  
   671  	if MessageResponseSize+tempASecCfg.responsePacketJunkSize >= MaxSegmentSize {
   672  		if err != nil {
   673  			err = ipcErrorf(
   674  				ipc.IpcErrorInvalid,
   675  				`response header size(92) + junkSize:%d; should be smaller than maxSegmentSize: %d; %w`,
   676  				tempASecCfg.responsePacketJunkSize,
   677  				MaxSegmentSize,
   678  				err,
   679  			)
   680  		} else {
   681  			err = ipcErrorf(
   682  				ipc.IpcErrorInvalid,
   683  				`response header size(92) + junkSize:%d; should be smaller than maxSegmentSize: %d`,
   684  				tempASecCfg.responsePacketJunkSize,
   685  				MaxSegmentSize,
   686  			)
   687  		}
   688  	} else {
   689  		device.aSecCfg.responsePacketJunkSize = tempASecCfg.responsePacketJunkSize
   690  	}
   691  
   692  	if tempASecCfg.responsePacketJunkSize != 0 {
   693  		isASecOn = true
   694  	}
   695  
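	// Values 1-4 are the stock WireGuard message types, so a custom magic
	// header only takes effect when it is greater than 4; otherwise the
	// default type value is kept.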
   696  	if tempASecCfg.initPacketMagicHeader > 4 {
   697  		isASecOn = true
   698  		device.log.Verbosef("UAPI: Updating init_packet_magic_header")
   699  		device.aSecCfg.initPacketMagicHeader = tempASecCfg.initPacketMagicHeader
   700  		MessageInitiationType = device.aSecCfg.initPacketMagicHeader
   701  	} else {
   702  		device.log.Verbosef("UAPI: Using default init type")
   703  		MessageInitiationType = 1
   704  	}
   705  
   706  	if tempASecCfg.responsePacketMagicHeader > 4 {
   707  		isASecOn = true
   708  		device.log.Verbosef("UAPI: Updating response_packet_magic_header")
   709  		device.aSecCfg.responsePacketMagicHeader = tempASecCfg.responsePacketMagicHeader
   710  		MessageResponseType = device.aSecCfg.responsePacketMagicHeader
   711  	} else {
   712  		device.log.Verbosef("UAPI: Using default response type")
   713  		MessageResponseType = 2
   714  	}
   715  
   716  	if tempASecCfg.underloadPacketMagicHeader > 4 {
   717  		isASecOn = true
   718  		device.log.Verbosef("UAPI: Updating underload_packet_magic_header")
   719  		device.aSecCfg.underloadPacketMagicHeader = tempASecCfg.underloadPacketMagicHeader
   720  		MessageCookieReplyType = device.aSecCfg.underloadPacketMagicHeader
   721  	} else {
   722  		device.log.Verbosef("UAPI: Using default underload type")
   723  		MessageCookieReplyType = 3
   724  	}
   725  
   726  	if tempASecCfg.transportPacketMagicHeader > 4 {
   727  		isASecOn = true
   728  		device.log.Verbosef("UAPI: Updating transport_packet_magic_header")
   729  		device.aSecCfg.transportPacketMagicHeader = tempASecCfg.transportPacketMagicHeader
   730  		MessageTransportType = device.aSecCfg.transportPacketMagicHeader
   731  	} else {
   732  		device.log.Verbosef("UAPI: Using default transport type")
   733  		MessageTransportType = 4
   734  	}
   735  
   736  	isSameMap := map[uint32]bool{}
   737  	isSameMap[MessageInitiationType] = true
   738  	isSameMap[MessageResponseType] = true
   739  	isSameMap[MessageCookieReplyType] = true
   740  	isSameMap[MessageTransportType] = true
   741  
    742  	// duplicate values collapse, so fewer than 4 keys means at least two message types collide
   743  	if len(isSameMap) != 4 {
   744  		if err != nil {
   745  			err = ipcErrorf(
   746  				ipc.IpcErrorInvalid,
   747  				`magic headers should differ; got: init:%d; recv:%d; unde:%d; tran:%d; %w`,
   748  				MessageInitiationType,
   749  				MessageResponseType,
   750  				MessageCookieReplyType,
   751  				MessageTransportType,
   752  				err,
   753  			)
   754  		} else {
   755  			err = ipcErrorf(
   756  				ipc.IpcErrorInvalid,
   757  				`magic headers should differ; got: init:%d; recv:%d; unde:%d; tran:%d`,
   758  				MessageInitiationType,
   759  				MessageResponseType,
   760  				MessageCookieReplyType,
   761  				MessageTransportType,
   762  			)
   763  		}
   764  	}
   765  
   766  	newInitSize := MessageInitiationSize + device.aSecCfg.initPacketJunkSize
   767  	newResponseSize := MessageResponseSize + device.aSecCfg.responsePacketJunkSize
   768  
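	// When junk is enabled, handshake messages are recognized by their total
	// packet size (packetSizeToMsgType below), so the padded init and response
	// sizes must not collide.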
   769  	if newInitSize == newResponseSize {
   770  		if err != nil {
   771  			err = ipcErrorf(
   772  				ipc.IpcErrorInvalid,
   773  				`new init size:%d; and new response size:%d; should differ; %w`,
   774  				newInitSize,
   775  				newResponseSize,
   776  				err,
   777  			)
   778  		} else {
   779  			err = ipcErrorf(
   780  				ipc.IpcErrorInvalid,
   781  				`new init size:%d; and new response size:%d; should differ`,
   782  				newInitSize,
   783  				newResponseSize,
   784  			)
   785  		}
   786  	} else {
   787  		packetSizeToMsgType = map[int]uint32{
   788  			newInitSize:            MessageInitiationType,
   789  			newResponseSize:        MessageResponseType,
   790  			MessageCookieReplySize: MessageCookieReplyType,
   791  			MessageTransportSize:   MessageTransportType,
   792  		}
   793  
   794  		msgTypeToJunkSize = map[uint32]int{
   795  			MessageInitiationType:  device.aSecCfg.initPacketJunkSize,
   796  			MessageResponseType:    device.aSecCfg.responsePacketJunkSize,
   797  			MessageCookieReplyType: 0,
   798  			MessageTransportType:   0,
   799  		}
   800  	}
   801  
   802  	device.isASecOn.SetTo(isASecOn)
   803  	device.aSecMux.Unlock()
   804  
   805  	return err
   806  }
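
// For reference, a configuration sketch that would exercise handlePostConfig.
// The key names follow the AmneziaWG UAPI convention and are parsed outside
// this file, so treat them as illustrative rather than authoritative:
//
//	jc=4       (junkPacketCount)
//	jmin=40    (junkPacketMinSize)
//	jmax=70    (junkPacketMaxSize)
//	s1=15      (initPacketJunkSize)
//	s2=68      (responsePacketJunkSize)
//	h1=123456  (initPacketMagicHeader)
//	h2=67543   (responsePacketMagicHeader)
//	h3=32345   (underloadPacketMagicHeader)
//	h4=123123  (transportPacketMagicHeader)
//
// These values satisfy the checks above: h1-h4 are distinct and greater than 4,
// jmin < jmax < MaxSegmentSize, and 148+s1 differs from 92+s2 so init and
// response packets stay distinguishable by size.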