github.com/liloew/wireguard-go@v0.0.0-20220224014633-9cd745e6f114/device/send.go

/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2021 WireGuard LLC. All Rights Reserved.
 */

package device

import (
	"bytes"
	"encoding/binary"
	"errors"
	"net"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/chacha20poly1305"
	"golang.org/x/net/ipv4"
	"golang.org/x/net/ipv6"
)

/* Outbound flow
 *
 * 1. TUN queue
 * 2. Routing (sequential)
 * 3. Nonce assignment (sequential)
 * 4. Encryption (parallel)
 * 5. Transmission (sequential)
 *
 * The functions in this file occur (roughly) in the order in
 * which the packets are processed.
 *
 * Locking, Producers and Consumers
 *
 * The order of packets (per peer) must be maintained,
 * but encryption of packets happens out-of-order:
 *
 * The sequential consumers will attempt to take the lock;
 * workers release the lock when they have completed work (encryption) on the packet.
 *
 * If the element is inserted into the "encryption queue",
 * the content is preceded by enough "junk" to contain the transport header
 * (to allow the construction of transport messages in-place).
 */

type QueueOutboundElement struct {
	sync.Mutex
	buffer  *[MaxMessageSize]byte // slice holding the packet data
	packet  []byte                // slice of "buffer" (always!)
	nonce   uint64                // nonce for encryption
	keypair *Keypair              // keypair for encryption
	peer    *Peer                 // related peer
}

func (device *Device) NewOutboundElement() *QueueOutboundElement {
	elem := device.GetOutboundElement()
	elem.buffer = device.GetMessageBuffer()
	elem.Mutex = sync.Mutex{}
	elem.nonce = 0
	// keypair and peer were cleared (if necessary) by clearPointers.
	return elem
}
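// Note that elements and their backing buffers come from the device's shared
// pools (GetOutboundElement / GetMessageBuffer). Every path below that drops
// a packet is therefore careful to return both with PutMessageBuffer and
// PutOutboundElement, so a dropped packet never leaks a buffer.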
// clearPointers clears elem fields that contain pointers.
// This makes the garbage collector's life easier and
// avoids accidentally keeping other objects around unnecessarily.
// It also reduces the possible collateral damage from use-after-free bugs.
func (elem *QueueOutboundElement) clearPointers() {
	elem.buffer = nil
	elem.packet = nil
	elem.keypair = nil
	elem.peer = nil
}

/* Queues a keepalive if no packets are queued for peer
 */
func (peer *Peer) SendKeepalive() {
	if len(peer.queue.staged) == 0 && peer.isRunning.Get() {
		elem := peer.device.NewOutboundElement()
		select {
		case peer.queue.staged <- elem:
			peer.device.log.Verbosef("%v - Sending keepalive packet", peer)
		default:
			peer.device.PutMessageBuffer(elem.buffer)
			peer.device.PutOutboundElement(elem)
		}
	}
	peer.SendStagedPackets()
}

func (peer *Peer) SendHandshakeInitiation(isRetry bool) error {
	if !isRetry {
		atomic.StoreUint32(&peer.timers.handshakeAttempts, 0)
	}

	peer.handshake.mutex.RLock()
	if time.Since(peer.handshake.lastSentHandshake) < RekeyTimeout {
		peer.handshake.mutex.RUnlock()
		return nil
	}
	peer.handshake.mutex.RUnlock()

	peer.handshake.mutex.Lock()
	if time.Since(peer.handshake.lastSentHandshake) < RekeyTimeout {
		peer.handshake.mutex.Unlock()
		return nil
	}
	peer.handshake.lastSentHandshake = time.Now()
	peer.handshake.mutex.Unlock()

	peer.device.log.Verbosef("%v - Sending handshake initiation", peer)

	msg, err := peer.device.CreateMessageInitiation(peer)
	if err != nil {
		peer.device.log.Errorf("%v - Failed to create initiation message: %v", peer, err)
		return err
	}

	var buff [MessageInitiationSize]byte
	writer := bytes.NewBuffer(buff[:0])
	binary.Write(writer, binary.LittleEndian, msg)
	packet := writer.Bytes()
	peer.cookieGenerator.AddMacs(packet)

	peer.timersAnyAuthenticatedPacketTraversal()
	peer.timersAnyAuthenticatedPacketSent()

	err = peer.SendBuffer(packet)
	if err != nil {
		peer.device.log.Errorf("%v - Failed to send handshake initiation: %v", peer, err)
	}
	peer.timersHandshakeInitiated()

	return err
}

func (peer *Peer) SendHandshakeResponse() error {
	peer.handshake.mutex.Lock()
	peer.handshake.lastSentHandshake = time.Now()
	peer.handshake.mutex.Unlock()

	peer.device.log.Verbosef("%v - Sending handshake response", peer)

	response, err := peer.device.CreateMessageResponse(peer)
	if err != nil {
		peer.device.log.Errorf("%v - Failed to create response message: %v", peer, err)
		return err
	}

	var buff [MessageResponseSize]byte
	writer := bytes.NewBuffer(buff[:0])
	binary.Write(writer, binary.LittleEndian, response)
	packet := writer.Bytes()
	peer.cookieGenerator.AddMacs(packet)

	err = peer.BeginSymmetricSession()
	if err != nil {
		peer.device.log.Errorf("%v - Failed to derive keypair: %v", peer, err)
		return err
	}

	peer.timersSessionDerived()
	peer.timersAnyAuthenticatedPacketTraversal()
	peer.timersAnyAuthenticatedPacketSent()

	err = peer.SendBuffer(packet)
	if err != nil {
		peer.device.log.Errorf("%v - Failed to send handshake response: %v", peer, err)
	}
	return err
}
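// SendHandshakeCookie answers a handshake message that was denied while the
// device is under load: rather than doing the expensive handshake work, the
// device sends back a cookie reply bound to the sender's source address, and
// the initiator is expected to fold the decrypted cookie into the second MAC
// of its retried handshake message.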
func (device *Device) SendHandshakeCookie(initiatingElem *QueueHandshakeElement) error {
	device.log.Verbosef("Sending cookie response for denied handshake message for %v", initiatingElem.endpoint.DstToString())

	sender := binary.LittleEndian.Uint32(initiatingElem.packet[4:8])
	reply, err := device.cookieChecker.CreateReply(initiatingElem.packet, sender, initiatingElem.endpoint.DstToBytes())
	if err != nil {
		device.log.Errorf("Failed to create cookie reply: %v", err)
		return err
	}

	var buff [MessageCookieReplySize]byte
	writer := bytes.NewBuffer(buff[:0])
	binary.Write(writer, binary.LittleEndian, reply)
	device.net.bind.Send(writer.Bytes(), initiatingElem.endpoint)
	return nil
}

func (peer *Peer) keepKeyFreshSending() {
	keypair := peer.keypairs.Current()
	if keypair == nil {
		return
	}
	nonce := atomic.LoadUint64(&keypair.sendNonce)
	if nonce > RekeyAfterMessages || (keypair.isInitiator && time.Since(keypair.created) > RekeyAfterTime) {
		peer.SendHandshakeInitiation(false)
	}
}

/* Reads packets from the TUN and inserts them
 * into the staged queue for the peer
 *
 * Obs. Single instance per TUN device
 */
func (device *Device) RoutineReadFromTUN() {
	defer func() {
		device.log.Verbosef("Routine: TUN reader - stopped")
		device.state.stopping.Done()
		device.queue.encryption.wg.Done()
	}()

	device.log.Verbosef("Routine: TUN reader - started")

	var elem *QueueOutboundElement

	for {
		if elem != nil {
			device.PutMessageBuffer(elem.buffer)
			device.PutOutboundElement(elem)
		}
		elem = device.NewOutboundElement()

		// read packet

		offset := MessageTransportHeaderSize
		size, err := device.tun.device.Read(elem.buffer[:], offset)
		if err != nil {
			if !device.isClosed() {
				if !errors.Is(err, os.ErrClosed) {
					device.log.Errorf("Failed to read packet from TUN device: %v", err)
				}
				go device.Close()
			}
			device.PutMessageBuffer(elem.buffer)
			device.PutOutboundElement(elem)
			return
		}

		if size == 0 || size > MaxContentSize {
			continue
		}

		elem.packet = elem.buffer[offset : offset+size]

		// lookup peer

		var peer *Peer
		switch elem.packet[0] >> 4 {
		case ipv4.Version:
			if len(elem.packet) < ipv4.HeaderLen {
				continue
			}
			dst := elem.packet[IPv4offsetDst : IPv4offsetDst+net.IPv4len]
			peer = device.allowedips.Lookup(dst)

		case ipv6.Version:
			if len(elem.packet) < ipv6.HeaderLen {
				continue
			}
			dst := elem.packet[IPv6offsetDst : IPv6offsetDst+net.IPv6len]
			peer = device.allowedips.Lookup(dst)

		default:
			device.log.Verbosef("Received packet with unknown IP version")
		}

		if peer == nil {
			continue
		}
		if peer.isRunning.Get() {
			peer.StagePacket(elem)
			elem = nil
			peer.SendStagedPackets()
		}
	}
}

func (peer *Peer) StagePacket(elem *QueueOutboundElement) {
	for {
		select {
		case peer.queue.staged <- elem:
			return
		default:
		}
		select {
		case tooOld := <-peer.queue.staged:
			peer.device.PutMessageBuffer(tooOld.buffer)
			peer.device.PutOutboundElement(tooOld)
		default:
		}
	}
}

func (peer *Peer) SendStagedPackets() {
top:
	if len(peer.queue.staged) == 0 || !peer.device.isUp() {
		return
	}

	keypair := peer.keypairs.Current()
	if keypair == nil || atomic.LoadUint64(&keypair.sendNonce) >= RejectAfterMessages || time.Since(keypair.created) >= RejectAfterTime {
		peer.SendHandshakeInitiation(false)
		return
	}

	for {
		select {
		case elem := <-peer.queue.staged:
			elem.peer = peer
			elem.nonce = atomic.AddUint64(&keypair.sendNonce, 1) - 1
			if elem.nonce >= RejectAfterMessages {
				atomic.StoreUint64(&keypair.sendNonce, RejectAfterMessages)
				peer.StagePacket(elem) // XXX: Out of order, but we can't front-load go chans
				goto top
			}

			elem.keypair = keypair
			elem.Lock()

			// add to parallel and sequential queue
			if peer.isRunning.Get() {
				peer.queue.outbound.c <- elem
				peer.device.queue.encryption.c <- elem
			} else {
				peer.device.PutMessageBuffer(elem.buffer)
				peer.device.PutOutboundElement(elem)
			}
		default:
			return
		}
	}
}

func (peer *Peer) FlushStagedPackets() {
	for {
		select {
		case elem := <-peer.queue.staged:
			peer.device.PutMessageBuffer(elem.buffer)
			peer.device.PutOutboundElement(elem)
		default:
			return
		}
	}
}
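// calculatePaddingSize pads only the final "unit" of the packet. A sketch of
// the arithmetic, assuming PaddingMultiple is 16 as in the WireGuard protocol:
//
//	calculatePaddingSize(100, 1420)  // lastUnit = 100, padded to 112      -> returns 12
//	calculatePaddingSize(0, 1420)    // keepalive: padded size stays 0     -> returns 0
//	calculatePaddingSize(1460, 1420) // lastUnit = 1460 % 1420 = 40 -> 48  -> returns 8
//	calculatePaddingSize(16, 0)      // no MTU known: round up only        -> returns 0
//
// The padded size is additionally clamped to the MTU so that padding never
// pushes a packet beyond what the device can transmit.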
func calculatePaddingSize(packetSize, mtu int) int {
	lastUnit := packetSize
	if mtu == 0 {
		return ((lastUnit + PaddingMultiple - 1) & ^(PaddingMultiple - 1)) - lastUnit
	}
	if lastUnit > mtu {
		lastUnit %= mtu
	}
	paddedSize := ((lastUnit + PaddingMultiple - 1) & ^(PaddingMultiple - 1))
	if paddedSize > mtu {
		paddedSize = mtu
	}
	return paddedSize - lastUnit
}

/* Encrypts the elements in the queue
 * and marks them for sequential consumption (by releasing the mutex)
 *
 * Obs. One instance per core
 */
func (device *Device) RoutineEncryption(id int) {
	var paddingZeros [PaddingMultiple]byte
	var nonce [chacha20poly1305.NonceSize]byte

	defer device.log.Verbosef("Routine: encryption worker %d - stopped", id)
	device.log.Verbosef("Routine: encryption worker %d - started", id)

	for elem := range device.queue.encryption.c {
		// populate header fields
		header := elem.buffer[:MessageTransportHeaderSize]

		fieldType := header[0:4]
		fieldReceiver := header[4:8]
		fieldNonce := header[8:16]

		binary.LittleEndian.PutUint32(fieldType, MessageTransportType)
		binary.LittleEndian.PutUint32(fieldReceiver, elem.keypair.remoteIndex)
		binary.LittleEndian.PutUint64(fieldNonce, elem.nonce)

		// pad content to multiple of 16
		paddingSize := calculatePaddingSize(len(elem.packet), int(atomic.LoadInt32(&device.tun.mtu)))
		elem.packet = append(elem.packet, paddingZeros[:paddingSize]...)

		// encrypt content and release to consumer

		binary.LittleEndian.PutUint64(nonce[4:], elem.nonce)
		elem.packet = elem.keypair.send.Seal(
			header,
			nonce[:],
			elem.packet,
			nil,
		)
		elem.Unlock()
	}
}
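// After sealing, elem.packet is a complete transport message laid out as
//
//	type (4 bytes) | receiver index (4 bytes) | counter (8 bytes) | ciphertext + Poly1305 tag
//
// The ChaCha20-Poly1305 nonce is the 64-bit send counter written little-endian
// into the last 8 bytes of an otherwise-zero 12-byte nonce, so the counter must
// never repeat for a given keypair; SendStagedPackets enforces this by refusing
// nonces at or above RejectAfterMessages and triggering a new handshake instead.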
/* Sequentially reads packets from queue and sends to endpoint
 *
 * Obs. Single instance per peer.
 * The routine terminates when the outbound queue is closed.
 */
func (peer *Peer) RoutineSequentialSender() {
	device := peer.device
	defer func() {
		defer device.log.Verbosef("%v - Routine: sequential sender - stopped", peer)
		peer.stopping.Done()
	}()
	device.log.Verbosef("%v - Routine: sequential sender - started", peer)

	for elem := range peer.queue.outbound.c {
		if elem == nil {
			return
		}
		elem.Lock()
		if !peer.isRunning.Get() {
			// peer has been stopped; return re-usable elems to the shared pool.
			// This is an optimization only. It is possible for the peer to be stopped
			// immediately after this check, in which case elem will get processed.
			// The timers and SendBuffer code are resilient to a few stragglers.
			// TODO: rework peer shutdown order to ensure
			// that we never accidentally keep timers alive longer than necessary.
			device.PutMessageBuffer(elem.buffer)
			device.PutOutboundElement(elem)
			continue
		}

		peer.timersAnyAuthenticatedPacketTraversal()
		peer.timersAnyAuthenticatedPacketSent()

		// send message and return buffer to pool

		err := peer.SendBuffer(elem.packet)
		if len(elem.packet) != MessageKeepaliveSize {
			peer.timersDataSent()
		}
		device.PutMessageBuffer(elem.buffer)
		device.PutOutboundElement(elem)
		if err != nil {
			device.log.Errorf("%v - Failed to send data packet: %v", peer, err)
			continue
		}

		peer.keepKeyFreshSending()
	}
}