github.com/cloudwego/hertz@v0.9.3/pkg/network/standard/connection.go

/*
 * Copyright 2022 CloudWeGo Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package standard

import (
	"crypto/tls"
	"errors"
	"io"
	"net"
	"runtime"
	"strconv"
	"syscall"
	"time"

	errs "github.com/cloudwego/hertz/pkg/common/errors"
	"github.com/cloudwego/hertz/pkg/common/hlog"
	"github.com/cloudwego/hertz/pkg/network"
)

const (
	block1k                  = 1024
	block4k                  = 4096
	block8k                  = 8192
	mallocMax                = block1k * 512
	defaultMallocSize        = block4k
	maxConsecutiveEmptyReads = 100
)

type Conn struct {
	c            net.Conn
	inputBuffer  *linkBuffer
	outputBuffer *linkBuffer
	caches       [][]byte // bufs allocated by Peek when data crosses nodes; they should be freed on release
	maxSize      int      // historical max malloc size

	err error
}

func (c *Conn) ToHertzError(err error) error {
	if errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ENOTCONN) {
		return errs.ErrConnectionClosed
	}
	if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() {
		return errs.ErrTimeout
	}

	return err
}

func (c *Conn) SetWriteTimeout(t time.Duration) error {
	if t <= 0 {
		return c.c.SetWriteDeadline(time.Time{})
	}
	return c.c.SetWriteDeadline(time.Now().Add(t))
}

func (c *Conn) SetReadTimeout(t time.Duration) error {
	if t <= 0 {
		return c.c.SetReadDeadline(time.Time{})
	}
	return c.c.SetReadDeadline(time.Now().Add(t))
}

type TLSConn struct {
	Conn
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func (c *Conn) Read(b []byte) (l int, err error) {
	l = c.Len()
	// If there is some data in inputBuffer, copy it to b and return.
	if l > 0 {
		l = min(l, len(b))
		return l, c.next(l, b)
	}

	// If the requested size is no larger than block4k, first fill(1) to fill the buffer.
	// Then copy min(c.Len(), len(b)) to b.
	if len(b) <= block4k {
		// If c.fill(1) returns err, conn.Read must return 0, err. So there is no need
		// to check c.Len().
		err = c.fill(1)
		if err != nil {
			return 0, err
		}
		l = min(c.Len(), len(b))
		return l, c.next(l, b)
	}

	// Call Read() directly to fill buffer b.
	return c.c.Read(b)
}

// Write calls the Write syscall directly to send data.
// It flushes the buffer immediately; for performance considerations use WriteBinary instead.
func (c *Conn) Write(b []byte) (n int, err error) {
	if err = c.Flush(); err != nil {
		return
	}
	return c.c.Write(b)
}
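
// A minimal read-path sketch (added for illustration; not part of the original
// file). It shows how a caller would normally consume data through the buffered
// Conn: Peek serves bytes straight from inputBuffer (filling it if needed) and
// Skip advances the read pointer, so small requests avoid an extra copy.
// The readHeader helper and the 8-byte size are hypothetical.
//
//	func readHeader(c network.Conn) ([]byte, error) {
//		buf, err := c.Peek(8) // fills inputBuffer when fewer than 8 bytes are buffered
//		if err != nil {
//			return nil, err
//		}
//		// buf aliases the internal buffer and is only valid until the next read,
//		// so copy it before skipping.
//		header := append([]byte(nil), buf...)
//		return header, c.Skip(8)
//	}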

// ReadFrom implements io.ReaderFrom. If the underlying writer
// supports the ReadFrom method, and c has no buffered data yet,
// this calls the underlying ReadFrom without buffering.
func (c *Conn) ReadFrom(r io.Reader) (n int64, err error) {
	if err = c.Flush(); err != nil {
		return
	}

	if w, ok := c.c.(io.ReaderFrom); ok {
		n, err = w.ReadFrom(r)
		return
	}

	var m int
	bufNode := c.outputBuffer.write

	// if there is no available buffer, create one.
	if !bufNode.recyclable() || cap(bufNode.buf) == 0 {
		c.Malloc(block4k)
		c.outputBuffer.write.Reset()
		c.outputBuffer.len = cap(c.outputBuffer.write.buf)
		bufNode = c.outputBuffer.write
	}

	for {
		if bufNode.Cap() == 0 {
			if err1 := c.Flush(); err1 != nil {
				return n, err1
			}
		}

		nr := 0
		for nr < maxConsecutiveEmptyReads {
			m, err = r.Read(bufNode.buf[bufNode.malloc:cap(bufNode.buf)])
			if m != 0 || err != nil {
				break
			}
			nr++
		}
		if nr == maxConsecutiveEmptyReads {
			return n, io.ErrNoProgress
		}
		bufNode.malloc += m
		n += int64(m)
		if err != nil {
			break
		}
	}
	if err == io.EOF {
		// If we filled the buffer exactly, flush preemptively.
		if bufNode.Cap() == 0 {
			err = c.Flush()
		} else {
			err = nil
			// Update buffer available length for next Malloc
			c.outputBuffer.len = bufNode.Cap()
		}
	}
	return
}

// Close closes the connection.
func (c *Conn) Close() error {
	return c.c.Close()
}

// CloseNoResetBuffer closes the connection without resetting the buffer.
func (c *Conn) CloseNoResetBuffer() error {
	return c.c.Close()
}

// LocalAddr returns the local address of the connection.
func (c *Conn) LocalAddr() net.Addr {
	return c.c.LocalAddr()
}

// RemoteAddr returns the remote address of the connection.
func (c *Conn) RemoteAddr() net.Addr {
	return c.c.RemoteAddr()
}

// SetDeadline sets the connection deadline.
func (c *Conn) SetDeadline(t time.Time) error {
	return c.c.SetDeadline(t)
}

// SetReadDeadline sets the read deadline of the connection.
func (c *Conn) SetReadDeadline(t time.Time) error {
	return c.c.SetReadDeadline(t)
}

// SetWriteDeadline sets the write deadline of the connection.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	return c.c.SetWriteDeadline(t)
}

func (c *Conn) releaseCaches() {
	for i := range c.caches {
		free(c.caches[i])
		c.caches[i] = nil
	}
	c.caches = c.caches[:0]
}
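
// The three inputBuffer layouts handled by Release below (a reading aid added
// to this listing, not a comment from the original source):
//
//	1. head == write:       the whole request fit in one node; Reset it and reuse it.
//	2. head.next == write:  head still holds the previous request; release head,
//	                        make write the new head, and recompute maxSize.
//	3. otherwise:           the request crossed several nodes or has not been fully
//	                        read; release every node in front of read and mark the
//	                        write node readOnly so the next request gets a fresh node.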

// Release releases the linkBuffer.
//
// NOTE: This function should only be called on inputBuffer.
func (c *Conn) Release() error {
	// c.Len() is used to check whether the data has been fully read. If there
	// is some data in inputBuffer, we mustn't use head and write to check
	// whether the current node can be released. We should use head and read as
	// the judging condition.
	if c.Len() == 0 {
		// Reset buffer so that we can reuse it.
		// In this case, the request can be held in one single node. We just need
		// to Reset this node to hold the next request.
		//
		// NOTE: Each connection will bind a buffer. We need to care about the memory usage.
		if c.inputBuffer.head == c.inputBuffer.write {
			c.inputBuffer.write.Reset()
			return nil
		}

		// Edge case: the buffer is just big enough to hold the whole request.
		// In this case, head holds the last request and the current request has
		// been held in the write node. So we just need to release head and reset write.
		if c.inputBuffer.head.next == c.inputBuffer.write {
			// Recalculate the maxSize
			size := c.inputBuffer.head.malloc
			node := c.inputBuffer.head
			node.Release()
			size += c.inputBuffer.write.malloc
			if size > mallocMax {
				size = mallocMax
			}
			if size > c.maxSize {
				c.maxSize = size
			}
			c.handleTail()
			c.inputBuffer.head, c.inputBuffer.read = c.inputBuffer.write, c.inputBuffer.write
			c.releaseCaches()
			return nil
		}
	}

	// If there is some data in the buffer, it means the request hasn't been fully handled,
	// or the request is too big to hold in a single node and crosses multiple nodes.
	size := 0
	for c.inputBuffer.head != c.inputBuffer.read {
		node := c.inputBuffer.head
		c.inputBuffer.head = c.inputBuffer.head.next
		size += c.inputBuffer.head.malloc
		node.Release()
	}
	// The readOnly field is only used to force malloc of a new node so that the
	// next request can be held in one node.
	// It has nothing to do with the release logic.
	c.inputBuffer.write.readOnly = true
	if size > mallocMax {
		size = mallocMax
	}
	if size > c.maxSize {
		c.maxSize = size
	}
	c.releaseCaches()
	return nil
}

// handleTail prevents the tail node from growing too large, to bound memory usage.
func (c *Conn) handleTail() {
	if cap(c.inputBuffer.write.buf) > mallocMax {
		node := c.inputBuffer.write
		c.inputBuffer.write.next = newBufferNode(c.maxSize)
		c.inputBuffer.write = c.inputBuffer.write.next
		node.Release()
		return
	}
	c.inputBuffer.write.Reset()
}

// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short.
func (c *Conn) Peek(i int) (p []byte, err error) {
	node := c.inputBuffer.read
	// fill the inputBuffer so that there is enough data
	err = c.fill(i)
	if err != nil {
		return
	}

	if c.Len() < i {
		i = c.Len()
		err = c.readErr()
	}

	l := node.Len()
	// Enough data in a single node, so just return a slice of the node.
	if l >= i {
		return node.buf[node.off : node.off+i], err
	}

	// not enough data in a single node
	if block1k < i && i <= mallocMax {
		p = malloc(i, i)
		c.caches = append(c.caches, p)
	} else {
		p = make([]byte, i)
	}
	c.peekBuffer(i, p)
	return p, err
}

// peekBuffer loads buf with data of size i without moving the read pointer.
func (c *Conn) peekBuffer(i int, buf []byte) {
	l, pIdx, node := 0, 0, c.inputBuffer.read
	for ack := i; ack > 0; ack = ack - l {
		l = node.Len()
		if l >= ack {
			copy(buf[pIdx:], node.buf[node.off:node.off+ack])
			break
		} else if l > 0 {
			pIdx += copy(buf[pIdx:], node.buf[node.off:node.off+l])
		}
		node = node.next
	}
}

// next loads b with data of size length and moves the read pointer.
func (c *Conn) next(length int, b []byte) error {
	c.peekBuffer(length, b)
	err := c.Skip(length)
	if err != nil {
		return err
	}
	return c.Release()
}

// fill loads at least i bytes of data, blocking on read otherwise.
// NOTE: fill may load less than i bytes and store err in Conn.err
// when the last read returns n > 0 and err != nil. So after calling
// fill, it is necessary to check whether c.Len() >= i.
func (c *Conn) fill(i int) (err error) {
	// Check if there is enough data in inputBuffer.
	if c.Len() >= i {
		return nil
	}
	// check whether conn has returned err before.
	if err = c.readErr(); err != nil {
		if c.Len() > 0 {
			c.err = err
			return nil
		}
		return
	}
	node := c.inputBuffer.write
	node.buf = node.buf[:cap(node.buf)]
	left := cap(node.buf) - node.malloc

	// If the remaining capacity is less than the length of the expected data,
	// or it is a new request, we malloc a node big enough to hold the data.
	if left < i-c.Len() || node.readOnly {
		// not enough capacity
		malloc := i
		if i < c.maxSize {
			malloc = c.maxSize
		}
		c.inputBuffer.write.next = newBufferNode(malloc)
		c.inputBuffer.write = c.inputBuffer.write.next
		// Set the readOnly flag to false so that the current node can be recycled.
		// In inputBuffer, whatever the readOnly value is, the node needs to be recycled.
		node.readOnly = false
	}

	i -= c.Len()
	node = c.inputBuffer.write
	node.buf = node.buf[:cap(node.buf)]

	// Keep reading in a loop until the node holds enough data.
	for i > 0 {
		n, err := c.c.Read(c.inputBuffer.write.buf[node.malloc:])
		if n > 0 {
			node.malloc += n
			c.inputBuffer.len += n
			i -= n
			if err != nil {
				c.err = err
				return nil
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// Skip discards the next n bytes.
func (c *Conn) Skip(n int) error {
	// check whether there is enough data.
	if c.Len() < n {
		return errs.NewPrivate("link buffer skip[" + strconv.Itoa(n) + "] not enough")
	}
	c.inputBuffer.len -= n // recalculate length

	var l int
	for ack := n; ack > 0; ack = ack - l {
		l = c.inputBuffer.read.Len()
		if l >= ack {
			c.inputBuffer.read.off += ack
			break
		}
		c.inputBuffer.read = c.inputBuffer.read.next
	}
	return nil
}

// ReadByte reads one byte and advances the read pointer.
func (c *Conn) ReadByte() (p byte, err error) {
	b, err := c.Peek(1)
	if err != nil {
		return ' ', err
	}
	err = c.Skip(1)
	if err != nil {
		return ' ', err
	}
	return b[0], nil
}

// ReadBinary reads the next i bytes into a copy and advances the read pointer.
func (c *Conn) ReadBinary(i int) ([]byte, error) {
	out := make([]byte, i)
	b, err := c.Peek(i)
	if err != nil {
		return nil, err
	}
	copy(out, b)
	err = c.Skip(i)
	return out, err
}

// Len returns the total length of the readable data in the reader.
func (c *Conn) Len() int {
	return c.inputBuffer.len
}
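
// A minimal write-path sketch (illustrative only; the writeResponse helper is
// hypothetical and not part of the original file). Data written through
// WriteBinary or into a Malloc'ed slice stays in outputBuffer until Flush
// pushes it to the socket, so a handler can assemble a response in several
// steps and still pay for one Write syscall per buffer node.
//
//	func writeResponse(c network.Conn, body []byte) error {
//		if _, err := c.WriteBinary(body); err != nil { // buffered, no syscall yet
//			return err
//		}
//		return c.Flush() // actually sends the buffered data to the peer
//	}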

// Malloc will provide an n-byte buffer to send data.
func (c *Conn) Malloc(n int) (buf []byte, err error) {
	if n == 0 {
		return
	}

	// If the capacity of the current buffer is larger than we need,
	// there is no need to malloc a new node.
	if c.outputBuffer.len > n {
		node := c.outputBuffer.write
		malloc := node.malloc
		node.malloc += n
		node.buf = node.buf[:node.malloc]
		c.outputBuffer.len -= n
		return node.buf[malloc:node.malloc], nil
	}

	mallocSize := n
	if n < defaultMallocSize {
		mallocSize = defaultMallocSize
	}
	node := newBufferNode(mallocSize)
	node.malloc = n
	c.outputBuffer.len = cap(node.buf) - n
	c.outputBuffer.write.next = node
	c.outputBuffer.write = c.outputBuffer.write.next
	return node.buf[:n], nil
}

// WriteBinary will use the user buffer to flush.
//
// NOTE: The buffer b must stay valid until it has been flushed successfully.
func (c *Conn) WriteBinary(b []byte) (n int, err error) {
	// If the data size is less than 4k, just copy it to outputBuffer.
	if len(b) < block4k {
		buf, err := c.Malloc(len(b))
		if err != nil {
			return 0, err
		}
		return copy(buf, b), nil
	}
	// Build a new node with buffer b.
	node := newBufferNode(0)
	node.malloc = len(b)
	node.readOnly = true
	node.buf = b
	c.outputBuffer.write.next = node
	c.outputBuffer.write = c.outputBuffer.write.next
	c.outputBuffer.len = 0
	return len(b), nil
}

// Flush will send data to the peer end.
func (c *Conn) Flush() (err error) {
	// No data to flush
	if c.outputBuffer.head == c.outputBuffer.write && c.outputBuffer.head.Len() == 0 {
		return
	}

	// Current node is the tail node of the last request, so move to the next node.
	if c.outputBuffer.head.Len() == 0 {
		node := c.outputBuffer.head
		c.outputBuffer.head = c.outputBuffer.head.next
		node.Release()
	}

	for {
		n, err := c.c.Write(c.outputBuffer.head.buf[c.outputBuffer.head.off:c.outputBuffer.head.malloc])
		if err != nil {
			return err
		}
		c.outputBuffer.head.off += n
		if c.outputBuffer.head == c.outputBuffer.write {
			// If the capacity of the buffer is less than 8k, just reset the node.
			if c.outputBuffer.head.recyclable() {
				c.outputBuffer.head.Reset()
				c.outputBuffer.len = cap(c.outputBuffer.head.buf)
			}
			break
		}
		// Flush the next node
		node := c.outputBuffer.head
		c.outputBuffer.head = c.outputBuffer.head.next
		node.Release()
	}
	return nil
}

func (c *Conn) HandleSpecificError(err error, rip string) (needIgnore bool) {
	if errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) {
		hlog.SystemLogger().Debugf("Go net library error=%s, remoteAddr=%s", err.Error(), rip)
		return true
	}
	return false
}

func (c *Conn) readErr() error {
	err := c.err
	c.err = nil
	return err
}

func (c *TLSConn) Handshake() error {
	return c.c.(network.ConnTLSer).Handshake()
}

func (c *TLSConn) ConnectionState() tls.ConnectionState {
	return c.c.(network.ConnTLSer).ConnectionState()
}

func newConn(c net.Conn, size int) network.Conn {
	maxSize := defaultMallocSize
	if size > maxSize {
		maxSize = size
	}

	node := newBufferNode(maxSize)
	inputBuffer := &linkBuffer{
		head:  node,
		read:  node,
		write: node,
	}
	runtime.SetFinalizer(inputBuffer, (*linkBuffer).release)

	outputNode := newBufferNode(0)
	outputBuffer := &linkBuffer{
		head:  outputNode,
		write: outputNode,
	}
	runtime.SetFinalizer(outputBuffer, (*linkBuffer).release)

	return &Conn{
		c:            c,
		inputBuffer:  inputBuffer,
		outputBuffer: outputBuffer,
		maxSize:      maxSize,
	}
}

func newTLSConn(c net.Conn, size int) network.Conn {
	maxSize := defaultMallocSize
	if size > maxSize {
		maxSize = size
	}

	node := newBufferNode(maxSize)
	inputBuffer := &linkBuffer{
		head:  node,
		read:  node,
		write: node,
	}
	runtime.SetFinalizer(inputBuffer, (*linkBuffer).release)

	outputNode := newBufferNode(0)
	outputBuffer := &linkBuffer{
		head:  outputNode,
		write: outputNode,
	}
	runtime.SetFinalizer(outputBuffer, (*linkBuffer).release)

	return &TLSConn{
		Conn{
			c:            c,
			inputBuffer:  inputBuffer,
			outputBuffer: outputBuffer,
			maxSize:      maxSize,
		},
	}
}
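
// A construction sketch (hypothetical; newConn is unexported, so this only
// applies inside this package, where the transport wraps each accepted
// net.Conn). The size argument raises the initial input node above
// defaultMallocSize but never lowers it.
//
//	func wrap(raw net.Conn) network.Conn {
//		return newConn(raw, block8k) // start with an 8 KiB input node
//	}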