inet.af/netstack@v0.0.0-20220214151720-7585b01ddccf/tcpip/transport/tcp/segment_queue.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package tcp

import (
	"inet.af/netstack/sync"
)

// segmentQueue is a bounded, thread-safe queue of TCP segments.
//
// +stateify savable
type segmentQueue struct {
	mu     sync.Mutex  `state:"nosave"`
	list   segmentList `state:"wait"`
	ep     *endpoint
	frozen bool
}

// emptyLocked determines if the queue is empty.
// Preconditions: q.mu must be held.
func (q *segmentQueue) emptyLocked() bool {
	return q.list.Empty()
}

// empty determines if the queue is empty.
func (q *segmentQueue) empty() bool {
	q.mu.Lock()
	r := q.emptyLocked()
	q.mu.Unlock()

	return r
}

// enqueue adds the given segment to the queue.
//
// Returns true when the segment is successfully added to the queue, in which
// case ownership of the reference is transferred to the queue. Returns false
// if the queue is full or frozen, in which case ownership is retained by the
// caller.
func (q *segmentQueue) enqueue(s *segment) bool {
	// q.ep.ops.GetReceiveBufferSize() and q.ep.receiveMemUsed() must be
	// called without holding q.mu to avoid lock order inversion.
	bufSz := q.ep.ops.GetReceiveBufferSize()
	used := q.ep.receiveMemUsed()
	q.mu.Lock()
	// Allow zero-sized segments (ACKs/FINs/RSTs etc.) even if the segment
	// queue is currently full.
	allow := (used <= int(bufSz) || s.payloadSize() == 0) && !q.frozen

	if allow {
		q.list.PushBack(s)
		// Set the owner now that the endpoint owns the segment.
		s.setOwner(q.ep, recvQ)
	}
	q.mu.Unlock()

	return allow
}
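
// enqueueOrDrop is an illustrative sketch of the ownership contract
// documented on enqueue: on a true return the queue has taken over the
// caller's reference, while on a false return (queue full or frozen) the
// caller still owns the segment and remains responsible for dropping its
// reference (the actual release call is elided here).
func enqueueOrDrop(q *segmentQueue, s *segment) (queued bool) {
	if q.enqueue(s) {
		// The queue now owns s; the caller must not release it.
		return true
	}
	// The caller still owns s and must eventually release its reference.
	return false
}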

// dequeue removes and returns the next segment from the queue, if one exists.
// Ownership is transferred to the caller, who is responsible for decrementing
// the ref count when done.
func (q *segmentQueue) dequeue() *segment {
	q.mu.Lock()
	s := q.list.Front()
	if s != nil {
		q.list.Remove(s)
	}
	q.mu.Unlock()

	return s
}
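
// drainQueue is an illustrative sketch of the dequeue contract: it pulls
// segments until the queue is empty and hands each one to a caller-supplied
// process function. Per dequeue's comment, the caller owns each returned
// segment and must decrement its ref count when done (elided here).
func drainQueue(q *segmentQueue, process func(*segment)) {
	for s := q.dequeue(); s != nil; s = q.dequeue() {
		process(s)
		// Drop the reference received from dequeue once processing is done.
	}
}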

// freeze prevents any more segments from being added to the queue: all
// future segmentQueue.enqueue calls will return false without adding the
// segment, until the queue is unfrozen by a corresponding segmentQueue.thaw
// call.
func (q *segmentQueue) freeze() {
	q.mu.Lock()
	q.frozen = true
	q.mu.Unlock()
}

// thaw unfreezes a queue previously frozen with segmentQueue.freeze and
// allows new segments to be queued again.
func (q *segmentQueue) thaw() {
	q.mu.Lock()
	q.frozen = false
	q.mu.Unlock()
}
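
// withQueueFrozen is an illustrative sketch of how freeze and thaw pair up:
// no new segments are admitted while fn runs (every enqueue returns false),
// after which normal enqueueing resumes. A quiescent period of this kind is
// the sort of thing freeze/thaw are meant to bracket.
func withQueueFrozen(q *segmentQueue, fn func()) {
	q.freeze()
	fn() // Runs while the queue rejects all enqueue attempts.
	q.thaw()
}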