github.com/geph-official/geph2@v0.22.6-0.20210211030601-f527cb59b0df/libs/kcp-go/kcp.go (about)

     1  package kcp
     2  
     3  import (
     4  	"context"
     5  	"encoding/binary"
     6  	"log"
     7  	"math"
     8  	mrand "math/rand"
     9  	"os"
    10  	"sync"
    11  	"sync/atomic"
    12  	"time"
    13  
    14  	"github.com/patrickmn/go-cache"
    15  	"golang.org/x/time/rate"
    16  )
    17  
// Protocol constants (values match the original C implementation of KCP).
const (
	IKCP_RTO_NDL     = 30  // no delay min rto
	IKCP_RTO_MIN     = 100 // normal min rto
	IKCP_RTO_DEF     = 200 // default rto before the first RTT sample
	IKCP_RTO_MAX     = 60000 // upper clamp for the computed rto (ms)
	IKCP_CMD_PUSH    = 81 // cmd: push data
	IKCP_CMD_ACK     = 82 // cmd: ack
	IKCP_CMD_WASK    = 83 // cmd: window probe (ask)
	IKCP_CMD_WINS    = 84 // cmd: window size (tell)
	IKCP_ASK_SEND    = 1  // need to send IKCP_CMD_WASK
	IKCP_ASK_TELL    = 2  // need to send IKCP_CMD_WINS
	IKCP_WND_SND     = 32 // initial send window, in segments
	IKCP_WND_RCV     = 32 // initial receive window, in segments
	IKCP_MTU_DEF     = 1400 // default MTU in bytes
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100 // default flush interval (ms)
	IKCP_OVERHEAD    = 24  // per-segment wire header size in bytes
	IKCP_DEADLINK    = 20
	IKCP_THRESH_INIT = 2 // initial slow-start threshold
	IKCP_THRESH_MIN  = 2
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
    41  
// QuiescentMax is the number of consecutive idle flushes after which the
// connection counts as quiescent (the counter is reset on any activity).
var QuiescentMax = 20

// CongestionControl selects the congestion-control algorithm; "BIC",
// "CUBIC", "VGS" and "LOL" are dispatched on in Input.
var CongestionControl = "BIC"

// doLogging enables verbose congestion-control logging (set via KCPLOG).
var doLogging = false

func init() {
	// Turn on logging when the KCPLOG environment variable is non-empty.
	doLogging = os.Getenv("KCPLOG") != ""
}
    51  
    52  // monotonic reference time point
    53  var refTime time.Time = time.Now()
    54  
    55  // currentMs returns current elasped monotonic milliseconds since program startup
    56  func currentMs() uint32 { return uint32(time.Now().Sub(refTime) / time.Millisecond) }
    57  
    58  // output_callback is a prototype which ought capture conn and call conn.Write
    59  type output_callback func(buf []byte, size int)
    60  
    61  /* encode 8 bits unsigned int */
    62  func ikcp_encode8u(p []byte, c byte) []byte {
    63  	p[0] = c
    64  	return p[1:]
    65  }
    66  
    67  /* decode 8 bits unsigned int */
    68  func ikcp_decode8u(p []byte, c *byte) []byte {
    69  	*c = p[0]
    70  	return p[1:]
    71  }
    72  
    73  /* encode 16 bits unsigned int (lsb) */
    74  func ikcp_encode16u(p []byte, w uint16) []byte {
    75  	binary.LittleEndian.PutUint16(p, w)
    76  	return p[2:]
    77  }
    78  
    79  /* decode 16 bits unsigned int (lsb) */
    80  func ikcp_decode16u(p []byte, w *uint16) []byte {
    81  	*w = binary.LittleEndian.Uint16(p)
    82  	return p[2:]
    83  }
    84  
    85  /* encode 32 bits unsigned int (lsb) */
    86  func ikcp_encode32u(p []byte, l uint32) []byte {
    87  	binary.LittleEndian.PutUint32(p, l)
    88  	return p[4:]
    89  }
    90  
    91  /* decode 32 bits unsigned int (lsb) */
    92  func ikcp_decode32u(p []byte, l *uint32) []byte {
    93  	*l = binary.LittleEndian.Uint32(p)
    94  	return p[4:]
    95  }
    96  
    97  func _imin_(a, b uint32) uint32 {
    98  	if a <= b {
    99  		return a
   100  	}
   101  	return b
   102  }
   103  
   104  func _imax_(a, b uint32) uint32 {
   105  	if a >= b {
   106  		return a
   107  	}
   108  	return b
   109  }
   110  
   111  func _ibound_(lower, middle, upper uint32) uint32 {
   112  	return _imin_(_imax_(lower, middle), upper)
   113  }
   114  
   115  func _itimediff(later, earlier uint32) int32 {
   116  	return (int32)(later - earlier)
   117  }
   118  
// segment defines a KCP segment
type segment struct {
	conv        uint32 // conversation id
	cmd         uint8  // command: IKCP_CMD_PUSH/ACK/WASK/WINS
	frg         uint8  // fragment countdown; 0 marks the final fragment of a message
	wnd         uint16 // sender's advertised receive window
	ts          uint32 // timestamp (ms, currentMs clock) when sent
	sn          uint32 // sequence number
	una         uint32 // sender's cumulative "unacknowledged" sequence number
	rto         uint32 // per-segment retransmission timeout (ms)
	xmit        uint32 // transmit count; 0 means never sent
	resendts    uint32 // deadline (ms) for the next retransmission
	fastack     uint32 // how many later segments were acked past this one
	lastfastack uint32 // sn of the last ack counted in fastack (dedupe)
	acked       uint32 // mark if the seg has acked
	data        []byte // payload; recycled through xmitBuf
}
   136  
// encode writes the IKCP_OVERHEAD-byte segment header into ptr in fixed
// wire order (little-endian) and returns the remaining slice. Only the
// header is emitted here; the payload is appended by the caller. It also
// bumps the global OutSegs counter.
func (seg *segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, seg.cmd)
	ptr = ikcp_encode8u(ptr, seg.frg)
	ptr = ikcp_encode16u(ptr, seg.wnd)
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
	atomic.AddUint64(&DefaultSnmp.OutSegs, 1)

	return ptr
}
   151  
// maxSpeed is the fixed rate (events/sec) the underlying token bucket
// runs at; variable speeds are emulated by scaling event counts (see
// Limit and Allow) rather than reconfiguring the limiter.
const maxSpeed = 1000 * 1000 * 1000

// rateLimiter adapts a fixed-rate rate.Limiter to a dynamic speed.
type rateLimiter struct {
	limiter *rate.Limiter
	limit   float64 // NOTE(review): not read or written in this part of the file — confirm before removing
	lock    sync.Mutex
}
   159  
// fixLimiter lazily creates the underlying limiter, which always runs at
// maxSpeed with a burst of maxSpeed/10. The speed argument is currently
// unused because callers emulate speed by scaling event counts instead.
// Must be called with rl.lock held.
func (rl *rateLimiter) fixLimiter(speed float64) {
	if rl.limiter == nil {
		rl.limiter = rate.NewLimiter(maxSpeed, maxSpeed/10)
	}
}
   165  
// Limit blocks until `events` events at the given speed (events/sec) may
// proceed; the speed is emulated by inflating the event count against the
// fixed maxSpeed bucket.
func (rl *rateLimiter) Limit(speed float64, events int) {
	rl.lock.Lock()
	defer rl.lock.Unlock()
	rl.fixLimiter(speed)
	evts := int(float64(events) * (float64(maxSpeed) / speed))
	// WaitN's error is ignored; context.Background() never cancels, though
	// evts beyond the burst would error — NOTE(review): confirm acceptable.
	rl.limiter.WaitN(context.Background(), evts)
}
   173  
// Allow reports whether `events` events at the given speed (events/sec)
// may proceed immediately, without blocking; the speed is emulated by
// scaling the event count against the fixed maxSpeed bucket.
func (rl *rateLimiter) Allow(speed float64, events int) bool {
	rl.lock.Lock()
	defer rl.lock.Unlock()
	rl.fixLimiter(speed)
	evts := int(float64(events) * (float64(maxSpeed) / speed))
	return rl.limiter.AllowN(time.Now(), evts)
}
   181  
// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss                   uint32 // conversation id; MTU; max segment size (mtu - overhead - reserved)
	snd_una, snd_nxt, rcv_nxt        uint32 // oldest unacked sn; next sn to assign; next sn expected
	ssthresh                         uint32 // slow-start threshold
	rx_rttvar, rx_srtt               int32  // RTT variance and smoothed RTT (see update_ack)
	rx_rto, rx_minrto                uint32 // current and minimum retransmission timeout (ms)
	snd_wnd, rcv_wnd, rmt_wnd, probe uint32 // send/receive/remote windows; pending probe flags
	cwnd                             float64 // congestion window, in segments
	interval, ts_flush               uint32 // flush interval and next scheduled flush (ms)
	nodelay, updated                 uint32
	ts_probe, probe_wait             uint32 // window-probe schedule (see flush)

	isDead bool

	wmax     float64   // NOTE(review): presumably window size at last loss, used by BIC/CUBIC (defined elsewhere) — confirm
	lastLoss time.Time // time of the last observed loss event
	retrans  uint64    // retransmitted segment count
	trans    uint64    // total transmitted segment count

	pacer rateLimiter // paces outgoing segments (used by the LOL controller in flush)

	// DRE holds delivery-rate-estimation state; the pp* maps snapshot
	// per-packet state keyed by sn when a segment enters snd_buf.
	DRE struct {
		delivered    float64            // total bytes delivered (acked) so far
		ppDelivered  map[uint32]float64 // delivered at send time, per sn
		delTime      time.Time          // time of the most recent delivery
		ppDelTime    map[uint32]time.Time // delTime at send time, per sn
		ppAppLimited map[uint32]bool    // whether the sender was app-limited at send time, per sn
		avgAckRate   float64            // EWMA of the delivery rate (bytes/sec)
		maxAckRate   float64            // windowed-max delivery rate (bytes/sec)
		maxAckTime   time.Time          // when maxAckRate was last raised
		minRtt       float64            // min RTT (ms) over a ~10s window
		minRttTime   time.Time          // when minRtt was last reset

		runDataAcked   float64 // bytes acked in the current sampling run
		runElapsedTime float64 // seconds elapsed in the current sampling run

		lastLossTime    time.Time
		lastLossDel     float64
		lastLossTrans   uint64
		lastLossRetrans uint64
		lastLossRate    float64
		lastLoss        float64 // most recent loss-rate estimate (fraction)
		policeRate      float64 // detected traffic-policing rate cap, if any
		policeTime      time.Time
	}

	// LOL holds state for the BBR-like "LOL" congestion controller.
	LOL struct {
		filledPipe    bool    // true once bandwidth growth has plateaued
		fullBwCount   int     // consecutive non-growing bandwidth samples
		fullBw        float64 // best bandwidth seen while probing
		lastFillTime  time.Time
		gain          float64 // current pacing gain
		slack         float64
		lossRate      float64
		bdpMultiplier float64 // cwnd target as a multiple of the BDP

		devi float64
	}

	VGS struct {
	}

	fastresend     int32 // fast-retransmit threshold; <=0 disables
	nocwnd, stream int32 // disable congestion window; stream (vs message) mode

	snd_queue []segment // user data waiting to enter the send window
	rcv_queue []segment // in-order data ready for Recv
	snd_buf   []segment // in-flight segments, ordered by sn
	rcv_buf   []segment // out-of-order received segments, ordered by sn

	acklist []ackItem // acks pending transmission

	buffer   []byte // staging buffer for outgoing packets (mtu-sized)
	reserved int    // bytes kept untouched at the head of buffer (see ReserveBytes)
	output   output_callback

	quiescent int // idle-flush countdown; reset to QuiescentMax on activity

	estimLoss float64
	fecRate   float64
}
   264  
// ackItem is one pending acknowledgement: the sequence number to ack and
// the peer timestamp to echo back for RTT measurement.
type ackItem struct {
	sn uint32
	ts uint32
}
   269  
   270  // NewKCP create a new kcp state machine
   271  //
   272  // 'conv' must be equal in the connection peers, or else data will be silently rejected.
   273  //
   274  // 'output' function will be called whenever these is data to be sent on wire.
   275  func NewKCP(conv uint32, output output_callback) *KCP {
   276  	kcp := new(KCP)
   277  	kcp.conv = conv
   278  	kcp.snd_wnd = IKCP_WND_SND
   279  	kcp.rcv_wnd = IKCP_WND_RCV
   280  	kcp.rmt_wnd = IKCP_WND_RCV
   281  	kcp.mtu = IKCP_MTU_DEF
   282  	kcp.mss = kcp.mtu - IKCP_OVERHEAD
   283  	kcp.buffer = make([]byte, kcp.mtu)
   284  	kcp.rx_rto = IKCP_RTO_DEF
   285  	kcp.rx_minrto = IKCP_RTO_MIN
   286  	kcp.interval = IKCP_INTERVAL
   287  	kcp.ts_flush = IKCP_INTERVAL
   288  	kcp.ssthresh = IKCP_THRESH_INIT
   289  	kcp.output = output
   290  	kcp.wmax = 1 << 30
   291  	kcp.DRE.ppDelTime = make(map[uint32]time.Time)
   292  	kcp.DRE.ppDelivered = make(map[uint32]float64)
   293  	kcp.DRE.ppAppLimited = make(map[uint32]bool)
   294  	kcp.LOL.gain = 1
   295  	kcp.LOL.bdpMultiplier = 1.5
   296  	kcp.quiescent = QuiescentMax
   297  	kcp.fecRate = 0
   298  	if CongestionControl == "BBR" {
   299  		//kcp.bbrOnConnectionInit()
   300  	}
   301  	return kcp
   302  }
   303  
// newSegment creates a KCP segment whose payload slice is drawn from the
// shared xmitBuf pool and trimmed to size; delSegment returns it.
func (kcp *KCP) newSegment(size int) (seg segment) {
	seg.data = xmitBuf.Get().([]byte)[:size]
	return
}
   309  
   310  // delSegment recycles a KCP segment
   311  func (kcp *KCP) delSegment(seg *segment) {
   312  	if seg.data != nil {
   313  		xmitBuf.Put(seg.data)
   314  		seg.data = nil
   315  	}
   316  }
   317  
   318  // ReserveBytes keeps n bytes untouched from the beginning of the buffer,
   319  // the output_callback function should be aware of this.
   320  //
   321  // Return false if n >= mss
   322  func (kcp *KCP) ReserveBytes(n int) bool {
   323  	if n >= int(kcp.mtu-IKCP_OVERHEAD) || n < 0 {
   324  		return false
   325  	}
   326  	kcp.reserved = n
   327  	kcp.mss = kcp.mtu - IKCP_OVERHEAD - uint32(n)
   328  	return true
   329  }
   330  
   331  // PeekSize checks the size of next message in the recv queue
   332  func (kcp *KCP) PeekSize() (length int) {
   333  	if len(kcp.rcv_queue) == 0 {
   334  		return -1
   335  	}
   336  
   337  	seg := &kcp.rcv_queue[0]
   338  	if seg.frg == 0 {
   339  		return len(seg.data)
   340  	}
   341  
   342  	if len(kcp.rcv_queue) < int(seg.frg+1) {
   343  		return -1
   344  	}
   345  
   346  	for k := range kcp.rcv_queue {
   347  		seg := &kcp.rcv_queue[k]
   348  		length += len(seg.data)
   349  		if seg.frg == 0 {
   350  			break
   351  		}
   352  	}
   353  	return
   354  }
   355  
// Receive data from kcp state machine
//
// Return number of bytes read.
//
// Return -1 when there is no readable data.
//
// Return -2 if len(buffer) is smaller than kcp.PeekSize().
func (kcp *KCP) Recv(buffer []byte) (n int) {
	peeksize := kcp.PeekSize()
	if peeksize < 0 {
		return -1
	}

	if peeksize > len(buffer) {
		return -2
	}

	// remember whether the receive window was full before this read; if it
	// was, the peer may have throttled and should be told about the newly
	// freed window.
	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}

	// merge fragment: copy fragments out until the final one (frg == 0)
	count := 0
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		copy(buffer, seg.data)
		buffer = buffer[len(seg.data):]
		n += len(seg.data)
		count++
		kcp.delSegment(seg)
		if seg.frg == 0 {
			break
		}
	}
	if count > 0 {
		kcp.rcv_queue = kcp.remove_front(kcp.rcv_queue, count)
	}

	// move available data from rcv_buf -> rcv_queue: only in-order
	// segments (sn == rcv_nxt) while the queue has window space
	count = 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue)+count < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}

	if count > 0 {
		kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
		kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
	}

	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}

	// slowly auto-grow the receive window: ~1% chance per Recv, capped at 10000
	if kcp.rcv_wnd < 10000 && mrand.Int()%100 == 0 {
		kcp.rcv_wnd++
	}
	return
}
   424  
// Send is user/upper level send, returns below zero for error
//
// Returns -1 for empty input, -2 when the data would need more than 255
// MSS-sized fragments (frg is a uint8 countdown), 0 on success.
func (kcp *KCP) Send(buffer []byte) int {
	kcp.quiescent = QuiescentMax // any send resets the idle countdown
	var count int
	if len(buffer) == 0 {
		return -1
	}
	//if kcp.nocwnd == 1 {
	//}

	// append to previous segment in streaming mode (if possible)
	if kcp.stream != 0 {
		n := len(kcp.snd_queue)
		if n > 0 {
			seg := &kcp.snd_queue[n-1]
			if len(seg.data) < int(kcp.mss) {
				capacity := int(kcp.mss) - len(seg.data)
				extend := capacity
				if len(buffer) < capacity {
					extend = len(buffer)
				}

				// grow slice, the underlying cap is guaranteed to
				// be larger than kcp.mss
				oldlen := len(seg.data)
				seg.data = seg.data[:oldlen+extend]
				copy(seg.data[oldlen:], buffer)
				buffer = buffer[extend:]
			}
		}

		if len(buffer) == 0 {
			return 0
		}
	}

	// split the remainder into ceil(len/mss) fragments
	if len(buffer) <= int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}

	if count > 255 {
		return -2
	}

	if count == 0 {
		count = 1
	}

	// enqueue the fragments; in message mode frg counts down so that the
	// final fragment carries frg == 0 (see PeekSize/Recv reassembly)
	for i := 0; i < count; i++ {
		var size int
		if len(buffer) > int(kcp.mss) {
			size = int(kcp.mss)
		} else {
			size = len(buffer)
		}
		seg := kcp.newSegment(size)
		copy(seg.data, buffer[:size])
		if kcp.stream == 0 { // message mode
			seg.frg = uint8(count - i - 1)
		} else { // stream mode
			seg.frg = 0
		}
		kcp.snd_queue = append(kcp.snd_queue, seg)
		buffer = buffer[size:]
	}
	return 0
}
   494  
   495  func (kcp *KCP) bdp() float64 {
   496  	return (kcp.rttProp()) * 0.001 * math.Max(500*1000, kcp.DRE.maxAckRate)
   497  }
   498  
// update_ack feeds one RTT sample (ms) into the smoothed-RTT estimator
// and recomputes the retransmission timeout following RFC 6298, using
// integer shifts for the exponential moving averages.
func (kcp *KCP) update_ack(rtt int32) {
	// track the minimum RTT over a ~10 second window; this is the
	// propagation-delay estimate consumed by rttProp/bdp
	if float64(rtt) < kcp.DRE.minRtt || time.Since(kcp.DRE.minRttTime).Seconds() > 10 {
		kcp.DRE.minRtt = float64(rtt)
		kcp.DRE.minRttTime = time.Now()
	}
	// update CWND
	// https://tools.ietf.org/html/rfc6298
	var rto uint32
	if kcp.rx_srtt == 0 {
		// first sample: srtt = rtt, rttvar = rtt/2
		kcp.rx_srtt = rtt
		kcp.rx_rttvar = rtt >> 1
	} else {
		delta := rtt - kcp.rx_srtt
		kcp.rx_srtt += delta >> 3 // srtt += (rtt - srtt)/8
		if delta < 0 {
			delta = -delta
		}
		if rtt < kcp.rx_srtt-kcp.rx_rttvar {
			// if the new RTT sample is below the bottom of the range of
			// what an RTT measurement is expected to be.
			// give an 8x reduced weight versus its normal weighting
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 5
		} else {
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 2
		}
	}
	// rto = srtt + max(interval, 4*rttvar), clamped to [rx_minrto, IKCP_RTO_MAX]
	rto = uint32(kcp.rx_srtt) + _imax_(kcp.interval, uint32(kcp.rx_rttvar)<<2)
	kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
	// if kcp.rx_rto < 500 {
	// 	kcp.rx_rto = 500
	// }
	// extra 500ms of retransmission slack, applied after the clamp — so
	// rx_rto can exceed IKCP_RTO_MAX. NOTE(review): confirm intended.
	kcp.rx_rto += 500
}
   532  
   533  func (kcp *KCP) shrink_buf() {
   534  	if len(kcp.snd_buf) > 0 {
   535  		seg := &kcp.snd_buf[0]
   536  		kcp.snd_una = seg.sn
   537  	} else {
   538  		kcp.snd_una = kcp.snd_nxt
   539  	}
   540  }
   541  
// processAck updates delivery-rate-estimation (DRE) bookkeeping for a
// segment that has just been acknowledged. The pp* maps hold per-packet
// snapshots recorded when the segment entered snd_buf (see flush); a
// missing entry means the segment was already processed, so return early.
func (kcp *KCP) processAck(seg *segment) {
	kcp.DRE.delTime = time.Now()
	pDelivered, ok := kcp.DRE.ppDelivered[seg.sn]
	if !ok {
		return
	}
	// bytes delivered between this segment's send snapshot and now
	dataAcked := kcp.DRE.delivered - pDelivered
	delete(kcp.DRE.ppDelivered, seg.sn)
	pDelTime, ok := kcp.DRE.ppDelTime[seg.sn]
	if !ok {
		return
	}
	// wall time elapsed since the snapshot taken at send
	ackElapsed := kcp.DRE.delTime.Sub(pDelTime)
	delete(kcp.DRE.ppDelTime, seg.sn)
	al, ok := kcp.DRE.ppAppLimited[seg.sn]
	if !ok {
		return
	}
	delete(kcp.DRE.ppAppLimited, seg.sn)
	// accumulate the sampling run, credit the delivered bytes, and fold
	// the run into the rate estimates
	kcp.DRE.runElapsedTime += ackElapsed.Seconds()
	kcp.DRE.runDataAcked += dataAcked
	kcp.DRE.delivered += float64(len(seg.data))
	kcp.updateSample(al)
}
   566  
// parse_ack handles a selective acknowledgement for exactly sn: the
// matching in-flight segment is marked acked and its payload freed, but
// it stays in snd_buf until cumulatively removed by parse_una.
func (kcp *KCP) parse_ack(sn uint32) {
	// ignore acks outside the in-flight range [snd_una, snd_nxt)
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if sn == seg.sn {
			// mark and free space, but leave the segment here,
			// and wait until `una` to delete this, then we don't
			// have to shift the segments behind forward,
			// which is an expensive operation for large window
			seg.acked = 1
			kcp.processAck(seg)
			kcp.delSegment(seg)
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			// snd_buf is ordered by sn: passed where sn would be
			break
		}
	}
}
   588  
   589  func (kcp *KCP) parse_fastack(sn, ts uint32) {
   590  	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
   591  		return
   592  	}
   593  
   594  	for k := range kcp.snd_buf {
   595  		seg := &kcp.snd_buf[k]
   596  		if _itimediff(sn, seg.sn) < 0 {
   597  			break
   598  		} else if sn != seg.sn && _itimediff(seg.ts, ts) <= 0 {
   599  			if seg.lastfastack == sn {
   600  			} else {
   601  				seg.fastack++
   602  				seg.lastfastack = sn
   603  			}
   604  		}
   605  	}
   606  }
   607  
   608  func (kcp *KCP) parse_una(una uint32) {
   609  	count := 0
   610  	for k := range kcp.snd_buf {
   611  		seg := &kcp.snd_buf[k]
   612  		if _itimediff(una, seg.sn) > 0 {
   613  			kcp.processAck(seg)
   614  			kcp.delSegment(seg)
   615  			count++
   616  		} else {
   617  			break
   618  		}
   619  	}
   620  	if count > 0 {
   621  		kcp.snd_buf = kcp.remove_front(kcp.snd_buf, count)
   622  	}
   623  }
   624  
   625  // ack append
   626  func (kcp *KCP) ack_push(sn, ts uint32) {
   627  	kcp.quiescent = QuiescentMax
   628  	kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
   629  }
   630  
// returns true if data has repeated
//
// parse_data inserts a received PUSH segment into rcv_buf (kept ordered
// by sn), then promotes any now-in-order segments into rcv_queue.
func (kcp *KCP) parse_data(newseg segment) bool {
	sn := newseg.sn
	// treat anything outside the receive window as a duplicate/drop
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		return true
	}

	// scan backwards for the insertion point (new segments usually belong
	// near the tail) while detecting duplicates
	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := &kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}

	if !repeat {
		// replicate the content if it's new: newseg.data still aliases the
		// caller's packet buffer (see Input's "delayed data copying")
		dataCopy := xmitBuf.Get().([]byte)[:len(newseg.data)]
		copy(dataCopy, newseg.data)
		newseg.data = dataCopy

		if insert_idx == n+1 {
			kcp.rcv_buf = append(kcp.rcv_buf, newseg)
		} else {
			// shift the tail right by one slot and insert in place
			kcp.rcv_buf = append(kcp.rcv_buf, segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = newseg
		}
	}

	// move available data from rcv_buf -> rcv_queue while in order and
	// the queue has window space
	count := 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue)+count < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	if count > 0 {
		kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
		kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
	}

	return repeat
}
   687  
// Input a packet into kcp state machine.
//
// 'regular' indicates it's a real data packet from remote, and it means it's not generated from ReedSolomon
// codecs.
//
// 'ackNoDelay' will trigger immediate ACK, but surely it will not be efficient in bandwidth
//
// Returns 0 on success, -1 for a short packet, -2 for a truncated
// payload, -3 for an unknown command byte.
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
	kcp.quiescent = QuiescentMax
	snd_una := kcp.snd_una // snapshot, to count how many segments this input acks
	if len(data) < IKCP_OVERHEAD {
		return -1
	}

	var latest uint32 // the latest ack packet
	var flag int      // set when at least one ACK segment was seen
	var inSegs uint64

	// one UDP packet may carry several concatenated KCP segments
	for {
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8

		if len(data) < int(IKCP_OVERHEAD) {
			break
		}

		// decode the fixed 24-byte header
		data = ikcp_decode32u(data, &conv)
		// if conv != kcp.conv {
		// 	return -1
		// }

		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		// NOTE(review): adopts the peer's conversation id instead of
		// validating it (the check above is commented out) — confirm intended
		kcp.conv = conv
		if len(data) < int(length) {
			return -2
		}

		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			log.Println("not any command")
			return -3
		}

		// only trust window updates from regular packets. i.e: latest update
		if regular {
			kcp.rmt_wnd = uint32(wnd)
		}
		// every segment carries a cumulative una: retire covered segments
		kcp.parse_una(una)
		kcp.shrink_buf()

		if cmd == IKCP_CMD_ACK {
			kcp.parse_ack(sn)
			kcp.parse_fastack(sn, ts)
			flag |= 1
			latest = ts
		} else if cmd == IKCP_CMD_PUSH {
			repeat := true
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				// ack everything inside the window, duplicates included
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					var seg segment
					seg.conv = conv
					seg.cmd = cmd
					seg.frg = frg
					seg.wnd = wnd
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					seg.data = data[:length] // delayed data copying
					repeat = kcp.parse_data(seg)
				}
			}
			if regular && repeat {
				atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			// unreachable: cmd was validated above
			return -3
		}

		inSegs++
		data = data[length:]
	}
	atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)

	// update rtt with the latest ts
	// ignore the FEC packet
	if flag != 0 && regular {
		current := currentMs()
		if _itimediff(current, latest) >= 0 {
			kcp.update_ack(_itimediff(current, latest))
		}
	}

	// cwnd update when packet arrived
	if kcp.nocwnd == 0 {
		if acks := _itimediff(kcp.snd_una, snd_una); acks > 0 {
			kcp.trans += uint64(acks)
			switch CongestionControl {
			case "BIC":
				kcp.bic_onack(acks)
			case "CUBIC":
				kcp.cubic_onack(acks)
			case "VGS":
				kcp.vgs_onack(acks)
			case "LOL":
				// BBR-style: aim cwnd at a multiple of the estimated BDP
				bdp := kcp.bdp() / float64(kcp.mss)
				targetCwnd := bdp*kcp.LOL.bdpMultiplier + 64
				if targetCwnd > kcp.cwnd+float64(acks) {
					kcp.cwnd += float64(acks)
				} else {
					kcp.cwnd = targetCwnd
				}
				// NOTE(review): this unconditional overwrite makes the
				// gradual-growth branch above a no-op — confirm intended
				kcp.cwnd = targetCwnd
				if kcp.cwnd < 16 {
					kcp.cwnd = 16
				}

				if !kcp.LOL.filledPipe {
					// startup: probe until bandwidth stops growing for 5
					// consecutive samples, then drop the gain to drain
					if kcp.DRE.avgAckRate > kcp.LOL.fullBw {
						// still growing
						kcp.LOL.fullBw = kcp.DRE.avgAckRate
						kcp.LOL.fullBwCount = 0
						//log.Println("growing...")
					} else {
						kcp.LOL.fullBwCount++
					}
					kcp.LOL.gain = 2.89
					if kcp.LOL.fullBwCount >= 5 {
						//log.Printf("BW filled at %2.fK", kcp.LOL.fullBw/1000)
						kcp.LOL.filledPipe = true
						kcp.LOL.lastFillTime = time.Now()
						kcp.LOL.gain = 1.0 / 2.89
					}
				} else {
					// vibrate the gain up and down every 50 rtts
					period := currentMs() / uint32(math.Max(1, kcp.DRE.minRtt))
					//kcp.LOL.gain = math.Sin(period*(2*math.Pi)/4)*0.25 + 1
					if period%10 == 0 && kcp.DRE.lastLoss < 0.03 {
						kcp.LOL.gain = 1.5
					} else if period%10 == 1 {
						kcp.LOL.gain = 0.5
					} else {
						kcp.LOL.gain = 0.95
					}
				}

				if doLogging {
					log.Printf("[%p] %vK | %vK | cwnd %v/%v | bdp %v | gain %.2f | %v [%v] ms | %.2f%%", kcp,
						int(kcp.DRE.maxAckRate/1000),
						int(kcp.DRE.avgAckRate/1000),
						len(kcp.snd_buf),
						int(kcp.cwnd), int(bdp), kcp.LOL.gain,
						kcp.rttProp(),
						kcp.rx_rttvar,
						100*float64(kcp.DRE.lastLoss))
				}
			}
		}
	}

	if len(kcp.acklist) >= 128 || (ackNoDelay && len(kcp.acklist) > 0) { // ack immediately
		kcp.flush(true)
	}
	return 0
}
   866  
// paceGain returns the current pacing gain of the LOL controller.
func (kcp *KCP) paceGain() float64 {
	return kcp.LOL.gain
}
   870  
   871  func (kcp *KCP) wnd_unused() uint16 {
   872  	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
   873  		return uint16(int(kcp.rcv_wnd) - len(kcp.rcv_queue))
   874  	}
   875  	return 0
   876  }
   877  
// ackDebugCache holds ack-debugging entries expiring after one hour.
// NOTE(review): not referenced anywhere in this part of the file.
var ackDebugCache = cache.New(time.Hour, time.Hour)
   879  
// rttProp returns the propagation-delay estimate: the minimum RTT (ms)
// observed within roughly the last 10 seconds (see update_ack).
func (kcp *KCP) rttProp() float64 {
	return kcp.DRE.minRtt
}
   883  
// updateSample folds the accumulated sampling run (bytes acked, seconds
// elapsed) into the average and maximum delivery-rate estimates, then
// resets the run. App-limited samples never lower the max estimate, since
// a rate measured while the sender had little to send underestimates
// capacity.
func (kcp *KCP) updateSample(appLimited bool) {
	if kcp.DRE.runElapsedTime > 0 {
		avgRate := kcp.DRE.runDataAcked / kcp.DRE.runElapsedTime
		// adaptive EWMA weight: smaller beta (slower tracking) at higher
		// estimated rates
		beta := 10 / math.Max(1000, kcp.DRE.avgAckRate/300)
		kcp.DRE.avgAckRate = (1-beta)*kcp.DRE.avgAckRate + beta*avgRate
		//avgRate = kcp.DRE.avgAckRate
		// raise the max on a new high, or let it decay once a
		// non-app-limited sample is ~10 RTTs newer than the current max
		if kcp.DRE.maxAckRate < avgRate || (!appLimited && float64(time.Since(kcp.DRE.maxAckTime).Milliseconds()) > kcp.rttProp()*10) {
			kcp.DRE.maxAckRate = avgRate
			// if kcp.DRE.maxAckRate < 200*1000 {
			// 	kcp.DRE.maxAckRate = 200 * 1000
			// }
			kcp.DRE.maxAckTime = kcp.DRE.delTime
			// cap by a recently detected policing rate, if any
			if time.Since(kcp.DRE.policeTime).Seconds() < 20 && kcp.DRE.maxAckRate > kcp.DRE.policeRate {
				kcp.DRE.maxAckRate = kcp.DRE.policeRate
			}
		}
	}
	if kcp.DRE.runElapsedTime != 0 {
		kcp.DRE.runElapsedTime = 0
		kcp.DRE.runDataAcked = 0
	}
}
   906  
   907  // flush pending data
   908  func (kcp *KCP) flush(ackOnly bool) uint32 {
   909  	var busy bool
   910  	defer func() {
   911  		if !busy {
   912  			kcp.LOL.filledPipe = false
   913  			kcp.LOL.fullBwCount = 0
   914  			kcp.LOL.fullBw = 0
   915  			kcp.quiescent--
   916  			if kcp.quiescent <= 0 {
   917  				kcp.quiescent = 0
   918  			}
   919  		}
   920  	}()
   921  	if kcp.conv == 814 {
   922  		//busy = true
   923  		return kcp.interval
   924  	}
   925  
   926  	var seg segment
   927  	seg.conv = kcp.conv
   928  	seg.cmd = IKCP_CMD_ACK
   929  	seg.wnd = kcp.wnd_unused()
   930  	seg.una = kcp.rcv_nxt
   931  
   932  	buffer := kcp.buffer
   933  	ptr := buffer[kcp.reserved:] // keep n bytes untouched
   934  
   935  	// makeSpace makes room for writing
   936  	makeSpace := func(space int) {
   937  		size := len(buffer) - len(ptr)
   938  		if size+space > int(kcp.mtu) {
   939  			kcp.output(buffer, size)
   940  			ptr = buffer[kcp.reserved:]
   941  		}
   942  	}
   943  
   944  	// flush bytes in buffer if there is any
   945  	flushBuffer := func() {
   946  		size := len(buffer) - len(ptr)
   947  		if size > kcp.reserved {
   948  			busy = true
   949  			kcp.output(buffer, size)
   950  		}
   951  	}
   952  
   953  	// flush acknowledges
   954  	for i, ack := range kcp.acklist {
   955  		busy = true
   956  		makeSpace(IKCP_OVERHEAD)
   957  		// filter jitters caused by bufferbloat
   958  		if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
   959  			seg.sn, seg.ts = ack.sn, ack.ts
   960  			ptr = seg.encode(ptr)
   961  		} else {
   962  		}
   963  	}
   964  	kcp.acklist = kcp.acklist[0:0]
   965  
   966  	if ackOnly { // flash remain ack segments
   967  		flushBuffer()
   968  		return kcp.interval
   969  	}
   970  
   971  	// probe window size (if remote window size equals zero)
   972  	if kcp.rmt_wnd == 0 {
   973  		current := currentMs()
   974  		if kcp.probe_wait == 0 {
   975  			kcp.probe_wait = IKCP_PROBE_INIT
   976  			kcp.ts_probe = current + kcp.probe_wait
   977  		} else {
   978  			if _itimediff(current, kcp.ts_probe) >= 0 {
   979  				if kcp.probe_wait < IKCP_PROBE_INIT {
   980  					kcp.probe_wait = IKCP_PROBE_INIT
   981  				}
   982  				kcp.probe_wait += kcp.probe_wait / 2
   983  				if kcp.probe_wait > IKCP_PROBE_LIMIT {
   984  					kcp.probe_wait = IKCP_PROBE_LIMIT
   985  				}
   986  				kcp.ts_probe = current + kcp.probe_wait
   987  				kcp.probe |= IKCP_ASK_SEND
   988  			}
   989  		}
   990  		busy = true
   991  	} else if kcp.ts_probe != 0 || kcp.probe_wait != 0 {
   992  		kcp.ts_probe = 0
   993  		kcp.probe_wait = 0
   994  		busy = true
   995  	}
   996  
   997  	// flush window probing commands
   998  	if (kcp.probe & IKCP_ASK_SEND) != 0 {
   999  		seg.cmd = IKCP_CMD_WASK
  1000  		makeSpace(IKCP_OVERHEAD)
  1001  		ptr = seg.encode(ptr)
  1002  		busy = true
  1003  	}
  1004  
  1005  	// flush window probing commands
  1006  	if (kcp.probe & IKCP_ASK_TELL) != 0 {
  1007  		seg.cmd = IKCP_CMD_WINS
  1008  		makeSpace(IKCP_OVERHEAD)
  1009  		ptr = seg.encode(ptr)
  1010  		busy = true
  1011  	}
  1012  
  1013  	if kcp.probe != 0 {
  1014  		kcp.probe = 0
  1015  		busy = true
  1016  	}
  1017  
  1018  	// calculate window size
  1019  	cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
  1020  	if kcp.nocwnd == 0 {
  1021  		cwnd = _imin_(uint32(kcp.cwnd), cwnd)
  1022  	}
  1023  
  1024  	// sliding window, controlled by snd_nxt && sna_una+cwnd
  1025  	newSegsCount := 0
  1026  	appLimited := len(kcp.snd_buf) < 2
  1027  	total := 0
  1028  	for k := range kcp.snd_queue {
  1029  		busy = true
  1030  		if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
  1031  			break
  1032  		}
  1033  		newseg := kcp.snd_queue[k]
  1034  		if CongestionControl == "LOL" {
  1035  			r, x := math.Max(500*1000, kcp.DRE.maxAckRate),
  1036  				int(float64(len(newseg.data))/math.Max(0.5, kcp.LOL.gain))
  1037  			if !kcp.pacer.Allow(r, x) && kcp.DRE.maxAckRate > 500*1000 {
  1038  				break
  1039  			}
  1040  			//kcp.pacer.Limit(r, x)
  1041  		}
  1042  
  1043  		newseg.conv = kcp.conv
  1044  		newseg.cmd = IKCP_CMD_PUSH
  1045  		newseg.sn = kcp.snd_nxt
  1046  		total += len(newseg.data)
  1047  		kcp.DRE.ppDelivered[newseg.sn] = kcp.DRE.delivered
  1048  		kcp.DRE.ppDelTime[newseg.sn] = kcp.DRE.delTime
  1049  		kcp.DRE.ppAppLimited[newseg.sn] = appLimited
  1050  		kcp.snd_buf = append(kcp.snd_buf, newseg)
  1051  		kcp.snd_nxt++
  1052  		newSegsCount++
  1053  	}
  1054  	if newSegsCount > 0 {
  1055  		busy = true
  1056  		kcp.snd_queue = kcp.remove_front(kcp.snd_queue, newSegsCount)
  1057  	}
  1058  
  1059  	// calculate resent
  1060  	resent := uint32(1)
  1061  	if kcp.fastresend <= 0 {
  1062  		resent = 0xffffffff
  1063  	}
  1064  
  1065  	// check for retransmissions
  1066  	current := currentMs()
  1067  	var change, lost, lostSegs, fastRetransSegs, earlyRetransSegs uint64
  1068  	minrto := int32(kcp.interval)
  1069  
  1070  	ref := kcp.snd_buf[:len(kcp.snd_buf)] // for bounds check elimination
  1071  	var lostSn []uint32
  1072  	for k := range ref {
  1073  		busy = true
  1074  		segment := &ref[k]
  1075  		needsend := false
  1076  		if segment.acked == 1 {
  1077  			continue
  1078  		}
  1079  		if segment.xmit == 0 { // initial transmit
  1080  			needsend = true
  1081  			segment.rto = kcp.rx_rto
  1082  			// if len(segment.data) < 1024 {
  1083  			// 	segment.rto = uint32(kcp.rx_srtt)
  1084  			// 	if segment.rto > 400 {
  1085  			// 		segment.rto = 400
  1086  			// 	}
  1087  			// 	log.Println("small segment, retransmit without rttvar", segment.rto, kcp.rx_srtt, kcp.rx_rttvar, kcp.DRE.minRtt)
  1088  			// }
  1089  			segment.resendts = current + segment.rto
  1090  			kcp.trans++
  1091  		} else if segment.fastack >= resent { // fast retransmit
  1092  			needsend = true
  1093  			segment.fastack = 0
  1094  			segment.rto = kcp.rx_rto
  1095  			segment.resendts = current + segment.rto
  1096  			change++
  1097  			fastRetransSegs++
  1098  			lostSn = append(lostSn, segment.sn)
  1099  		} else if (segment.fastack > 0 && newSegsCount == 0) ||
  1100  			(segment.xmit < 2 && len(kcp.snd_buf) == 1) { // early retransmit
  1101  			needsend = true
  1102  			segment.fastack = 0
  1103  			segment.rto = kcp.rx_rto
  1104  			segment.resendts = current + segment.rto
  1105  			change++
  1106  			earlyRetransSegs++
  1107  			lostSn = append(lostSn, segment.sn)
  1108  			//log.Println("early", segment.sn)
  1109  		} else if _itimediff(current, segment.resendts) >= 0 { // RTO
  1110  			needsend = true
  1111  			// if kcp.nodelay == 0 {
  1112  			// 	segment.rto += kcp.rx_rto
  1113  			// } else {
  1114  			// 	segment.rto += kcp.rx_rto / 2
  1115  			// }
  1116  			segment.rto += segment.rto
  1117  			segment.fastack = 0
  1118  			segment.resendts = current + segment.rto
  1119  			if segment.rto > IKCP_RTO_MAX*4 {
  1120  				if doLogging {
  1121  					log.Printf("[%p] self-destruct due to far too long RTO", kcp)
  1122  				}
  1123  				kcp.isDead = true
  1124  			}
  1125  			lost++
  1126  			lostSegs++
  1127  			// if doLogging {
  1128  			// 	log.Printf("[%p] RTO on %v %v", kcp, segment.sn, segment.rto)
  1129  			// }
  1130  			lostSn = append(lostSn, segment.sn)
  1131  			//log.Println("rto", segment.sn)
  1132  		}
  1133  
  1134  		if needsend {
  1135  			current = currentMs()
  1136  			segment.xmit++
  1137  			segment.ts = current
  1138  			segment.wnd = seg.wnd
  1139  			segment.una = seg.una
  1140  
  1141  			need := IKCP_OVERHEAD + len(segment.data)
  1142  			makeSpace(need)
  1143  			ptr = segment.encode(ptr)
  1144  			copy(ptr, segment.data)
  1145  			ptr = ptr[len(segment.data):]
  1146  		}
  1147  
  1148  		// get the nearest rto
  1149  		if rto := _itimediff(segment.resendts, current); rto > 0 && rto < minrto {
  1150  			minrto = rto
  1151  		}
  1152  	}
  1153  
  1154  	// flash remain segments
  1155  	flushBuffer()
  1156  
  1157  	// counter updates
  1158  	sum := lostSegs
  1159  	if lostSegs > 0 {
  1160  		atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
  1161  	}
  1162  	if fastRetransSegs > 0 {
  1163  		atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
  1164  		sum += fastRetransSegs
  1165  	}
  1166  	if earlyRetransSegs > 0 {
  1167  		atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
  1168  		sum += earlyRetransSegs
  1169  	}
  1170  	if sum > 0 {
  1171  		atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
  1172  		kcp.retrans += sum
  1173  	}
  1174  
  1175  	// cwnd update
  1176  	if kcp.nocwnd == 0 {
  1177  		switch CongestionControl {
  1178  		case "BIC":
  1179  			// congestion control, https://tools.ietf.org/html/rfc5681
  1180  			if sum > 0 {
  1181  				kcp.bic_onloss(lostSn)
  1182  			}
  1183  		case "CUBIC":
  1184  			// congestion control, https://tools.ietf.org/html/rfc5681
  1185  			if sum > 0 {
  1186  				kcp.cubic_onloss(lostSn)
  1187  			}
  1188  		case "LOL":
  1189  			// if sum > 0 {
  1190  			// 	kcp.cwnd = math.Min(kcp.cwnd, kcp.bdp()/float64(kcp.mss))
  1191  			// }
  1192  		case "VGS":
  1193  		}
  1194  		if sum > 0 {
  1195  			now := time.Now()
  1196  			if now.Sub(kcp.DRE.lastLossTime).Milliseconds() > int64(kcp.DRE.minRtt*10) {
  1197  				deltaR := kcp.retrans - kcp.DRE.lastLossRetrans
  1198  				deltaT := kcp.trans - kcp.DRE.lastLossTrans
  1199  				loss := float64(deltaR) / (float64(deltaR) + float64(deltaT) + 1)
  1200  				rate := (kcp.DRE.delivered - kcp.DRE.lastLossDel) /
  1201  					now.Sub(kcp.DRE.lastLossTime).Seconds()
  1202  				if doLogging {
  1203  					log.Printf("[%p] Loss-to-loss delivery rate: %vK @ %.2f%%", kcp, int(rate/1000), loss*100)
  1204  				}
  1205  				now := time.Now()
  1206  				// if loss > 0.1 {
  1207  				// 	kcp.LOL.bdpMultiplier = kcp.LOL.bdpMultiplier*0.7 + 0.3
  1208  				// }
  1209  				// if loss < 0.05 {
  1210  				// 	kcp.LOL.bdpMultiplier = kcp.LOL.bdpMultiplier*0.9 + 0.1*3
  1211  				// }
  1212  				// if doLogging {
  1213  				// 	log.Println("bdpMultiplier =>", kcp.LOL.bdpMultiplier)
  1214  				// }
  1215  				// if rate > 400*1000 && loss+kcp.DRE.lastLoss > 0.3 && math.Abs(kcp.DRE.lastLossRate-rate) < rate/5 {
  1216  				// 	if doLogging {
  1217  				// 		log.Printf("[%p] ****** POLICE ******", kcp)
  1218  				// 	}
  1219  				// 	kcp.DRE.policeRate = (rate + kcp.DRE.lastLossRate) / 2
  1220  				// 	kcp.DRE.policeTime = now
  1221  				// }
  1222  				kcp.DRE.lastLossTime = now
  1223  				kcp.DRE.lastLossDel = kcp.DRE.delivered
  1224  				kcp.DRE.lastLossTrans = kcp.trans
  1225  				kcp.DRE.lastLossRetrans = kcp.retrans
  1226  				kcp.DRE.lastLossRate = rate
  1227  				kcp.DRE.lastLoss = loss
  1228  				kcp.estimLoss = loss
  1229  				if kcp.estimLoss > 0.1 {
  1230  					kcp.fecRate = 0.8*kcp.fecRate + 0.2
  1231  				} else {
  1232  					kcp.fecRate = 0.8 * kcp.fecRate
  1233  				}
  1234  			}
  1235  		}
  1236  		if kcp.cwnd < 4 {
  1237  			kcp.cwnd = 4
  1238  		}
  1239  	}
  1240  
  1241  	return uint32(minrto)
  1242  }
  1243  
  1244  // SetMtu changes MTU size, default is 1400
  1245  func (kcp *KCP) SetMtu(mtu int) int {
  1246  	if mtu < 50 || mtu < IKCP_OVERHEAD {
  1247  		return -1
  1248  	}
  1249  	if kcp.reserved >= int(kcp.mtu-IKCP_OVERHEAD) || kcp.reserved < 0 {
  1250  		return -1
  1251  	}
  1252  
  1253  	buffer := make([]byte, mtu)
  1254  	if buffer == nil {
  1255  		return -2
  1256  	}
  1257  	kcp.mtu = uint32(mtu)
  1258  	kcp.mss = kcp.mtu - IKCP_OVERHEAD - uint32(kcp.reserved)
  1259  	kcp.buffer = buffer
  1260  	return 0
  1261  }
  1262  
  1263  // NoDelay options
  1264  // fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
  1265  // nodelay: 0:disable(default), 1:enable
  1266  // interval: internal update timer interval in millisec, default is 100ms
  1267  // resend: 0:disable fast resend(default), 1:enable fast resend
  1268  // nc: 0:normal congestion control(default), 1:disable congestion control
  1269  func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
  1270  	if nodelay >= 0 {
  1271  		kcp.nodelay = uint32(nodelay)
  1272  		if nodelay != 0 {
  1273  			kcp.rx_minrto = IKCP_RTO_NDL
  1274  		} else {
  1275  			kcp.rx_minrto = IKCP_RTO_MIN
  1276  		}
  1277  	}
  1278  	if interval >= 0 {
  1279  		if interval > 5000 {
  1280  			interval = 5000
  1281  		} else if interval < 10 {
  1282  			interval = 10
  1283  		}
  1284  		kcp.interval = uint32(interval)
  1285  	}
  1286  	if resend >= 0 {
  1287  		kcp.fastresend = int32(resend)
  1288  	}
  1289  	if nc >= 0 {
  1290  		kcp.nocwnd = int32(nc)
  1291  	}
  1292  	return 0
  1293  }
  1294  
  1295  // WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default
  1296  func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
  1297  	if sndwnd > 0 {
  1298  		kcp.snd_wnd = uint32(sndwnd)
  1299  	}
  1300  	if rcvwnd > 0 {
  1301  		kcp.rcv_wnd = uint32(rcvwnd)
  1302  	}
  1303  	return 0
  1304  }
  1305  
  1306  // WaitSnd gets how many packet is waiting to be sent
  1307  func (kcp *KCP) WaitSnd() int {
  1308  	return len(kcp.snd_buf) + len(kcp.snd_queue)
  1309  }
  1310  
  1311  // remove front n elements from queue
  1312  // if the number of elements to remove is more than half of the size.
  1313  // just shift the rear elements to front, otherwise just reslice q to q[n:]
  1314  // then the cost of runtime.growslice can always be less than n/2
  1315  func (kcp *KCP) remove_front(q []segment, n int) []segment {
  1316  	if n > cap(q)/2 {
  1317  		newn := copy(q, q[n:])
  1318  		return q[:newn]
  1319  	}
  1320  	return q[n:]
  1321  }