gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/gmhttp/internal/chunked_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"testing"
)

func TestChunk(t *testing.T) {
	var b bytes.Buffer

	w := NewChunkedWriter(&b)
	const chunk1 = "hello, "
	const chunk2 = "world! 0123456789abcdef"
	_, _ = w.Write([]byte(chunk1))
	_, _ = w.Write([]byte(chunk2))
	_ = w.Close()

	if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
		t.Fatalf("chunk writer wrote %q; want %q", g, e)
	}

	r := NewChunkedReader(&b)
	data, err := io.ReadAll(r)
	if err != nil {
		t.Logf(`data: "%s"`, data)
		t.Fatalf("ReadAll from reader: %v", err)
	}
	if g, e := string(data), chunk1+chunk2; g != e {
		t.Errorf("chunk reader read %q; want %q", g, e)
	}
}
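
// A note on the expected bytes checked above: HTTP/1.1 chunked framing writes
// each chunk as its payload length in hex, CRLF, the payload, and a trailing
// CRLF ("7" for the 7-byte "hello, ", "17" hex = 23 for the 23-byte
// "world! 0123456789abcdef"), and ends the stream with a zero-length chunk.
// The writer stops after "0\r\n"; the final CRLF that would follow any
// trailers is left to the caller, which is why the expected string ends there.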

func TestChunkReadMultiple(t *testing.T) {
	// Bunch of small chunks, all read together.
	{
		var b bytes.Buffer
		w := NewChunkedWriter(&b)
		_, _ = w.Write([]byte("foo"))
		_, _ = w.Write([]byte("bar"))
		_ = w.Close()

		r := NewChunkedReader(&b)
		buf := make([]byte, 10)
		n, err := r.Read(buf)
		if n != 6 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 6, EOF", n, err)
		}
		buf = buf[:n]
		if string(buf) != "foobar" {
			t.Errorf("Read = %q; want %q", buf, "foobar")
		}
	}
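	// In the block above, the terminating 0-length chunk is consumed by the
	// same Read call, so the buffered data and io.EOF come back together,
	// as the io.Reader contract allows.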

	// One big chunk followed by a little chunk, but the small bufio.Reader size
	// should prevent the second chunk header from being read.
	{
		var b bytes.Buffer
		w := NewChunkedWriter(&b)
		// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
		// the same as the bufio ReaderSize below (the minimum), so even
		// though we're going to try to Read with a buffer large enough to also
		// receive "foo", the second chunk header won't be read yet.
		const fillBufChunk = "0123456789a"
		const shortChunk = "foo"
		_, _ = w.Write([]byte(fillBufChunk))
		_, _ = w.Write([]byte(shortChunk))
		_ = w.Close()

		r := NewChunkedReader(bufio.NewReaderSize(&b, 16))
		buf := make([]byte, len(fillBufChunk)+len(shortChunk))
		n, err := r.Read(buf)
		if n != len(fillBufChunk) || err != nil {
			t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
		}
		buf = buf[:n]
		if string(buf) != fillBufChunk {
			t.Errorf("Read = %q; want %q", buf, fillBufChunk)
		}

		n, err = r.Read(buf)
		if n != len(shortChunk) || err != io.EOF {
			t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
		}
	}

	// And test that we see an EOF chunk, even though our buffer is already full:
	{
		r := NewChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
		buf := make([]byte, 3)
		n, err := r.Read(buf)
		if n != 3 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 3, EOF", n, err)
		}
		if string(buf) != "foo" {
			t.Errorf("buf = %q; want foo", buf)
		}
	}
}

func TestChunkReaderAllocs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	var buf bytes.Buffer
	w := NewChunkedWriter(&buf)
	a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
	_, _ = w.Write(a)
	_, _ = w.Write(b)
	_, _ = w.Write(c)
	_ = w.Close()

	readBuf := make([]byte, len(a)+len(b)+len(c)+1)
	byter := bytes.NewReader(buf.Bytes())
	bufr := bufio.NewReader(byter)
	mallocs := testing.AllocsPerRun(100, func() {
		_, _ = byter.Seek(0, io.SeekStart)
		bufr.Reset(byter)
		r := NewChunkedReader(bufr)
		n, err := io.ReadFull(r, readBuf)
		if n != len(readBuf)-1 {
			t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
		}
		if err != io.ErrUnexpectedEOF {
			t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
		}
	})
	if mallocs > 1.5 {
		t.Errorf("mallocs = %v; want 1", mallocs)
	}
}
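
// The allocation test above resets a bytes.Reader and a bufio.Reader on every
// iteration so that only the chunked reader itself is allocated per run. A
// benchmark built on the same pattern might look like the sketch below; it is
// an illustrative addition rather than part of the original suite, and the
// payload length is arbitrary.
func BenchmarkChunkedReaderSketch(b *testing.B) {
	var buf bytes.Buffer
	w := NewChunkedWriter(&buf)
	payload := strings.Repeat("a", 30)
	_, _ = w.Write([]byte(payload))
	_ = w.Close()

	// Oversize the read buffer by one byte so io.ReadFull reports
	// io.ErrUnexpectedEOF once the chunked stream is exhausted.
	readBuf := make([]byte, len(payload)+1)
	byter := bytes.NewReader(buf.Bytes())
	bufr := bufio.NewReader(byter)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		_, _ = byter.Seek(0, io.SeekStart)
		bufr.Reset(byter)
		r := NewChunkedReader(bufr)
		if n, err := io.ReadFull(r, readBuf); n != len(payload) || err != io.ErrUnexpectedEOF {
			b.Fatalf("ReadFull = %d, %v; want %d, ErrUnexpectedEOF", n, err, len(payload))
		}
	}
}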

func TestParseHexUint(t *testing.T) {
	type testCase struct {
		in      string
		want    uint64
		wantErr string
	}
	tests := []testCase{
		{"x", 0, "invalid byte in chunk length"},
		{"0000000000000000", 0, ""},
		{"0000000000000001", 1, ""},
		{"ffffffffffffffff", 1<<64 - 1, ""},
		{"000000000000bogus", 0, "invalid byte in chunk length"},
		{"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted
		{"10000000000000000", 0, "http chunk length too large"},
		{"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted
	}
	for i := uint64(0); i <= 1234; i++ {
		tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i})
	}
	for _, tt := range tests {
		got, err := parseHexUint([]byte(tt.in))
		if tt.wantErr != "" {
			if !strings.Contains(fmt.Sprint(err), tt.wantErr) {
				t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr)
			}
		} else {
			if err != nil || got != tt.want {
				t.Errorf("parseHexUint(%q) = %v, %v; want %v", tt.in, got, err, tt.want)
			}
		}
	}
}
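
// The table above drives parseHexUint with lower-case hex only; the "too
// large" cases show that anything longer than 16 hex digits is rejected
// outright, even when leading zeros would keep the value in range. A small
// illustrative extension (not part of the original table), assuming the
// parser also accepts upper-case digits as standard hex:
func TestParseHexUintUpperCase(t *testing.T) {
	got, err := parseHexUint([]byte("FF"))
	if err != nil || got != 255 {
		t.Errorf(`parseHexUint("FF") = %v, %v; want 255, nil`, got, err)
	}
}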

func TestChunkReadingIgnoresExtensions(t *testing.T) {
	in := "7;ext=\"some quoted string\"\r\n" + // token=quoted string
		"hello, \r\n" +
		"17;someext\r\n" + // token without value
		"world! 0123456789abcdef\r\n" +
		"0;someextension=sometoken\r\n" // token=token
	data, err := io.ReadAll(NewChunkedReader(strings.NewReader(in)))
	if err != nil {
		t.Fatalf("ReadAll = %q, %v", data, err)
	}
	if g, e := string(data), "hello, world! 0123456789abcdef"; g != e {
		t.Errorf("read %q; want %q", g, e)
	}
}
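
// The three inputs above cover the chunk-extension forms from RFC 7230
// section 4.1.1 (chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )):
// a name with a quoted-string value, a bare name, and a name with a token
// value. The reader discards all of them and returns only the chunk data.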

// Issue 17355: ChunkedReader shouldn't block waiting for more data
// if it can return something.
func TestChunkReadPartial(t *testing.T) {
	pr, pw := io.Pipe()
	go func() {
		_, _ = pw.Write([]byte("7\r\n1234567"))
	}()
	cr := NewChunkedReader(pr)
	readBuf := make([]byte, 7)
	n, err := cr.Read(readBuf)
	if err != nil {
		t.Fatal(err)
	}
	want := "1234567"
	if n != 7 || string(readBuf) != want {
		t.Fatalf("Read: %v %q; want %d, %q", n, readBuf[:n], len(want), want)
	}
	go func() {
		_, _ = pw.Write([]byte("xx"))
	}()
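	// "xx" is not the CRLF that must terminate the 7-byte chunk's payload,
	// so the next Read reports a malformed chunked encoding rather than
	// blocking for more data.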
	_, err = cr.Read(readBuf)
	if got := fmt.Sprint(err); !strings.Contains(got, "malformed") {
		t.Fatalf("second read = %v; want malformed error", err)
	}
}