// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"testing"
	"testing/iotest"
)

func TestChunk(t *testing.T) {
	var b bytes.Buffer

	w := NewChunkedWriter(&b)
	const chunk1 = "hello, "
	const chunk2 = "world! 0123456789abcdef"
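	// chunk1 is 7 bytes and chunk2 is 23 (0x17) bytes, which is where the "7"
	// and "17" chunk sizes in the expected output below come from.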
	w.Write([]byte(chunk1))
	w.Write([]byte(chunk2))
	w.Close()

	if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
		t.Fatalf("chunk writer wrote %q; want %q", g, e)
	}

	r := NewChunkedReader(&b)
	data, err := io.ReadAll(r)
	if err != nil {
		t.Logf(`data: "%s"`, data)
		t.Fatalf("ReadAll from reader: %v", err)
	}
	if g, e := string(data), chunk1+chunk2; g != e {
		t.Errorf("chunk reader read %q; want %q", g, e)
	}
}

func TestChunkReadMultiple(t *testing.T) {
	// Bunch of small chunks, all read together.
	{
		var b bytes.Buffer
		w := NewChunkedWriter(&b)
		w.Write([]byte("foo"))
		w.Write([]byte("bar"))
		w.Close()

		r := NewChunkedReader(&b)
		buf := make([]byte, 10)
		n, err := r.Read(buf)
		if n != 6 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 6, EOF", n, err)
		}
		buf = buf[:n]
		if string(buf) != "foobar" {
			t.Errorf("Read = %q; want %q", buf, "foobar")
		}
	}

	// One big chunk followed by a little chunk, but the small bufio.Reader size
	// should prevent the second chunk header from being read.
	{
		var b bytes.Buffer
		w := NewChunkedWriter(&b)
		// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
		// the same as the bufio ReaderSize below (the minimum), so even
		// though we're going to try to Read with a buffer large enough to also
		// receive "foo", the second chunk header won't be read yet.
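		// (On the wire that first chunk is "b\r\n" + "0123456789a" + "\r\n", exactly 16 bytes.)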
		const fillBufChunk = "0123456789a"
		const shortChunk = "foo"
		w.Write([]byte(fillBufChunk))
		w.Write([]byte(shortChunk))
		w.Close()

		r := NewChunkedReader(bufio.NewReaderSize(&b, 16))
		buf := make([]byte, len(fillBufChunk)+len(shortChunk))
		n, err := r.Read(buf)
		if n != len(fillBufChunk) || err != nil {
			t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk))
		}
		buf = buf[:n]
		if string(buf) != fillBufChunk {
			t.Errorf("Read = %q; want %q", buf, fillBufChunk)
		}

		n, err = r.Read(buf)
		if n != len(shortChunk) || err != io.EOF {
			t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk))
		}
	}

	// And test that we see an EOF chunk, even though our buffer is already full:
	{
		r := NewChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n")))
		buf := make([]byte, 3)
		n, err := r.Read(buf)
		if n != 3 || err != io.EOF {
			t.Errorf("Read = %d, %v; want 3, EOF", n, err)
		}
		if string(buf) != "foo" {
			t.Errorf("buf = %q; want foo", buf)
		}
	}
}

func TestChunkReaderAllocs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	var buf bytes.Buffer
	w := NewChunkedWriter(&buf)
	a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
	w.Write(a)
	w.Write(b)
	w.Write(c)
	w.Close()

	readBuf := make([]byte, len(a)+len(b)+len(c)+1)
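	// readBuf is one byte longer than the body written above, so each io.ReadFull
	// below should consume the entire body and then return io.ErrUnexpectedEOF.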
	byter := bytes.NewReader(buf.Bytes())
	bufr := bufio.NewReader(byter)
	mallocs := testing.AllocsPerRun(100, func() {
		byter.Seek(0, io.SeekStart)
		bufr.Reset(byter)
		r := NewChunkedReader(bufr)
		n, err := io.ReadFull(r, readBuf)
		if n != len(readBuf)-1 {
			t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
		}
		if err != io.ErrUnexpectedEOF {
			t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
		}
	})
	if mallocs > 1.5 {
		t.Errorf("mallocs = %v; want 1", mallocs)
	}
}

func TestParseHexUint(t *testing.T) {
	type testCase struct {
		in      string
		want    uint64
		wantErr string
	}
	tests := []testCase{
		{"x", 0, "invalid byte in chunk length"},
		{"0000000000000000", 0, ""},
		{"0000000000000001", 1, ""},
		{"ffffffffffffffff", 1<<64 - 1, ""},
		{"000000000000bogus", 0, "invalid byte in chunk length"},
		{"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted
		{"10000000000000000", 0, "http chunk length too large"},
		{"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted
		{"", 0, "empty hex number for chunk length"},
	}
	for i := uint64(0); i <= 1234; i++ {
		tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i})
	}
	for _, tt := range tests {
		got, err := parseHexUint([]byte(tt.in))
		if tt.wantErr != "" {
			if !strings.Contains(fmt.Sprint(err), tt.wantErr) {
				t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr)
			}
		} else {
			if err != nil || got != tt.want {
				t.Errorf("parseHexUint(%q) = %v, %v; want %v", tt.in, got, err, tt.want)
			}
		}
	}
}

func TestChunkReadingIgnoresExtensions(t *testing.T) {
	in := "7;ext=\"some quoted string\"\r\n" + // token=quoted string
		"hello, \r\n" +
		"17;someext\r\n" + // token without value
		"world! 0123456789abcdef\r\n" +
		"0;someextension=sometoken\r\n" // token=token
	data, err := io.ReadAll(NewChunkedReader(strings.NewReader(in)))
	if err != nil {
		t.Fatalf("ReadAll = %q, %v", data, err)
	}
	if g, e := string(data), "hello, world! 0123456789abcdef"; g != e {
		t.Errorf("read %q; want %q", g, e)
	}
}

// Issue 17355: ChunkedReader shouldn't block waiting for more data
// if it can return something.
func TestChunkReadPartial(t *testing.T) {
	pr, pw := io.Pipe()
	go func() {
		pw.Write([]byte("7\r\n1234567"))
	}()
	cr := NewChunkedReader(pr)
	readBuf := make([]byte, 7)
	n, err := cr.Read(readBuf)
	if err != nil {
		t.Fatal(err)
	}
	want := "1234567"
	if n != 7 || string(readBuf) != want {
		t.Fatalf("Read: %v %q; want %d, %q", n, readBuf[:n], len(want), want)
	}
	go func() {
		pw.Write([]byte("xx"))
	}()
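	// The chunk's 7 data bytes have already been consumed, so the reader now
	// expects the chunk-terminating CRLF; "xx" in its place should surface as a
	// malformed chunked encoding error.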
	_, err = cr.Read(readBuf)
	if got := fmt.Sprint(err); !strings.Contains(got, "malformed") {
		t.Fatalf("second read = %v; want malformed error", err)
	}
}

// Issue 48861: ChunkedReader should report incomplete chunks
func TestIncompleteChunk(t *testing.T) {
	const valid = "4\r\nabcd\r\n" + "5\r\nabc\r\n\r\n" + "0\r\n"
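	// Note that the second chunk's five data bytes are "abc\r\n", so the chunk
	// data itself contains a CRLF.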

	for i := 0; i < len(valid); i++ {
		incomplete := valid[:i]
		r := NewChunkedReader(strings.NewReader(incomplete))
		if _, err := io.ReadAll(r); err != io.ErrUnexpectedEOF {
			t.Errorf("expected io.ErrUnexpectedEOF for %q, got %v", incomplete, err)
		}
	}

	r := NewChunkedReader(strings.NewReader(valid))
	if _, err := io.ReadAll(r); err != nil {
		t.Errorf("unexpected error for %q: %v", valid, err)
	}
}

func TestChunkEndReadError(t *testing.T) {
	readErr := fmt.Errorf("chunk end read error")

	r := NewChunkedReader(io.MultiReader(strings.NewReader("4\r\nabcd"), iotest.ErrReader(readErr)))
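	// The reader supplies the chunk's 4 data bytes and then fails, so readErr
	// should surface where the chunk-terminating CRLF would otherwise be read.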
	if _, err := io.ReadAll(r); err != readErr {
		t.Errorf("expected %v, got %v", readErr, err)
	}
}

func TestChunkReaderTooMuchOverhead(t *testing.T) {
	// If the sender is sending 100x as many chunk header bytes as chunk data,
	// we should reject the stream at some point.
	chunk := []byte("1;")
	for i := 0; i < 100; i++ {
		chunk = append(chunk, 'a') // chunk extension
	}
	chunk = append(chunk, "\r\nX\r\n"...)
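	// Each chunk therefore carries a single data byte ("X") preceded by over a
	// hundred bytes of chunk header and extension.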
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return chunk, nil
		}
		return []byte("0\r\n"), nil
	}})
	_, err := io.ReadAll(r)
	if err == nil {
		t.Fatalf("successfully read body with excessive overhead; want error")
	}
}

func TestChunkReaderByteAtATime(t *testing.T) {
	// Sending one byte per chunk should not trip the excess-overhead detection.
	const bodylen = 1 << 20
	r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) {
		if i < bodylen {
			return []byte("1\r\nX\r\n"), nil
		}
		return []byte("0\r\n"), nil
	}})
	got, err := io.ReadAll(r)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(got) != bodylen {
		t.Errorf("read %v bytes, want %v", len(got), bodylen)
	}
}

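// funcReader is an io.Reader that obtains its data by calling f with an
// incrementing iteration count and serves each returned slice (and, once the
// slice is drained, the returned error) through Read.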
type funcReader struct {
	f   func(iteration int) ([]byte, error)
	i   int
	b   []byte
	err error
}

func (r *funcReader) Read(p []byte) (n int, err error) {
	if len(r.b) == 0 && r.err == nil {
		r.b, r.err = r.f(r.i)
		r.i++
	}
	n = copy(p, r.b)
	r.b = r.b[n:]
	if len(r.b) > 0 {
		return n, nil
	}
	return n, r.err
}