1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package flate
6
7import (
8	"errors"
9	"fmt"
10	"io"
11	"math"
12)
13
// Compression levels accepted by NewWriter and NewWriterDict.
const (
	NoCompression      = 0  // stored (uncompressed) blocks only
	BestSpeed          = 1  // fastest; uses the deflateFast encoder
	BestCompression    = 9  // slowest; most aggressive match search
	DefaultCompression = -1 // mapped to level 6 by (*compressor).init

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly = -2
)
31
const (
	logWindowSize = 15
	windowSize    = 1 << logWindowSize // 32 KiB sliding window (RFC 1951 maximum)
	windowMask    = windowSize - 1

	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The offset is also known as distance. The underlying wire
	// format limits the range of lengths and offsets. For example, there are
	// 256 legitimate lengths: those in the range [3, 258]. This package's
	// compressor uses a higher minimum match length, enabling optimizations
	// such as finding matches via 32-bit loads and compares.
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4       // The smallest match length that the compressor actually emits
	maxMatchLength  = 258     // The largest match length
	baseMatchOffset = 1       // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535 // largest LEN a stored-block header can encode
	hashBits            = 17    // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	maxHashOffset       = 1 << 24 // rebase threshold for hashOffset in fillDeflate

	// skipNever is the fastSkipHashing sentinel that selects lazy matching
	// (levels 4-9) instead of hash-skipping.
	skipNever = math.MaxInt32
)
60
// compressionLevel holds the tuning parameters for one compression level:
// good/lazy/nice are match-length thresholds used by findMatch and deflate,
// chain bounds how many hash-chain entries are examined, and fastSkipHashing
// is either a skip threshold or skipNever to enable lazy matching.
type compressionLevel struct {
	level, good, lazy, nice, chain, fastSkipHashing int
}
64
// levels maps a compression level (the slice index) to its tuning parameters.
// Field order per entry: {level, good, lazy, nice, chain, fastSkipHashing}.
var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we don't bother trying with lazy matches.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}
80
// compressor holds the full state of one DEFLATE compression stream.
type compressor struct {
	compressionLevel // tuning parameters for the selected level

	w          *huffmanBitWriter      // bit-level block writer for all output
	bulkHasher func([]byte, []uint32) // batch hasher used by fillWindow

	// compression algorithm
	fill      func(*compressor, []byte) int // copy data to window
	step      func(*compressor)             // process window
	bestSpeed *deflateFast                  // Encoder for BestSpeed

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int // most recent chain entry for the hash at index
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int // bias added to stored positions so that 0 can mean "unset"

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	sync bool // requesting flush

	// queued output tokens
	tokens []token

	// deflate state
	length         int // length of the best match found at the previous position
	offset         int // offset of the best match found at the previous position
	maxInsertIndex int // positions >= this lack the 4 bytes needed to hash
	err            error // sticky error; once set, write/flush/close fail

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}
123
// fillDeflate copies as much of b as fits into the sliding window and
// returns the number of bytes consumed. When the write position nears the
// end of the 2*windowSize buffer, the upper half is slid down by windowSize
// and every stored index (including the hash tables) is rebased so match
// offsets stay valid.
func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart no longer points into the window; poison it so
			// writeBlock will pass a nil window for the next block.
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			// Rebase hashOffset back down to 1 and shift every stored
			// position by the same delta, so positions never overflow.
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta

			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range d.hashPrev[:] {
				if int(v) > delta {
					d.hashPrev[i] = uint32(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead[:] {
				if int(v) > delta {
					d.hashHead[i] = uint32(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}
163
164func (d *compressor) writeBlock(tokens []token, index int) error {
165	if index > 0 {
166		var window []byte
167		if d.blockStart <= index {
168			window = d.window[d.blockStart:index]
169		}
170		d.blockStart = index
171		d.w.writeBlock(tokens, false, window)
172		return d.w.err
173	}
174	return nil
175}
176
// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode.
	if d.compressionLevel.level < 2 {
		return
	}
	if d.index != 0 || d.windowEnd != 0 {
		panic("internal error: fillWindow called with stale data")
	}

	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window, b)

	// Calculate 256 hashes at the time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		index := j * 256
		// Chunks overlap by minMatchLength-1 bytes so no 4-byte hash
		// straddling a chunk boundary is missed.
		end := index + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		toCheck := d.window[index:end]
		dstSize := len(toCheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := d.hashMatch[:dstSize]
		d.bulkHasher(toCheck, dst)
		for i, val := range dst {
			di := i + index
			hh := &d.hashHead[val&hashMask]
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = *hh
			// Set the head of the hash chain to us.
			*hh = uint32(di + d.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd = n
	d.index = n
}
228
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
//
// pos is the window position to match from, prevHead the most recent hash
// chain entry for pos's hash (already de-biased by hashOffset), prevLength
// the length to beat, and lookahead the number of valid bytes after pos.
// Returns the best (length, offset) found and whether it beats prevLength.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		// A longer match must agree at the byte just past the current best
		// length; that single compare is much cheaper than matchLen.
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		// Follow the chain to the previous position with the same hash.
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}
282
283func (d *compressor) writeStoredBlock(buf []byte) error {
284	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
285		return d.w.err
286	}
287	d.w.writeBytes(buf)
288	return d.w.err
289}
290
// hashmul is the multiplier for the 4-byte multiplicative hash used by
// hash4 and bulkHash4; the top hashBits bits of the product form the hash.
const hashmul = 0x1e35a7bd
292
293// hash4 returns a hash representation of the first 4 bytes
294// of the supplied slice.
295// The caller must ensure that len(b) >= 4.
296func hash4(b []byte) uint32 {
297	return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
298}
299
300// bulkHash4 will compute hashes using the same
301// algorithm as hash4.
302func bulkHash4(b []byte, dst []uint32) {
303	if len(b) < minMatchLength {
304		return
305	}
306	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
307	dst[0] = (hb * hashmul) >> (32 - hashBits)
308	end := len(b) - minMatchLength + 1
309	for i := 1; i < end; i++ {
310		hb = (hb << 8) | uint32(b[i+3])
311		dst[i] = (hb * hashmul) >> (32 - hashBits)
312	}
313}
314
// matchLen returns the number of leading bytes at which a and b agree,
// capped at max. Both slices must be at least max bytes long.
func matchLen(a, b []byte, max int) int {
	b = b[:max]
	for i := range b {
		if a[i] != b[i] {
			return i
		}
	}
	return max
}
328
// encSpeed will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) encSpeed() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}

		// Handle small sizes.
		if d.windowEnd < 128 {
			switch {
			case d.windowEnd == 0:
				return
			case d.windowEnd <= 16:
				// Tiny payload: a stored block beats any entropy coding.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			default:
				// Small payload: Huffman-only coding, skip match search.
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.windowEnd = 0
			d.bestSpeed.reset()
			return
		}

	}
	// Encode the block.
	d.tokens = d.bestSpeed.encode(d.tokens[:0], d.window[:d.windowEnd])

	// If we removed less than 1/16th, Huffman compress the block.
	if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
	}
	d.err = d.w.err
	d.windowEnd = 0
}
368
369func (d *compressor) initDeflate() {
370	d.window = make([]byte, 2*windowSize)
371	d.hashOffset = 1
372	d.tokens = make([]token, 0, maxFlateBlockTokens+1)
373	d.length = minMatchLength - 1
374	d.offset = 0
375	d.byteAvailable = false
376	d.index = 0
377	d.chainHead = -1
378	d.bulkHasher = bulkHash4
379}
380
// deflate is the step function for compression levels 2-9. It scans the
// window from d.index, maintaining the hash chains and emitting literal and
// match tokens. When fastSkipHashing == skipNever it performs lazy matching:
// a match found at the previous position is only emitted if the current
// position cannot better it.
func (d *compressor) deflate() {
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	// Positions at or beyond maxInsertIndex lack the 4 bytes needed to hash.
	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)

Loop:
	for {
		if d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				break Loop
			}
			if d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
					d.byteAvailable = false
				}
				if len(d.tokens) > 0 {
					if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
				break Loop
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[d.index : d.index+minMatchLength])
			hh := &d.hashHead[hash&hashMask]
			d.chainHead = int(*hh)
			d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
			*hh = uint32(d.index + d.hashOffset)
		}
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		// Search for a match only if the chain head is still inside the
		// window and, for lazy matching, the previous match is short enough
		// (< d.lazy) to be worth trying to beat.
		if d.chainHead-d.hashOffset >= minIndex &&
			(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
				d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
			d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			if d.fastSkipHashing != skipNever {
				d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
			} else {
				d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
			}
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				if d.fastSkipHashing != skipNever {
					newIndex = d.index + d.length
				} else {
					newIndex = d.index + prevLength - 1
				}
				index := d.index
				for index++; index < newIndex; index++ {
					if index < d.maxInsertIndex {
						hash := hash4(d.window[index : index+minMatchLength])
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						hh := &d.hashHead[hash&hashMask]
						d.hashPrev[index&windowMask] = *hh
						// Set the head of the hash chain to us.
						*hh = uint32(index + d.hashOffset)
					}
				}
				d.index = index

				if d.fastSkipHashing == skipNever {
					d.byteAvailable = false
					d.length = minMatchLength - 1
				}
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
			}
			if len(d.tokens) == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
					return
				}
				d.tokens = d.tokens[:0]
			}
		} else {
			// No match to emit at this position: output a literal. In lazy
			// mode the literal is the byte deferred from the previous step.
			if d.fastSkipHashing != skipNever || d.byteAvailable {
				i := d.index - 1
				if d.fastSkipHashing != skipNever {
					i = d.index
				}
				d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
				if len(d.tokens) == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
			}
			d.index++
			if d.fastSkipHashing == skipNever {
				d.byteAvailable = true
			}
		}
	}
}
513
514func (d *compressor) fillStore(b []byte) int {
515	n := copy(d.window[d.windowEnd:], b)
516	d.windowEnd += n
517	return n
518}
519
520func (d *compressor) store() {
521	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
522		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
523		d.windowEnd = 0
524	}
525}
526
527// storeHuff compresses and stores the currently added data
528// when the d.window is full or we are at the end of the stream.
529// Any error that occurred will be in d.err
530func (d *compressor) storeHuff() {
531	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
532		return
533	}
534	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
535	d.err = d.w.err
536	d.windowEnd = 0
537}
538
539func (d *compressor) write(b []byte) (n int, err error) {
540	if d.err != nil {
541		return 0, d.err
542	}
543	n = len(b)
544	for len(b) > 0 {
545		d.step(d)
546		b = b[d.fill(d, b):]
547		if d.err != nil {
548			return 0, d.err
549		}
550	}
551	return n, nil
552}
553
554func (d *compressor) syncFlush() error {
555	if d.err != nil {
556		return d.err
557	}
558	d.sync = true
559	d.step(d)
560	if d.err == nil {
561		d.w.writeStoredHeader(0, false)
562		d.w.flush()
563		d.err = d.w.err
564	}
565	d.sync = false
566	return d.err
567}
568
569func (d *compressor) init(w io.Writer, level int) (err error) {
570	d.w = newHuffmanBitWriter(w)
571
572	switch {
573	case level == NoCompression:
574		d.window = make([]byte, maxStoreBlockSize)
575		d.fill = (*compressor).fillStore
576		d.step = (*compressor).store
577	case level == HuffmanOnly:
578		d.window = make([]byte, maxStoreBlockSize)
579		d.fill = (*compressor).fillStore
580		d.step = (*compressor).storeHuff
581	case level == BestSpeed:
582		d.compressionLevel = levels[level]
583		d.window = make([]byte, maxStoreBlockSize)
584		d.fill = (*compressor).fillStore
585		d.step = (*compressor).encSpeed
586		d.bestSpeed = newDeflateFast()
587		d.tokens = make([]token, maxStoreBlockSize)
588	case level == DefaultCompression:
589		level = 6
590		fallthrough
591	case 2 <= level && level <= 9:
592		d.compressionLevel = levels[level]
593		d.initDeflate()
594		d.fill = (*compressor).fillDeflate
595		d.step = (*compressor).deflate
596	default:
597		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
598	}
599	return nil
600}
601
602func (d *compressor) reset(w io.Writer) {
603	d.w.reset(w)
604	d.sync = false
605	d.err = nil
606	switch d.compressionLevel.level {
607	case NoCompression:
608		d.windowEnd = 0
609	case BestSpeed:
610		d.windowEnd = 0
611		d.tokens = d.tokens[:0]
612		d.bestSpeed.reset()
613	default:
614		d.chainHead = -1
615		for i := range d.hashHead {
616			d.hashHead[i] = 0
617		}
618		for i := range d.hashPrev {
619			d.hashPrev[i] = 0
620		}
621		d.hashOffset = 1
622		d.index, d.windowEnd = 0, 0
623		d.blockStart, d.byteAvailable = 0, false
624		d.tokens = d.tokens[:0]
625		d.length = minMatchLength - 1
626		d.offset = 0
627		d.maxInsertIndex = 0
628	}
629}
630
631func (d *compressor) close() error {
632	if d.err == errWriterClosed {
633		return nil
634	}
635	if d.err != nil {
636		return d.err
637	}
638	d.sync = true
639	d.step(d)
640	if d.err != nil {
641		return d.err
642	}
643	if d.w.writeStoredHeader(0, true); d.w.err != nil {
644		return d.w.err
645	}
646	d.w.flush()
647	if d.w.err != nil {
648		return d.w.err
649	}
650	d.err = errWriterClosed
651	return nil
652}
653
654// NewWriter returns a new [Writer] compressing data at the given level.
655// Following zlib, levels range from 1 ([BestSpeed]) to 9 ([BestCompression]);
656// higher levels typically run slower but compress more. Level 0
657// ([NoCompression]) does not attempt any compression; it only adds the
658// necessary DEFLATE framing.
659// Level -1 ([DefaultCompression]) uses the default compression level.
660// Level -2 ([HuffmanOnly]) will use Huffman compression only, giving
661// a very fast compression for all types of input, but sacrificing considerable
662// compression efficiency.
663//
664// If level is in the range [-2, 9] then the error returned will be nil.
665// Otherwise the error returned will be non-nil.
666func NewWriter(w io.Writer, level int) (*Writer, error) {
667	var dw Writer
668	if err := dw.d.init(w, level); err != nil {
669		return nil, err
670	}
671	return &dw, nil
672}
673
674// NewWriterDict is like [NewWriter] but initializes the new
675// [Writer] with a preset dictionary. The returned [Writer] behaves
676// as if the dictionary had been written to it without producing
677// any compressed output. The compressed data written to w
678// can only be decompressed by a [Reader] initialized with the
679// same dictionary.
680func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
681	dw := &dictWriter{w}
682	zw, err := NewWriter(dw, level)
683	if err != nil {
684		return nil, err
685	}
686	zw.d.fillWindow(dict)
687	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
688	return zw, err
689}
690
// dictWriter forwards writes to an underlying writer. NewWriterDict wraps
// the user's writer in one so that Writer.Reset can detect a
// dictionary-initialized Writer via a type assertion and swap the
// destination in place.
type dictWriter struct {
	w io.Writer
}
694
695func (w *dictWriter) Write(b []byte) (n int, err error) {
696	return w.w.Write(b)
697}
698
// errWriterClosed is stored in compressor.err by close to mark the stream
// as cleanly closed; subsequent writes and flushes return it as their error.
var errWriterClosed = errors.New("flate: closed writer")
700
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see [NewWriter]).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary from NewWriterDict, replayed by Reset
}
707
708// Write writes data to w, which will eventually write the
709// compressed form of data to its underlying writer.
710func (w *Writer) Write(data []byte) (n int, err error) {
711	return w.d.write(data)
712}
713
714// Flush flushes any pending data to the underlying writer.
715// It is useful mainly in compressed network protocols, to ensure that
716// a remote reader has enough data to reconstruct a packet.
717// Flush does not return until the data has been written.
718// Calling Flush when there is no pending data still causes the [Writer]
719// to emit a sync marker of at least 4 bytes.
720// If the underlying writer returns an error, Flush returns that error.
721//
722// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
723func (w *Writer) Flush() error {
724	// For more about flushing:
725	// https://www.bolet.org/~pornin/deflate-flush.html
726	return w.d.syncFlush()
727}
728
729// Close flushes and closes the writer.
730func (w *Writer) Close() error {
731	return w.d.close()
732}
733
734// Reset discards the writer's state and makes it equivalent to
735// the result of [NewWriter] or [NewWriterDict] called with dst
736// and w's level and dictionary.
737func (w *Writer) Reset(dst io.Writer) {
738	if dw, ok := w.d.w.writer.(*dictWriter); ok {
739		// w was created with NewWriterDict
740		dw.w = dst
741		w.d.reset(dw)
742		w.d.fillWindow(w.dict)
743	} else {
744		// w was created with NewWriter
745		w.d.reset(dst)
746	}
747}
748