// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package zip

import (
	"bytes"
	"compress/flate"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"android/soong/response"

	"github.com/google/blueprint/pathtools"

	"android/soong/jar"
	"android/soong/third_party/zip"
)

// Block size used during parallel compression of a single file.
const parallelBlockSize = 1 * 1024 * 1024 // 1MB

// Minimum file size to use parallel compression. It requires more
// flate.Writer allocations, since we can't change the dictionary
// during Reset
const minParallelFileSize = parallelBlockSize * 6

// Size of the ZIP compression window (32KB)
const windowSize = 32 * 1024

type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error {
	return nil
}

type byteReaderCloser struct {
	*bytes.Reader
	io.Closer
}

type pathMapping struct {
	dest, src string
	zipMethod uint16
}

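// FileArg describes a group of source files to add to the zip, along with the
// prefixes that control where they land in the archive.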
type FileArg struct {
	PathPrefixInZip, SourcePrefixToStrip string
	SourceFiles                          []string
	JunkPaths                            bool
	GlobDir                              string
}

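// FileArgsBuilder accumulates FileArg entries, applying the current prefix and
// path-junking state to each file, directory, or list that is added. A minimal
// usage sketch (the paths are hypothetical):
//
//	args := NewFileArgsBuilder().
//		PathPrefixInZip("lib").
//		SourcePrefixToStrip("out").
//		File("out/libfoo.so").
//		FileArgs()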
type FileArgsBuilder struct {
	state FileArg
	err   error
	fs    pathtools.FileSystem

	fileArgs []FileArg
}

func NewFileArgsBuilder() *FileArgsBuilder {
	return &FileArgsBuilder{
		fs: pathtools.OsFs,
	}
}

func (b *FileArgsBuilder) JunkPaths(v bool) *FileArgsBuilder {
	b.state.JunkPaths = v
	b.state.SourcePrefixToStrip = ""
	return b
}

func (b *FileArgsBuilder) SourcePrefixToStrip(prefixToStrip string) *FileArgsBuilder {
	b.state.JunkPaths = false
	b.state.SourcePrefixToStrip = prefixToStrip
	return b
}

func (b *FileArgsBuilder) PathPrefixInZip(rootPrefix string) *FileArgsBuilder {
	b.state.PathPrefixInZip = rootPrefix
	return b
}

func (b *FileArgsBuilder) File(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.SourceFiles = []string{name}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

func (b *FileArgsBuilder) Dir(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	arg := b.state
	arg.GlobDir = name
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// List reads the file names from the given file and adds them to the source files list.
func (b *FileArgsBuilder) List(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	list, err := ioutil.ReadAll(f)
	if err != nil {
		b.err = err
		return b
	}

	arg := b.state
	arg.SourceFiles = strings.Fields(string(list))
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

// RspFile reads the file names from the given .rsp file and adds them to the source files list.
func (b *FileArgsBuilder) RspFile(name string) *FileArgsBuilder {
	if b.err != nil {
		return b
	}

	f, err := b.fs.Open(name)
	if err != nil {
		b.err = err
		return b
	}
	defer f.Close()

	arg := b.state
	arg.SourceFiles, err = response.ReadRspFile(f)
	if err != nil {
		b.err = err
		return b
	}
	for i := range arg.SourceFiles {
		arg.SourceFiles[i] = pathtools.MatchEscape(arg.SourceFiles[i])
	}
	b.fileArgs = append(b.fileArgs, arg)
	return b
}

func (b *FileArgsBuilder) Error() error {
	if b == nil {
		return nil
	}
	return b.err
}

func (b *FileArgsBuilder) FileArgs() []FileArg {
	if b == nil {
		return nil
	}
	return b.fileArgs
}

type IncorrectRelativeRootError struct {
	RelativeRoot string
	Path         string
}

func (x IncorrectRelativeRootError) Error() string {
	return fmt.Sprintf("path %q is outside relative root %q", x.Path, x.RelativeRoot)
}

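// ZipWriter holds the state of an in-progress zip write: the entries created
// so far, the channels that coordinate parallel compression with the output
// writer, and the rate limiters that bound CPU and memory use.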
type ZipWriter struct {
	time         time.Time
	createdFiles map[string]string
	createdDirs  map[string]string
	directories  bool

	errors   chan error
	writeOps chan chan *zipEntry

	cpuRateLimiter    *CPURateLimiter
	memoryRateLimiter *MemoryRateLimiter

	compressorPool sync.Pool
	compLevel      int

	followSymlinks     pathtools.ShouldFollowSymlinks
	ignoreMissingFiles bool

	stderr io.Writer
	fs     pathtools.FileSystem
}

type zipEntry struct {
	fh *zip.FileHeader

	// List of delayed io.Readers that will produce the entry's contents.
	futureReaders chan chan io.Reader

	// Only used for passing into the MemoryRateLimiter to ensure we
	// release as much memory as we request.
	allocatedSize int64
}

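// ZipArgs collects the options for a single zip operation.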
type ZipArgs struct {
	FileArgs                 []FileArg
	OutputFilePath           string
	EmulateJar               bool
	SrcJar                   bool
	AddDirectoryEntriesToZip bool
	CompressionLevel         int
	ManifestSourcePath       string
	NumParallelJobs          int
	NonDeflatedFiles         map[string]bool
	WriteIfChanged           bool
	StoreSymlinks            bool
	IgnoreMissingFiles       bool

	Stderr     io.Writer
	Filesystem pathtools.FileSystem
}

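// zipTo expands the source file arguments in args into path mappings and
// writes the resulting zip archive to w.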
func zipTo(args ZipArgs, w io.Writer) error {
	if args.EmulateJar {
		args.AddDirectoryEntriesToZip = true
	}

	// Have Glob follow symlinks if they are not being stored as symlinks in the zip file.
	followSymlinks := pathtools.ShouldFollowSymlinks(!args.StoreSymlinks)

	z := &ZipWriter{
		time:               jar.DefaultTime,
		createdDirs:        make(map[string]string),
		createdFiles:       make(map[string]string),
		directories:        args.AddDirectoryEntriesToZip,
		compLevel:          args.CompressionLevel,
		followSymlinks:     followSymlinks,
		ignoreMissingFiles: args.IgnoreMissingFiles,
		stderr:             args.Stderr,
		fs:                 args.Filesystem,
	}

	if z.fs == nil {
		z.fs = pathtools.OsFs
	}

	if z.stderr == nil {
		z.stderr = os.Stderr
	}

	pathMappings := []pathMapping{}

	noCompression := args.CompressionLevel == 0

	for _, fa := range args.FileArgs {
		var srcs []string
		for _, s := range fa.SourceFiles {
			s = strings.TrimSpace(s)
			if s == "" {
				continue
			}

			result, err := z.fs.Glob(s, nil, followSymlinks)
			if err != nil {
				return err
			}
			if len(result.Matches) == 0 {
				err := &os.PathError{
					Op:   "lstat",
					Path: s,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			}
			srcs = append(srcs, result.Matches...)
		}
		if fa.GlobDir != "" {
			if exists, isDir, err := z.fs.Exists(fa.GlobDir); err != nil {
				return err
			} else if !exists {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  os.ErrNotExist,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			} else if !isDir {
				err := &os.PathError{
					Op:   "lstat",
					Path: fa.GlobDir,
					Err:  syscall.ENOTDIR,
				}
				if args.IgnoreMissingFiles {
					fmt.Fprintln(z.stderr, "warning:", err)
				} else {
					return err
				}
			}
			result, err := z.fs.Glob(filepath.Join(fa.GlobDir, "**/*"), nil, followSymlinks)
			if err != nil {
				return err
			}
			srcs = append(srcs, result.Matches...)
		}
		for _, src := range srcs {
			err := fillPathPairs(fa, src, &pathMappings, args.NonDeflatedFiles, noCompression)
			if err != nil {
				return err
			}
		}
	}

	return z.write(w, pathMappings, args.ManifestSourcePath, args.EmulateJar, args.SrcJar, args.NumParallelJobs)
}

// Zip creates an output zip archive from the given sources.
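//
// A minimal invocation sketch (the output path and input file are hypothetical):
//
//	err := Zip(ZipArgs{
//		OutputFilePath: "out.zip",
//		FileArgs:       NewFileArgsBuilder().File("a.txt").FileArgs(),
//	})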
func Zip(args ZipArgs) error {
	if args.OutputFilePath == "" {
		return fmt.Errorf("output file path must be nonempty")
	}

	buf := &bytes.Buffer{}
	var out io.Writer = buf

	var zipErr error

	if !args.WriteIfChanged {
		f, err := os.Create(args.OutputFilePath)
		if err != nil {
			return err
		}

		defer f.Close()
		defer func() {
			if zipErr != nil {
				os.Remove(args.OutputFilePath)
			}
		}()

		out = f
	}

	zipErr = zipTo(args, out)
	if zipErr != nil {
		return zipErr
	}

	if args.WriteIfChanged {
		err := pathtools.WriteFileIfChanged(args.OutputFilePath, buf.Bytes(), 0666)
		if err != nil {
			return err
		}
	}

	return nil
}

func fillPathPairs(fa FileArg, src string, pathMappings *[]pathMapping,
	nonDeflatedFiles map[string]bool, noCompression bool) error {

	var dest string

	if fa.JunkPaths {
		dest = filepath.Base(src)
	} else {
		var err error
		dest, err = filepath.Rel(fa.SourcePrefixToStrip, src)
		if err != nil {
			return err
		}
		if strings.HasPrefix(dest, "../") {
			return IncorrectRelativeRootError{
				Path:         src,
				RelativeRoot: fa.SourcePrefixToStrip,
			}
		}
	}
	dest = filepath.Join(fa.PathPrefixInZip, dest)

	zipMethod := zip.Deflate
	if _, found := nonDeflatedFiles[dest]; found || noCompression {
		zipMethod = zip.Store
	}
	*pathMappings = append(*pathMappings,
		pathMapping{dest: dest, src: src, zipMethod: zipMethod})

	return nil
}

func jarSort(mappings []pathMapping) {
	sort.SliceStable(mappings, func(i int, j int) bool {
		return jar.EntryNamesLess(mappings[i].dest, mappings[j].dest)
	})
}

func (z *ZipWriter) write(f io.Writer, pathMappings []pathMapping, manifest string, emulateJar, srcJar bool,
	parallelJobs int) error {

	z.errors = make(chan error)
	defer close(z.errors)

	// This channel size can be essentially unlimited -- it's used as a fifo
	// queue to decouple the CPU and IO loads. Directories don't require any
	// compression time, but still cost some IO. The same goes for small files,
	// which can be very fast to compress, while files that are harder to
	// compress won't take a correspondingly longer time to write out.
	//
	// The optimum size here depends on your CPU and IO characteristics, and
	// the layout of your zip file. 1000 was chosen mostly at random as
	// something that worked reasonably well for a test file.
	//
	// The RateLimit object will put the upper bounds on the number of
	// parallel compressions and outstanding buffers.
	z.writeOps = make(chan chan *zipEntry, 1000)
	z.cpuRateLimiter = NewCPURateLimiter(int64(parallelJobs))
	z.memoryRateLimiter = NewMemoryRateLimiter(0)
	defer func() {
		z.cpuRateLimiter.Stop()
		z.memoryRateLimiter.Stop()
	}()

	if manifest != "" && !emulateJar {
		return errors.New("must specify --jar when specifying a manifest via -m")
	}

	if emulateJar {
		// manifest may be empty, in which case addManifest will fill in a default
		pathMappings = append(pathMappings, pathMapping{jar.ManifestFile, manifest, zip.Deflate})

		jarSort(pathMappings)
	}

	go func() {
		var err error
		defer close(z.writeOps)

		for _, ele := range pathMappings {
			if emulateJar && ele.dest == jar.ManifestFile {
				err = z.addManifest(ele.dest, ele.src, ele.zipMethod)
			} else {
				err = z.addFile(ele.dest, ele.src, ele.zipMethod, emulateJar, srcJar)
			}
			if err != nil {
				z.errors <- err
				return
			}
		}
	}()

	zipw := zip.NewWriter(f)

	var currentWriteOpChan chan *zipEntry
	var currentWriter io.WriteCloser
	var currentReaders chan chan io.Reader
	var currentReader chan io.Reader
	var done bool

	for !done {
		var writeOpsChan chan chan *zipEntry
		var writeOpChan chan *zipEntry
		var readersChan chan chan io.Reader

		if currentReader != nil {
			// Only read and process errors
		} else if currentReaders != nil {
			readersChan = currentReaders
		} else if currentWriteOpChan != nil {
			writeOpChan = currentWriteOpChan
		} else {
			writeOpsChan = z.writeOps
		}

		select {
		case writeOp, ok := <-writeOpsChan:
			if !ok {
				done = true
			}

			currentWriteOpChan = writeOp

		case op := <-writeOpChan:
			currentWriteOpChan = nil

			var err error
			if op.fh.Method == zip.Deflate {
				currentWriter, err = zipw.CreateCompressedHeader(op.fh)
			} else {
				var zw io.Writer

				op.fh.CompressedSize64 = op.fh.UncompressedSize64

				zw, err = zipw.CreateHeaderAndroid(op.fh)
				currentWriter = nopCloser{zw}
			}
			if err != nil {
				return err
			}

			currentReaders = op.futureReaders
			if op.futureReaders == nil {
				currentWriter.Close()
				currentWriter = nil
			}
			z.memoryRateLimiter.Finish(op.allocatedSize)

		case futureReader, ok := <-readersChan:
			if !ok {
				// Done with reading
				currentWriter.Close()
				currentWriter = nil
				currentReaders = nil
			}

			currentReader = futureReader

		case reader := <-currentReader:
			_, err := io.Copy(currentWriter, reader)
			if err != nil {
				return err
			}

			currentReader = nil

		case err := <-z.errors:
			return err
		}
	}

	// One last chance to catch an error
	select {
	case err := <-z.errors:
		return err
	default:
		zipw.Close()
		return nil
	}
}

// addFile imports src (possibly with compression) into the zip at sub-path dest.
func (z *ZipWriter) addFile(dest, src string, method uint16, emulateJar, srcJar bool) error {
	var fileSize int64
	var executable bool

	var s os.FileInfo
	var err error
	if z.followSymlinks {
		s, err = z.fs.Stat(src)
	} else {
		s, err = z.fs.Lstat(src)
	}

	if err != nil {
		if os.IsNotExist(err) && z.ignoreMissingFiles {
			fmt.Fprintln(z.stderr, "warning:", err)
			return nil
		}
		return err
	}

	createParentDirs := func(dest, src string) error {
		if err := z.writeDirectory(filepath.Dir(dest), src, emulateJar); err != nil {
			return err
		}

		if prev, exists := z.createdDirs[dest]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
		}
		if prev, exists := z.createdFiles[dest]; exists {
			return fmt.Errorf("destination %q has two files %q and %q", dest, prev, src)
		}

		z.createdFiles[dest] = src

		return nil
	}

	if s.IsDir() {
		if z.directories {
			return z.writeDirectory(dest, src, emulateJar)
		}
		return nil
	} else if s.Mode()&os.ModeSymlink != 0 {
		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		return z.writeSymlink(dest, src)
	} else if s.Mode().IsRegular() {
		r, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		if srcJar && filepath.Ext(src) == ".java" {
			// rewrite the destination using the package path if it can be determined
			pkg, err := jar.JavaPackage(r, src)
			if err != nil {
				// ignore errors for now, leaving the file in its original location in the zip
			} else {
				dest = filepath.Join(filepath.Join(strings.Split(pkg, ".")...), filepath.Base(src))
			}

			_, err = r.Seek(0, io.SeekStart)
			if err != nil {
				return err
			}
		}

		fileSize = s.Size()
		executable = s.Mode()&0100 != 0

		header := &zip.FileHeader{
			Name:               dest,
			Method:             method,
			UncompressedSize64: uint64(fileSize),
		}

		mode := os.FileMode(0644)
		if executable {
			mode = 0755
		}
		header.SetMode(mode)

		err = createParentDirs(dest, src)
		if err != nil {
			return err
		}

		return z.writeFileContents(header, r)
	} else {
		return fmt.Errorf("%s is not a file, directory, or symlink", src)
	}
}

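// addManifest adds the jar manifest entry at dest, generating default contents
// when src is empty.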
func (z *ZipWriter) addManifest(dest string, src string, _ uint16) error {
	if prev, exists := z.createdDirs[dest]; exists {
		return fmt.Errorf("destination %q is both a directory %q and a file %q", dest, prev, src)
	}
	if prev, exists := z.createdFiles[dest]; exists {
		return fmt.Errorf("destination %q has two files %q and %q", dest, prev, src)
	}

	if err := z.writeDirectory(filepath.Dir(dest), src, true); err != nil {
		return err
	}

	var contents []byte
	if src != "" {
		f, err := z.fs.Open(src)
		if err != nil {
			return err
		}

		contents, err = ioutil.ReadAll(f)
		f.Close()
		if err != nil {
			return err
		}
	}

	fh, buf, err := jar.ManifestFileContents(contents)
	if err != nil {
		return err
	}

	reader := &byteReaderCloser{bytes.NewReader(buf), ioutil.NopCloser(nil)}

	return z.writeFileContents(fh, reader)
}

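// writeFileContents queues a write op for header and compresses the contents of
// r in the background, splitting large deflated files into parallel blocks.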
func (z *ZipWriter) writeFileContents(header *zip.FileHeader, r pathtools.ReaderAtSeekerCloser) (err error) {

	header.SetModTime(z.time)

	compressChan := make(chan *zipEntry, 1)
	z.writeOps <- compressChan

	// Pre-fill a zipEntry; it will be sent on compressChan once
	// we're sure about the Method and CRC.
	ze := &zipEntry{
		fh: header,
	}

	ze.allocatedSize = int64(header.UncompressedSize64)
	z.cpuRateLimiter.Request()
	z.memoryRateLimiter.Request(ze.allocatedSize)

	fileSize := int64(header.UncompressedSize64)
	if fileSize == 0 {
		fileSize = int64(header.UncompressedSize)
	}

	if header.Method == zip.Deflate && fileSize >= minParallelFileSize {
		wg := new(sync.WaitGroup)

		// Allocate a channel big enough to hold all the readers. We'll limit
		// this based on actual buffer sizes in RateLimit.
		ze.futureReaders = make(chan chan io.Reader, (fileSize/parallelBlockSize)+1)

		// Calculate the CRC in the background, since reading the entire
		// file could take a while.
		//
		// We could split this up into chunks as well, but it's faster
		// than the compression. Due to the Go Zip API, we also need to
		// know the result before we can begin writing the compressed
		// data out to the zipfile.
		wg.Add(1)
		go z.crcFile(r, ze, compressChan, wg)

		for start := int64(0); start < fileSize; start += parallelBlockSize {
			sr := io.NewSectionReader(r, start, parallelBlockSize)
			resultChan := make(chan io.Reader, 1)
			ze.futureReaders <- resultChan

			z.cpuRateLimiter.Request()

			last := !(start+parallelBlockSize < fileSize)
			var dict []byte
			if start >= windowSize {
				dict, err = ioutil.ReadAll(io.NewSectionReader(r, start-windowSize, windowSize))
				if err != nil {
					return err
				}
			}

			wg.Add(1)
			go z.compressPartialFile(sr, dict, last, resultChan, wg)
		}

		close(ze.futureReaders)

		// Close the file handle after all readers are done
		go func(wg *sync.WaitGroup, closer io.Closer) {
			wg.Wait()
			closer.Close()
		}(wg, r)
	} else {
		go func() {
			z.compressWholeFile(ze, r, compressChan)
			r.Close()
		}()
	}

	return nil
}

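// crcFile computes the CRC32 of r and delivers the completed zipEntry on resultChan.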
func (z *ZipWriter) crcFile(r io.Reader, ze *zipEntry, resultChan chan *zipEntry, wg *sync.WaitGroup) {
	defer wg.Done()
	defer z.cpuRateLimiter.Finish()

	crc := crc32.NewIEEE()
	_, err := io.Copy(crc, r)
	if err != nil {
		z.errors <- err
		return
	}

	ze.fh.CRC32 = crc.Sum32()
	resultChan <- ze
	close(resultChan)
}

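// compressPartialFile compresses one block of a large file and delivers the
// compressed data on resultChan.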
func (z *ZipWriter) compressPartialFile(r io.Reader, dict []byte, last bool, resultChan chan io.Reader, wg *sync.WaitGroup) {
	defer wg.Done()

	result, err := z.compressBlock(r, dict, last)
	if err != nil {
		z.errors <- err
		return
	}

	z.cpuRateLimiter.Finish()

	resultChan <- result
}

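// compressBlock deflates one block of r into a new buffer, priming the
// compressor with dict (the preceding 32KB window) when given, and closing the
// flate stream if this is the last block of the file.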
func (z *ZipWriter) compressBlock(r io.Reader, dict []byte, last bool) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	var fw *flate.Writer
	var err error
	if len(dict) > 0 {
		// There's no way to Reset a Writer with a new dictionary, so
		// don't use the Pool
		fw, err = flate.NewWriterDict(buf, z.compLevel, dict)
	} else {
		var ok bool
		if fw, ok = z.compressorPool.Get().(*flate.Writer); ok {
			fw.Reset(buf)
		} else {
			fw, err = flate.NewWriter(buf, z.compLevel)
		}
		defer z.compressorPool.Put(fw)
	}
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(fw, r)
	if err != nil {
		return nil, err
	}
	if last {
		fw.Close()
	} else {
		fw.Flush()
	}

	return buf, nil
}

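// compressWholeFile computes the CRC of r, compresses it as a single block,
// and stores it uncompressed instead if compression does not make it smaller.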
func (z *ZipWriter) compressWholeFile(ze *zipEntry, r io.ReadSeeker, compressChan chan *zipEntry) {

	crc := crc32.NewIEEE()
	_, err := io.Copy(crc, r)
	if err != nil {
		z.errors <- err
		return
	}

	ze.fh.CRC32 = crc.Sum32()

	_, err = r.Seek(0, 0)
	if err != nil {
		z.errors <- err
		return
	}

	readFile := func(reader io.ReadSeeker) ([]byte, error) {
		_, err := reader.Seek(0, 0)
		if err != nil {
			return nil, err
		}

		buf, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}

		return buf, nil
	}

	ze.futureReaders = make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	ze.futureReaders <- futureReader
	close(ze.futureReaders)

	if ze.fh.Method == zip.Deflate {
		compressed, err := z.compressBlock(r, nil, true)
		if err != nil {
			z.errors <- err
			return
		}
		if uint64(compressed.Len()) < ze.fh.UncompressedSize64 {
			futureReader <- compressed
		} else {
			buf, err := readFile(r)
			if err != nil {
				z.errors <- err
				return
			}
			ze.fh.Method = zip.Store
			futureReader <- bytes.NewReader(buf)
		}
	} else {
		buf, err := readFile(r)
		if err != nil {
			z.errors <- err
			return
		}
		ze.fh.Method = zip.Store
		futureReader <- bytes.NewReader(buf)
	}

	z.cpuRateLimiter.Finish()

	close(futureReader)

	compressChan <- ze
	close(compressChan)
}

// writeDirectory annotates that dir is a directory created for the src file or directory, and adds
// the directory entry to the zip file if directories are enabled.
func (z *ZipWriter) writeDirectory(dir string, src string, emulateJar bool) error {
	// clean the input
	dir = filepath.Clean(dir)

	// discover any uncreated directories in the path
	var zipDirs []string
	for dir != "" && dir != "." {
		if _, exists := z.createdDirs[dir]; exists {
			break
		}

		if prev, exists := z.createdFiles[dir]; exists {
			return fmt.Errorf("destination %q is both a directory %q and a file %q", dir, src, prev)
		}

		z.createdDirs[dir] = src
		// parent directories precede their children
		zipDirs = append([]string{dir}, zipDirs...)

		dir = filepath.Dir(dir)
	}

	if z.directories {
		// make a directory entry for each uncreated directory
		for _, cleanDir := range zipDirs {
			var dirHeader *zip.FileHeader

			if emulateJar && cleanDir+"/" == jar.MetaDir {
				dirHeader = jar.MetaDirFileHeader()
			} else {
				dirHeader = &zip.FileHeader{
					Name: cleanDir + "/",
				}
				dirHeader.SetMode(0755 | os.ModeDir)
			}

			dirHeader.SetModTime(z.time)

			ze := make(chan *zipEntry, 1)
			ze <- &zipEntry{
				fh: dirHeader,
			}
			close(ze)
			z.writeOps <- ze
		}
	}

	return nil
}

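// writeSymlink queues a write op that stores the symlink target of file as the
// contents of the zip entry at rel.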
func (z *ZipWriter) writeSymlink(rel, file string) error {
	fileHeader := &zip.FileHeader{
		Name: rel,
	}
	fileHeader.SetModTime(z.time)
	fileHeader.SetMode(0777 | os.ModeSymlink)

	dest, err := z.fs.Readlink(file)
	if err != nil {
		return err
	}

	fileHeader.UncompressedSize64 = uint64(len(dest))
	fileHeader.CRC32 = crc32.ChecksumIEEE([]byte(dest))

	ze := make(chan *zipEntry, 1)
	futureReaders := make(chan chan io.Reader, 1)
	futureReader := make(chan io.Reader, 1)
	futureReaders <- futureReader
	close(futureReaders)
	futureReader <- bytes.NewBufferString(dest)
	close(futureReader)

	ze <- &zipEntry{
		fh:            fileHeader,
		futureReaders: futureReaders,
	}
	close(ze)
	z.writeOps <- ze

	return nil
}
