Don't add data descriptors when merging uncompressed zip entries for merge_zips.

Also filter out the META-INF/TRANSITIVE dir, and report warnings when
merge_zips sees duplicate entries with different CRC hashes.

Bug: b/65455145
Test: m clean && m -j java (locally)

Change-Id: I47172ffa27df71f3280f35f6b540a7b5a0c14550
This commit is contained in:
Nan Zhang
2017-09-13 13:17:43 -07:00
parent 58aebd40d4
commit d5998cce7d
2 changed files with 51 additions and 21 deletions

View File

@@ -26,11 +26,28 @@ import (
"android/soong/third_party/zip" "android/soong/third_party/zip"
) )
// strip is a flag.Value used to collect path prefixes that should be
// excluded from the merged output zip.
type strip struct{}

// String implements flag.Value; the flag has no meaningful single
// default, so a quoted empty string is reported.
func (s *strip) String() string {
	return "\"\""
}
// Set implements flag.Value. Each occurrence of the flag appends one
// path prefix to the package-level strippings list; entries whose
// names start with any recorded prefix are later skipped when merging.
// It never returns an error.
func (s *strip) Set(prefix string) error {
	// NOTE(review): renamed path_prefix -> prefix for Go MixedCaps
	// naming convention (no underscores in identifiers).
	strippings = append(strippings, prefix)
	return nil
}
var ( var (
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)") sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)") emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
strippings []string
) )
// init registers the repeatable -strip flag; each use adds one path
// prefix (via strip.Set) to exclude from the merged output zip.
func init() {
flag.Var(&strip{}, "strip", "the prefix of file path to be excluded from the output zip")
}
func main() { func main() {
flag.Usage = func() { flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: merge_zips [-j] output [inputs...]") fmt.Fprintln(os.Stderr, "usage: merge_zips [-j] output [inputs...]")
@@ -115,7 +132,13 @@ func mergeZips(readers []namedZipReader, writer *zip.Writer, sortEntries bool, e
orderedMappings := []fileMapping{} orderedMappings := []fileMapping{}
for _, namedReader := range readers { for _, namedReader := range readers {
FileLoop:
for _, file := range namedReader.reader.File { for _, file := range namedReader.reader.File {
for _, path_prefix := range strippings {
if strings.HasPrefix(file.Name, path_prefix) {
continue FileLoop
}
}
// check for other files or directories destined for the same path // check for other files or directories destined for the same path
dest := file.Name dest := file.Name
mapKey := dest mapKey := dest
@@ -142,8 +165,15 @@ func mergeZips(readers []namedZipReader, writer *zip.Writer, sortEntries bool, e
continue continue
} }
if !isDir { if !isDir {
return fmt.Errorf("Duplicate path %v found in %v and %v\n", if emulateJar {
dest, existingMapping.source.path, newMapping.source.path) if existingMapping.source.content.CRC32 != newMapping.source.content.CRC32 {
fmt.Fprintf(os.Stdout, "WARNING: Duplicate path %v found in %v and %v\n",
dest, existingMapping.source.path, newMapping.source.path)
}
} else {
return fmt.Errorf("Duplicate path %v found in %v and %v\n",
dest, existingMapping.source.path, newMapping.source.path)
}
} }
} else { } else {
// save entry // save entry
@@ -151,7 +181,6 @@ func mergeZips(readers []namedZipReader, writer *zip.Writer, sortEntries bool, e
orderedMappings = append(orderedMappings, newMapping) orderedMappings = append(orderedMappings, newMapping)
} }
} }
} }
if emulateJar { if emulateJar {

View File

@@ -32,7 +32,6 @@ func (w *Writer) CopyFrom(orig *File, newName string) error {
fileHeader := orig.FileHeader fileHeader := orig.FileHeader
fileHeader.Name = newName fileHeader.Name = newName
fh := &fileHeader fh := &fileHeader
fh.Flags |= DataDescriptorFlag
// The zip64 extras change between the Central Directory and Local File Header, while we use // The zip64 extras change between the Central Directory and Local File Header, while we use
// the same structure for both. The Local File Header is taken care of by us writing a data // the same structure for both. The Local File Header is taken care of by us writing a data
@@ -57,24 +56,26 @@ func (w *Writer) CopyFrom(orig *File, newName string) error {
} }
io.Copy(w.cw, io.NewSectionReader(orig.zipr, dataOffset, int64(orig.CompressedSize64))) io.Copy(w.cw, io.NewSectionReader(orig.zipr, dataOffset, int64(orig.CompressedSize64)))
// Write data descriptor. if orig.hasDataDescriptor() {
var buf []byte // Write data descriptor.
if fh.isZip64() { var buf []byte
buf = make([]byte, dataDescriptor64Len) if fh.isZip64() {
} else { buf = make([]byte, dataDescriptor64Len)
buf = make([]byte, dataDescriptorLen) } else {
buf = make([]byte, dataDescriptorLen)
}
b := writeBuf(buf)
b.uint32(dataDescriptorSignature)
b.uint32(fh.CRC32)
if fh.isZip64() {
b.uint64(fh.CompressedSize64)
b.uint64(fh.UncompressedSize64)
} else {
b.uint32(fh.CompressedSize)
b.uint32(fh.UncompressedSize)
}
_, err = w.cw.Write(buf)
} }
b := writeBuf(buf)
b.uint32(dataDescriptorSignature)
b.uint32(fh.CRC32)
if fh.isZip64() {
b.uint64(fh.CompressedSize64)
b.uint64(fh.UncompressedSize64)
} else {
b.uint32(fh.CompressedSize)
b.uint32(fh.UncompressedSize)
}
_, err = w.cw.Write(buf)
return err return err
} }