diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 8b179d5a32..51031ce822 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -73,6 +73,7 @@ class EmptyImage(object):
   """A zero-length image."""
   blocksize = 4096
   care_map = RangeSet()
+  clobbered_blocks = RangeSet()
   total_blocks = 0
   file_map = {}
   def ReadRangeSet(self, ranges):
@@ -105,6 +106,7 @@ class DataImage(object):
 
     self.total_blocks = len(self.data) / self.blocksize
     self.care_map = RangeSet(data=(0, self.total_blocks))
+    self.clobbered_blocks = RangeSet()
 
     zero_blocks = []
     nonzero_blocks = []
@@ -126,9 +128,9 @@ class DataImage(object):
     return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
 
   def TotalSha1(self):
-    if not hasattr(self, "sha1"):
-      self.sha1 = sha1(self.data).hexdigest()
-    return self.sha1
+    # DataImage always carries empty clobbered_blocks.
+    assert self.clobbered_blocks.size() == 0
+    return sha1(self.data).hexdigest()
 
 
 class Transfer(object):
@@ -174,6 +176,10 @@ class Transfer(object):
 #    (Typically a domain is a file, and the key in file_map is the
 #    pathname.)
 #
+#  clobbered_blocks: a RangeSet containing which blocks contain data
+#    but may be altered by the FS. They need to be excluded when
+#    verifying the partition integrity.
+#
 #  ReadRangeSet(): a function that takes a RangeSet and returns the
 #    data contained in the image blocks of that RangeSet.  The data
 #    is returned as a list or tuple of strings; concatenating the
@@ -183,7 +189,7 @@ class Transfer(object):
 #
 #  TotalSha1(): a function that returns (as a hex string) the SHA-1
 #    hash of all the data in the image (ie, all the blocks in the
-#    care_map)
+#    care_map minus clobbered_blocks).
 #
 # When creating a BlockImageDiff, the src image may be None, in which
 # case the list of transfers produced will never read from the
@@ -768,6 +774,13 @@ class BlockImageDiff(object):
                  "zero", self.transfers)
         continue
 
+      elif tgt_fn == "__COPY":
+        # "__COPY" domain includes all the blocks not contained in any
+        # file and that need to be copied unconditionally to the target.
+        print("FindTransfers: new", tgt_ranges);
+        Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+        continue
+
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index a596c26168..b28cbf16e9 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -1030,6 +1030,9 @@ class BlockDifference:
     self.partition = partition
     self.check_first_block = check_first_block
 
+    # Due to http://b/20939131, check_first_block is disabled temporarily.
+    assert not self.check_first_block
+
     if version is None:
       version = 1
       if OPTIONS.info_dict:
@@ -1058,29 +1061,28 @@ class BlockDifference:
     self._WriteUpdate(script, output_zip)
 
   def WriteVerifyScript(self, script):
+    partition = self.partition
     if not self.src:
-      script.Print("Image %s will be patched unconditionally." % (self.partition,))
+      script.Print("Image %s will be patched unconditionally." % (partition,))
     else:
+      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
+      ranges_str = ranges.to_string_raw()
+      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+          self.device, ranges_str, self.src.TotalSha1()))
+      script.Print('Verified %s image...' % (partition,))
+      script.AppendExtra('else')
+
+      # When generating incrementals for the system and vendor partitions,
+      # explicitly check the first block (which contains the superblock) of
+      # the partition to see if it's what we expect. If this check fails,
+      # give an explicit log message about the partition having been
+      # remounted R/W (the most likely explanation) and the need to flash to
+      # get OTAs working again.
       if self.check_first_block:
         self._CheckFirstBlock(script)
 
-      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' %
-                         (self.device, self.src.care_map.to_string_raw(),
-                          self.src.TotalSha1()))
-      script.Print("Verified %s image..." % (self.partition,))
-      # Abort the OTA update if it doesn't support resumable OTA (i.e. version<3)
-      # and the checksum doesn't match the one in the source partition.
-      if self.version < 3:
-        script.AppendExtra(('else\n'
-                            '  abort("%s partition has unexpected contents");\n'
-                            'endif;') % (self.partition))
-      else:
-        script.AppendExtra(('else\n'
-                            '  (range_sha1("%s", "%s") == "%s") ||\n'
-                            '  abort("%s partition has unexpected contents");\n'
-                            'endif;') %
-                           (self.device, self.tgt.care_map.to_string_raw(),
-                            self.tgt.TotalSha1(), self.partition))
+      script.AppendExtra(('  abort("%s partition has unexpected contents");\n'
+                          'endif;') % (partition))
 
   def _WriteUpdate(self, script, output_zip):
     partition = self.partition
@@ -1098,6 +1100,9 @@ class BlockDifference:
             (self.device, partition, partition, partition))
     script.AppendExtra(script._WordWrap(call))
 
+  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
+  # remounting R/W. Will change the checking to a finer-grained way to
+  # mask off those bits.
   def _CheckFirstBlock(self, script):
     r = RangeSet((0, 1))
     h = sha1()
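For reference, here is a minimal Python sketch of the edify fragment that the reworked WriteVerifyScript produces for a source image. It is not part of the patch; the device path, range string and SHA-1 below are made-up placeholders. The point is that the source hash now covers care_map minus clobbered_blocks, and a mismatch still aborts the update.

from __future__ import print_function

def build_verify_fragment(device, ranges_str, expected_sha1, partition):
  # Mirrors the strings appended by WriteVerifyScript: hash only the
  # care_map-minus-clobbered_blocks ranges, print on success, abort otherwise.
  return "\n".join([
      'if range_sha1("%s", "%s") == "%s" then' % (
          device, ranges_str, expected_sha1),
      'ui_print("Verified %s image...");' % (partition,),
      'else',
      '  abort("%s partition has unexpected contents");' % (partition,),
      'endif;',
  ])

if __name__ == "__main__":
  print(build_verify_fragment(
      "/dev/block/platform/msm_sdcc.1/by-name/system",  # hypothetical device
      "2,1,531",    # example range string with block 0 (clobbered) dropped
      "0" * 40,     # placeholder SHA-1
      "system"))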
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index 25309a49ce..768f4cb592 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -457,7 +457,13 @@ def GetImage(which, tmpdir, info_dict):
       path = add_img_to_target_files.BuildVendor(
           tmpdir, info_dict, block_list=mappath)
 
-  return sparse_img.SparseImage(path, mappath)
+  # Bug: http://b/20939131
+  # In ext4 filesystems, block 0 might be changed even when mounted R/O.
+  # We add it to clobbered_blocks so that it will be written to the target
+  # unconditionally. Note that the block is still part of care_map.
+  clobbered_blocks = "0"
+
+  return sparse_img.SparseImage(path, mappath, clobbered_blocks)
 
 
 def WriteFullOTAPackage(input_zip, output_zip):
@@ -748,7 +754,7 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
         OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
 
   system_diff = common.BlockDifference("system", system_tgt, system_src,
-                                       check_first_block=True,
+                                       check_first_block=False,
                                        version=blockimgdiff_version)
 
   if HasVendorPartition(target_zip):
@@ -757,7 +763,7 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
     vendor_src = GetImage("vendor", OPTIONS.source_tmp, OPTIONS.source_info_dict)
     vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, OPTIONS.target_info_dict)
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
-                                         check_first_block=True,
+                                         check_first_block=False,
                                          version=blockimgdiff_version)
   else:
     vendor_diff = None
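To illustrate why GetImage() marks block 0 as clobbered, here is a toy, set-based model; it is only a sketch and does not use the real rangelib.RangeSet or sparse image code. Block 0 stays in care_map (so it is still copied to the target via the "__COPY" domain), but it is excluded from the verification hash, so a block 0 rewritten by ext4 no longer trips the source check.

from hashlib import sha1

BLOCKSIZE = 4096

def total_sha1(read_block, care_blocks, clobbered_blocks):
  # Mirrors SparseImage.TotalSha1(): hash care_map minus clobbered_blocks.
  h = sha1()
  for b in sorted(care_blocks - clobbered_blocks):
    h.update(read_block(b))
  return h.hexdigest()

if __name__ == "__main__":
  blocks = {i: bytes(bytearray([i])) * BLOCKSIZE for i in range(4)}
  care = {0, 1, 2, 3}
  clobbered = {0}      # ext4 may touch block 0 even when mounted R/O

  before = total_sha1(lambda b: blocks[b], care, clobbered)
  blocks[0] = b"\xff" * BLOCKSIZE    # simulate the FS clobbering block 0
  after = total_sha1(lambda b: blocks[b], care, clobbered)

  # Verification is unaffected: block 0 is not hashed, yet it remains in
  # care_map and will be written unconditionally on the target.
  assert before == after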
+ clobbered_blocks = "0" + + return sparse_img.SparseImage(path, mappath, clobbered_blocks) def WriteFullOTAPackage(input_zip, output_zip): @@ -748,7 +754,7 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(",")) system_diff = common.BlockDifference("system", system_tgt, system_src, - check_first_block=True, + check_first_block=False, version=blockimgdiff_version) if HasVendorPartition(target_zip): @@ -757,7 +763,7 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip): vendor_src = GetImage("vendor", OPTIONS.source_tmp, OPTIONS.source_info_dict) vendor_tgt = GetImage("vendor", OPTIONS.target_tmp, OPTIONS.target_info_dict) vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src, - check_first_block=True, + check_first_block=False, version=blockimgdiff_version) else: vendor_diff = None diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py index 7574747f5d..e253fbfca2 100644 --- a/tools/releasetools/sparse_img.py +++ b/tools/releasetools/sparse_img.py @@ -22,10 +22,17 @@ from hashlib import sha1 from rangelib import * class SparseImage(object): - """Wraps a sparse image file (and optional file map) into an image - object suitable for passing to BlockImageDiff.""" + """Wraps a sparse image file into an image object. - def __init__(self, simg_fn, file_map_fn=None): + Wraps a sparse image file (and optional file map and clobbered_blocks) into + an image object suitable for passing to BlockImageDiff. file_map contains + the mapping between files and their blocks. clobbered_blocks contains the set + of blocks that should be always written to the target regardless of the old + contents (i.e. copying instead of patching). clobbered_blocks should be in + the form of a string like "0" or "0 1-5 8". + """ + + def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None): self.simg_f = f = open(simg_fn, "rb") header_bin = f.read(28) @@ -59,6 +66,7 @@ class SparseImage(object): pos = 0 # in blocks care_data = [] self.offset_map = offset_map = [] + self.clobbered_blocks = RangeSet(data=clobbered_blocks) for i in range(total_chunks): header_bin = f.read(12) @@ -106,7 +114,7 @@ class SparseImage(object): self.offset_index = [i[0] for i in offset_map] if file_map_fn: - self.LoadFileBlockMap(file_map_fn) + self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks) else: self.file_map = {"__DATA": self.care_map} @@ -114,9 +122,10 @@ class SparseImage(object): return [d for d in self._GetRangeData(ranges)] def TotalSha1(self): - """Return the SHA-1 hash of all data in the 'care' regions of this image.""" + """Return the SHA-1 hash of all data in the 'care' regions but not in + clobbered_blocks of this image.""" h = sha1() - for d in self._GetRangeData(self.care_map): + for d in self._GetRangeData(self.care_map.subtract(self.clobbered_blocks)): h.update(d) return h.hexdigest() @@ -159,7 +168,7 @@ class SparseImage(object): yield fill_data * (this_read * (self.blocksize >> 2)) to_read -= this_read - def LoadFileBlockMap(self, fn): + def LoadFileBlockMap(self, fn, clobbered_blocks): remaining = self.care_map self.file_map = out = {} @@ -169,14 +178,20 @@ class SparseImage(object): ranges = RangeSet.parse(ranges) out[fn] = ranges assert ranges.size() == ranges.intersect(remaining).size() + + # Currently we assume that blocks in clobbered_blocks are not part of + # any file. 
+        assert not clobbered_blocks.overlaps(ranges)
         remaining = remaining.subtract(ranges)
 
+    remaining = remaining.subtract(clobbered_blocks)
+
     # For all the remaining blocks in the care_map (ie, those that
-    # aren't part of the data for any file), divide them into blocks
-    # that are all zero and blocks that aren't.  (Zero blocks are
-    # handled specially because (1) there are usually a lot of them
-    # and (2) bsdiff handles files with long sequences of repeated
-    # bytes especially poorly.)
+    # aren't part of the data for any file nor part of the clobbered_blocks),
+    # divide them into blocks that are all zero and blocks that aren't.
+    # (Zero blocks are handled specially because (1) there are usually
+    # a lot of them and (2) bsdiff handles files with long sequences of
+    # repeated bytes especially poorly.)
     zero_blocks = []
     nonzero_blocks = []
 
@@ -206,6 +221,7 @@ class SparseImage(object):
 
     out["__ZERO"] = RangeSet(data=zero_blocks)
     out["__NONZERO"] = RangeSet(data=nonzero_blocks)
+    out["__COPY"] = clobbered_blocks
 
   def ResetFileMap(self):
     """Throw away the file map and treat the entire image as
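Finally, a simplified, set-based sketch of how LoadFileBlockMap now partitions the care_map; this is illustrative only (the real code operates on rangelib.RangeSet objects read from the block map file), and the file names and block numbers are made up. Per-file ranges are claimed first, clobbered blocks become the "__COPY" domain, and whatever remains is split into "__ZERO" and "__NONZERO".

def build_file_map(care_map, file_ranges, clobbered_blocks, is_zero):
  out = {}
  remaining = set(care_map)
  for name, ranges in file_ranges.items():
    # As in the patch, clobbered blocks must not belong to any file.
    assert not (set(ranges) & clobbered_blocks)
    out[name] = set(ranges)
    remaining -= set(ranges)
  remaining -= clobbered_blocks

  out["__ZERO"] = {b for b in remaining if is_zero(b)}
  out["__NONZERO"] = remaining - out["__ZERO"]
  out["__COPY"] = set(clobbered_blocks)
  return out

if __name__ == "__main__":
  fm = build_file_map(
      care_map=set(range(10)),
      file_ranges={"/system/app/A.apk": {2, 3, 4}, "/system/bin/sh": {7}},
      clobbered_blocks={0},                # block 0, as set by GetImage() above
      is_zero=lambda b: b in {8, 9})       # pretend blocks 8-9 are all zero
  assert fm["__COPY"] == {0}
  assert fm["__ZERO"] == {8, 9}
  assert fm["__NONZERO"] == {1, 5, 6}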