Generate the compute_hash_tree command

Generate the transfer command "compute_hash_tree" for incremental
updates of non-A/B devices that enable Verified Boot 1.0.

Other changes include:
i.  factor out verity_utils so it can be used by both build_image and
    blockimgdiff
ii. add a new flag 'hashtree_info_generator' in sparse_image to generate
    the hashtree information.

Bug: 25170618
Test: generate a package for aosp_angler; and run simulator
Change-Id: I4d4d7a3e41dc3da810d2cbf8988e85d187c9ab0e
This commit is contained in:
Tianjie Xu
2018-08-30 00:32:07 -07:00
parent fabb2c9792
commit 67c7cbb9c8
6 changed files with 431 additions and 11 deletions

View File

@@ -32,7 +32,6 @@ from hashlib import sha1
import common import common
from rangelib import RangeSet from rangelib import RangeSet
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"] __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
@@ -649,6 +648,14 @@ class BlockImageDiff(object):
self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges) self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
if self.tgt.hashtree_info:
out.append("compute_hash_tree {} {} {} {} {}\n".format(
self.tgt.hashtree_info.hashtree_range.to_string_raw(),
self.tgt.hashtree_info.filesystem_range.to_string_raw(),
self.tgt.hashtree_info.hash_algorithm,
self.tgt.hashtree_info.salt,
self.tgt.hashtree_info.root_hash))
# Zero out extended blocks as a workaround for bug 20881595. # Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended: if self.tgt.extended:
assert (WriteSplitTransfers(out, "zero", self.tgt.extended) == assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
@@ -988,6 +995,12 @@ class BlockImageDiff(object):
assert touched[i] == 0 assert touched[i] == 0
touched[i] = 1 touched[i] = 1
if self.tgt.hashtree_info:
for s, e in self.tgt.hashtree_info.hashtree_range:
for i in range(s, e):
assert touched[i] == 0
touched[i] = 1
# Check that we've written every target block. # Check that we've written every target block.
for s, e in self.tgt.care_map: for s, e in self.tgt.care_map:
for i in range(s, e): for i in range(s, e):
@@ -1533,6 +1546,9 @@ class BlockImageDiff(object):
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers) AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
continue continue
elif tgt_fn == "__HASHTREE":
continue
elif tgt_fn in self.src.file_map: elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source. # Look for an exact pathname match in the source.
AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn], AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],

View File

@@ -701,7 +701,8 @@ def UnzipTemp(filename, pattern=None):
return tmp return tmp
def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks): def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
hashtree_info_generator=None):
"""Returns a SparseImage object suitable for passing to BlockImageDiff. """Returns a SparseImage object suitable for passing to BlockImageDiff.
This function loads the specified sparse image from the given path, and This function loads the specified sparse image from the given path, and
@@ -714,7 +715,8 @@ def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
tmpdir: The directory that contains the prebuilt image and block map file. tmpdir: The directory that contains the prebuilt image and block map file.
input_zip: The target-files ZIP archive. input_zip: The target-files ZIP archive.
allow_shared_blocks: Whether having shared blocks is allowed. allow_shared_blocks: Whether having shared blocks is allowed.
hashtree_info_generator: If present, generates the hashtree_info for this
sparse image.
Returns: Returns:
A SparseImage object, with file_map info loaded. A SparseImage object, with file_map info loaded.
""" """
@@ -732,8 +734,9 @@ def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
# unconditionally. Note that they are still part of care_map. (Bug: 20939131) # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
clobbered_blocks = "0" clobbered_blocks = "0"
image = sparse_img.SparseImage(path, mappath, clobbered_blocks, image = sparse_img.SparseImage(
allow_shared_blocks=allow_shared_blocks) path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
hashtree_info_generator=hashtree_info_generator)
# block.map may contain less blocks, because mke2fs may skip allocating blocks # block.map may contain less blocks, because mke2fs may skip allocating blocks
# if they contain all zeros. We can't reconstruct such a file from its block # if they contain all zeros. We can't reconstruct such a file from its block

View File

@@ -176,6 +176,7 @@ import zipfile
import common import common
import edify_generator import edify_generator
import verity_utils
if sys.hexversion < 0x02070000: if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr) print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -1411,8 +1412,12 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
target_info.get('ext4_share_dup_blocks') == "true") target_info.get('ext4_share_dup_blocks') == "true")
system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip, system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
allow_shared_blocks) allow_shared_blocks)
hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
"system", 4096, target_info)
system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip, system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
allow_shared_blocks) allow_shared_blocks,
hashtree_info_generator)
blockimgdiff_version = max( blockimgdiff_version = max(
int(i) for i in target_info.get("blockimgdiff_versions", "1").split(",")) int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -1439,8 +1444,11 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
raise RuntimeError("can't generate incremental that adds /vendor") raise RuntimeError("can't generate incremental that adds /vendor")
vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip, vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
allow_shared_blocks) allow_shared_blocks)
vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip, hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
allow_shared_blocks) "vendor", 4096, target_info)
vendor_tgt = common.GetSparseImage(
"vendor", OPTIONS.target_tmp, target_zip, allow_shared_blocks,
hashtree_info_generator)
# Check first block of vendor partition for remount R/W only if # Check first block of vendor partition for remount R/W only if
# disk type is ext4 # disk type is ext4

View File

@@ -33,7 +33,8 @@ class SparseImage(object):
""" """
def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None, def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
mode="rb", build_map=True, allow_shared_blocks=False): mode="rb", build_map=True, allow_shared_blocks=False,
hashtree_info_generator=None):
self.simg_f = f = open(simg_fn, mode) self.simg_f = f = open(simg_fn, mode)
header_bin = f.read(28) header_bin = f.read(28)
@@ -64,6 +65,8 @@ class SparseImage(object):
% (total_blks, blk_sz, total_chunks)) % (total_blks, blk_sz, total_chunks))
if not build_map: if not build_map:
assert not hashtree_info_generator, \
"Cannot generate the hashtree info without building the offset map."
return return
pos = 0 # in blocks pos = 0 # in blocks
@@ -102,7 +105,17 @@ class SparseImage(object):
if data_sz != 0: if data_sz != 0:
raise ValueError("Don't care chunk input size is non-zero (%u)" % raise ValueError("Don't care chunk input size is non-zero (%u)" %
(data_sz)) (data_sz))
else: # Fills the don't care data ranges with zeros.
# TODO(xunchang) pass the care_map to hashtree info generator.
if hashtree_info_generator:
fill_data = '\x00' * 4
# In order to compute verity hashtree on device, we need to write
# zeros explicitly to the don't care ranges. Because these ranges may
# contain non-zero data from the previous build.
care_data.append(pos)
care_data.append(pos + chunk_sz)
offset_map.append((pos, chunk_sz, None, fill_data))
pos += chunk_sz pos += chunk_sz
elif chunk_type == 0xCAC4: elif chunk_type == 0xCAC4:
@@ -128,6 +141,10 @@ class SparseImage(object):
extended = extended.intersect(all_blocks).subtract(self.care_map) extended = extended.intersect(all_blocks).subtract(self.care_map)
self.extended = extended self.extended = extended
self.hashtree_info = None
if hashtree_info_generator:
self.hashtree_info = hashtree_info_generator.Generate(self)
if file_map_fn: if file_map_fn:
self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks, self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
allow_shared_blocks) allow_shared_blocks)
@@ -246,6 +263,8 @@ class SparseImage(object):
remaining = remaining.subtract(ranges) remaining = remaining.subtract(ranges)
remaining = remaining.subtract(clobbered_blocks) remaining = remaining.subtract(clobbered_blocks)
if self.hashtree_info:
remaining = remaining.subtract(self.hashtree_info.hashtree_range)
# For all the remaining blocks in the care_map (ie, those that # For all the remaining blocks in the care_map (ie, those that
# aren't part of the data for any file nor part of the clobbered_blocks), # aren't part of the data for any file nor part of the clobbered_blocks),
@@ -308,6 +327,8 @@ class SparseImage(object):
out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks) out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
if clobbered_blocks: if clobbered_blocks:
out["__COPY"] = clobbered_blocks out["__COPY"] = clobbered_blocks
if self.hashtree_info:
out["__HASHTREE"] = self.hashtree_info.hashtree_range
def ResetFileMap(self): def ResetFileMap(self):
"""Throw away the file map and treat the entire image as """Throw away the file map and treat the entire image as

View File

@@ -0,0 +1,168 @@
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests for verity_utils.py."""
from __future__ import print_function
import os
import os.path
import unittest
import build_image
import common
import sparse_img
import test_utils
import verity_utils
from rangelib import RangeSet
class VerityUtilsTest(unittest.TestCase):
  """Unittests for the hashtree info generators in verity_utils."""

  def setUp(self):
    self.testdata_dir = test_utils.get_testdata_dir()

    self.partition_size = 1024 * 1024
    self.prop_dict = {
        'verity': 'true',
        'verity_fec': 'true',
        'system_verity_block_device': '/dev/block/system',
        'system_size': self.partition_size
    }

    self.hash_algorithm = "sha256"
    # A fixed salt and its resulting root hash for the deterministic image
    # built by _generate_image(), so parsed metadata can be compared against
    # known-good values.
    self.fixed_salt = \
        "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
    self.expected_root_hash = \
        "0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d"

  def tearDown(self):
    common.Cleanup()

  def _create_simg(self, raw_data):
    """Writes raw_data to a temp file and converts it to a sparse image.

    Returns:
      The path to the generated sparse image file.
    """
    output_file = common.MakeTempFile()
    raw_image = common.MakeTempFile()
    with open(raw_image, 'wb') as f:
      f.write(raw_data)

    # img2simg converts a raw image to the Android sparse image format; the
    # trailing argument is the block size.
    cmd = ["img2simg", raw_image, output_file, '4096']
    p = common.Run(cmd)
    p.communicate()
    self.assertEqual(0, p.returncode)

    return output_file

  def _generate_image(self):
    """Builds a 1 MiB verity-enabled sparse image with deterministic content.

    Returns:
      The path to a sparse image whose filesystem section cycles through the
      digits 0-9 and whose verity hashtree/metadata have been appended.
    """
    partition_size = 1024 * 1024
    adjusted_size, verity_size = build_image.AdjustPartitionSizeForVerity(
        partition_size, True)

    raw_image = ""
    for i in range(adjusted_size):
      raw_image += str(i % 10)

    output_file = self._create_simg(raw_image)

    # Append the verity metadata.
    prop_dict = {
        'partition_size': str(partition_size),
        'image_size': str(adjusted_size),
        'verity_block_device': '/dev/block/system',
        'verity_key': os.path.join(self.testdata_dir, 'testkey'),
        'verity_signer_cmd': 'verity_signer',
        'verity_size': str(verity_size),
    }
    build_image.MakeVerityEnabledImage(output_file, True, prop_dict)

    return output_file

  def test_VerifiedBootVersion1HashtreeInfoGenerator_create(self):
    # Fix: pass the block size (4096) as the second argument, matching
    # CreateHashtreeInfoGenerator's (partition_name, block_size, info_dict)
    # signature. The previous version passed a SparseImage object here, which
    # was silently stored as the generator's block_size. Creating a generator
    # does not need an actual image, so none is built.
    generator = verity_utils.CreateHashtreeInfoGenerator(
        'system', 4096, self.prop_dict)

    self.assertEqual(
        verity_utils.VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
    self.assertEqual(self.partition_size, generator.partition_size)
    self.assertTrue(generator.fec_supported)

  def test_VerifiedBootVersion1HashtreeInfoGenerator_decomposeImage(self):
    image_file = sparse_img.SparseImage(self._generate_image())

    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
        self.partition_size, 4096, True)
    generator.DecomposeSparseImage(image_file)
    # Expected section sizes for a 1 MiB fec-enabled verity partition.
    self.assertEqual(991232, generator.filesystem_size)
    self.assertEqual(12288, generator.hashtree_size)
    self.assertEqual(32768, generator.metadata_size)

  def test_VerifiedBootVersion1HashtreeInfoGenerator_parseHashtreeMetadata(
      self):
    image_file = sparse_img.SparseImage(self._generate_image())
    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
        self.partition_size, 4096, True)
    generator.DecomposeSparseImage(image_file)

    # pylint: disable=protected-access
    generator._ParseHashtreeMetadata()

    self.assertEqual(
        self.hash_algorithm, generator.hashtree_info.hash_algorithm)
    self.assertEqual(self.fixed_salt, generator.hashtree_info.salt)
    self.assertEqual(self.expected_root_hash, generator.hashtree_info.root_hash)

  def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_smoke(
      self):
    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
        self.partition_size, 4096, True)
    generator.image = sparse_img.SparseImage(self._generate_image())

    # Hand-construct a matching HashtreeInfo; ValidateHashtree() should be
    # able to reconstruct the exact same hashtree bytes from it.
    generator.hashtree_info = info = verity_utils.HashtreeInfo()
    info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
    info.hashtree_range = RangeSet(
        data=[991232 / 4096, (991232 + 12288) / 4096])
    info.hash_algorithm = self.hash_algorithm
    info.salt = self.fixed_salt
    info.root_hash = self.expected_root_hash

    self.assertTrue(generator.ValidateHashtree())

  def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_failure(
      self):
    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
        self.partition_size, 4096, True)
    generator.image = sparse_img.SparseImage(self._generate_image())

    # A corrupted root hash must make validation fail.
    generator.hashtree_info = info = verity_utils.HashtreeInfo()
    info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
    info.hashtree_range = RangeSet(
        data=[991232 / 4096, (991232 + 12288) / 4096])
    info.hash_algorithm = self.hash_algorithm
    info.salt = self.fixed_salt
    info.root_hash = "a" + self.expected_root_hash[1:]

    self.assertFalse(generator.ValidateHashtree())

  def test_VerifiedBootVersion1HashtreeInfoGenerator_generate(self):
    image_file = sparse_img.SparseImage(self._generate_image())
    generator = verity_utils.CreateHashtreeInfoGenerator(
        'system', 4096, self.prop_dict)
    info = generator.Generate(image_file)

    self.assertEqual(RangeSet(data=[0, 991232 / 4096]), info.filesystem_range)
    self.assertEqual(RangeSet(data=[991232 / 4096, (991232 + 12288) / 4096]),
                     info.hashtree_range)
    self.assertEqual(self.hash_algorithm, info.hash_algorithm)
    self.assertEqual(self.fixed_salt, info.salt)
    self.assertEqual(self.expected_root_hash, info.root_hash)

View File

@@ -0,0 +1,204 @@
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import struct
import common
from build_image import (AdjustPartitionSizeForVerity, GetVerityTreeSize,
GetVerityMetadataSize, BuildVerityTree)
from rangelib import RangeSet
class HashtreeInfoGenerationError(Exception):
  """An Exception raised during hashtree info generation.

  Exception.__init__ already accepts and stores the message, so the previous
  pass-through __init__ was redundant and has been removed; construction and
  str() behavior are unchanged.
  """
class HashtreeInfo(object):
  """Plain data holder for the fields needed to rebuild a verity hashtree.

  Attributes (all initialized to None):
    hashtree_range: block range of the hashtree section.
    filesystem_range: block range of the filesystem the tree covers.
    hash_algorithm: name of the hash algorithm used by the tree.
    salt: salt fed into the hash computation.
    root_hash: root hash of the tree.
  """

  def __init__(self):
    for field in ("hashtree_range", "filesystem_range", "hash_algorithm",
                  "salt", "root_hash"):
      setattr(self, field, None)
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
  """Returns a hashtree info generator for the given partition.

  Arguments:
    partition_name: Name of the partition, e.g. "system".
    block_size: Expected size in bytes of each block of the sparse image.
    info_dict: The build-time info dict with the verity properties.

  Returns:
    A VerifiedBootVersion1HashtreeInfoGenerator if verity 1.0 is enabled for
    this partition; None otherwise.
  """
  # Verity 1.0 needs both the global "verity" flag and a per-partition
  # "<name>_verity_block_device" entry; bail out early if either is missing.
  if info_dict.get("verity") != "true":
    return None
  if not info_dict.get("{}_verity_block_device".format(partition_name)):
    return None

  return VerifiedBootVersion1HashtreeInfoGenerator(
      info_dict["{}_size".format(partition_name)], block_size,
      info_dict.get("verity_fec") == "true")
class HashtreeInfoGenerator(object):
  """Abstract interface for generating a partition's hashtree info."""

  def Generate(self, image):
    """Parses and validates the hashtree info in the given sparse image."""
    raise NotImplementedError

  def DecomposeSparseImage(self, image):
    """Computes the sizes/offsets of the image's verity sections."""
    raise NotImplementedError

  def ValidateHashtree(self):
    """Checks the hashtree can be reconstructed from the filesystem data."""
    raise NotImplementedError
class VerifiedBootVersion2HashtreeInfoGenerator(HashtreeInfoGenerator):
  """Placeholder for Verified Boot 2.0 (AVB); not implemented yet."""
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
  """A class that parses the metadata of hashtree for a given partition."""

  def __init__(self, partition_size, block_size, fec_supported):
    """Initialize VerityTreeInfo with the sparse image and input property.

    Arguments:
      partition_size: The whole size in bytes of a partition, including the
          filesystem size, padding size, and verity size.
      block_size: Expected size in bytes of each block for the sparse image.
      fec_supported: True if the verity section contains fec data.
    """
    self.block_size = block_size
    self.partition_size = partition_size
    self.fec_supported = fec_supported

    # Populated by DecomposeSparseImage().
    self.image = None
    self.filesystem_size = None
    self.hashtree_size = None
    self.metadata_size = None

    # Filled in incrementally by DecomposeSparseImage() and
    # _ParseHashtreeMetadata(); returned by Generate().
    self.hashtree_info = HashtreeInfo()

  def DecomposeSparseImage(self, image):
    """Calculate the verity size based on the size of the input image.

    Since we already know the structure of a verity enabled image to be:
    [filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
    calculate the size and offset of each section.
    """
    self.image = image
    assert self.block_size == image.blocksize
    assert self.partition_size == image.total_blocks * self.block_size, \
        "partition size {} doesn't match with the calculated image size." \
        " total_blocks: {}".format(self.partition_size, image.total_blocks)

    # The filesystem size is the partition size minus the space reserved for
    # the hashtree, metadata and (optionally) fec data.
    adjusted_size, _ = AdjustPartitionSizeForVerity(
        self.partition_size, self.fec_supported)
    assert adjusted_size % self.block_size == 0

    verity_tree_size = GetVerityTreeSize(adjusted_size)
    assert verity_tree_size % self.block_size == 0

    metadata_size = GetVerityMetadataSize(adjusted_size)
    assert metadata_size % self.block_size == 0

    self.filesystem_size = adjusted_size
    self.hashtree_size = verity_tree_size
    self.metadata_size = metadata_size

    # Block ranges below rely on integer division of byte sizes by the block
    # size (Python 2 semantics for '/'); all sizes are block-aligned per the
    # asserts above.
    self.hashtree_info.filesystem_range = RangeSet(
        data=[0, adjusted_size / self.block_size])
    self.hashtree_info.hashtree_range = RangeSet(
        data=[adjusted_size / self.block_size,
              (adjusted_size + verity_tree_size) / self.block_size])

  def _ParseHashtreeMetadata(self):
    """Parses the hash_algorithm, root_hash, salt from the metadata block."""
    # The metadata section sits immediately after the hashtree.
    metadata_start = self.filesystem_size + self.hashtree_size
    metadata_range = RangeSet(
        data=[metadata_start / self.block_size,
              (metadata_start + self.metadata_size) / self.block_size])
    meta_data = ''.join(self.image.ReadRangeSet(metadata_range))

    # More info about the metadata structure available in:
    # system/extras/verity/build_verity_metadata.py
    META_HEADER_SIZE = 268
    header_bin = meta_data[0:META_HEADER_SIZE]
    header = struct.unpack("II256sI", header_bin)

    # header: magic_number, version, signature, table_len
    assert header[0] == 0xb001b001, header[0]
    table_len = header[3]
    verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
    table_entries = verity_table.rstrip().split()

    # Expected verity table format: "1 block_device block_device block_size
    # block_size data_blocks data_blocks hash_algorithm root_hash salt"
    assert len(table_entries) == 10, "Unexpected verity table size {}".format(
        len(table_entries))
    # Sanity-check that the table's block sizes and data block count agree
    # with the sizes computed in DecomposeSparseImage().
    assert (int(table_entries[3]) == self.block_size and
            int(table_entries[4]) == self.block_size)
    assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
            int(table_entries[6]) * self.block_size == self.filesystem_size)

    self.hashtree_info.hash_algorithm = table_entries[7]
    self.hashtree_info.root_hash = table_entries[8]
    self.hashtree_info.salt = table_entries[9]

  def ValidateHashtree(self):
    """Checks that we can reconstruct the verity hash tree."""
    # Writes the file system section to a temp file; and calls the executable
    # build_verity_tree to construct the hash tree.
    adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
    with open(adjusted_partition, "wb") as fd:
      self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)

    generated_verity_tree = common.MakeTempFile(prefix="verity")
    # BuildVerityTree fills prop_dict with the salt and root hash of the
    # tree it built; compare them against the values parsed from metadata.
    prop_dict = {}
    BuildVerityTree(adjusted_partition, generated_verity_tree, prop_dict)

    assert prop_dict["verity_salt"] == self.hashtree_info.salt
    if prop_dict["verity_root_hash"] != self.hashtree_info.root_hash:
      print("Calculated verty root hash {} doesn't match the one in metadata"
            " {}".format(prop_dict["verity_root_hash"],
                         self.hashtree_info.root_hash))
      return False

    # Reads the generated hash tree and checks if it has the exact same bytes
    # as the one in the sparse image.
    with open(generated_verity_tree, "rb") as fd:
      return fd.read() == ''.join(self.image.ReadRangeSet(
          self.hashtree_info.hashtree_range))

  def Generate(self, image):
    """Parses and validates the hashtree info in a sparse image.

    Returns:
      hashtree_info: The information needed to reconstruct the hashtree.

    Raises:
      HashtreeInfoGenerationError: If we fail to generate the exact bytes of
        the hashtree.
    """
    self.DecomposeSparseImage(image)
    self._ParseHashtreeMetadata()

    if not self.ValidateHashtree():
      raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")

    return self.hashtree_info