Disable VABC if either source/target doesn't support it
If the source build supports VABC, delta_generator/update_engine will attempt to use VABC. This is dangerous, as the target build won't have snapuserd to serve I/O requests when the device boots. Therefore, disable VABC if either the source or the target build doesn't support it.

Test: downgrade from a VABC-enabled build to a build without VABC
Change-Id: Ie8353e00f65354c2242ee5255b6652c6b62483a4
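The check this change introduces boils down to reading ro.virtual_ab.compression.enabled from each build's vendor build properties and keeping VABC enabled only when both the source and the target advertise support. A minimal standalone sketch of that logic follows; supports_vabc, should_disable_vabc and the plain-dict inputs are illustrative stand-ins, not the actual BuildInfo/OPTIONS objects from the diff below.

def supports_vabc(vendor_props):
    """True if a build's vendor properties advertise VABC support.

    vendor_props is assumed to be a plain dict of ro.* properties; the real
    change reads them through BuildInfo's new is_vabc property instead.
    """
    return vendor_props.get("ro.virtual_ab.compression.enabled") == "true"


def should_disable_vabc(source_props, target_props, disable_vabc_flag=False):
    """Mirrors the new check: keep VABC only if both builds support it."""
    if not supports_vabc(source_props) or not supports_vabc(target_props):
        return True
    return disable_vabc_flag


# Example: the tested downgrade path, from a VABC-enabled source build to a
# target build without VABC (no snapuserd to serve I/O after reboot).
source = {"ro.virtual_ab.compression.enabled": "true"}
target = {}
assert should_disable_vabc(source, target)

The diff below implements the same idea via a new BuildInfo.is_vabc property in common.py and a check in GenerateAbOtaPackage.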
tools/releasetools/common.py

@@ -138,6 +138,7 @@ PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']
 # existing search paths.
 RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']

+
 class ErrorCode(object):
   """Define error_codes for failures that happen during the actual
   update package installation.
@@ -226,6 +227,7 @@ def InitLogging():
 def SetHostToolLocation(tool_name, location):
   OPTIONS.host_tools[tool_name] = location

+
 def FindHostToolPath(tool_name):
   """Finds the path to the host tool.

@@ -246,6 +248,7 @@ def FindHostToolPath(tool_name):

   return tool_name

+
 def Run(args, verbose=None, **kwargs):
   """Creates and returns a subprocess.Popen object.

@@ -433,6 +436,13 @@ class BuildInfo(object):
   def fingerprint(self):
     return self._fingerprint

+  @property
+  def is_vabc(self):
+    vendor_prop = self.info_dict.get("vendor.build.prop")
+    vabc_enabled = vendor_prop and \
+        vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true"
+    return vabc_enabled
+
   @property
   def oem_props(self):
     return self._oem_props
@@ -461,7 +471,7 @@ class BuildInfo(object):
     """Returns the inquired build property for the provided partition."""

     # Boot image uses ro.[product.]bootimage instead of boot.
     prop_partition = "bootimage" if partition == "boot" else partition

     # If provided a partition for this property, only look within that
     # partition's build.prop.
@@ -652,10 +662,12 @@ def ExtractFromInputFile(input_file, fn):
       raise KeyError(fn)
     return file

+
 class RamdiskFormat(object):
   LZ4 = 1
   GZ = 2

+
 def _GetRamdiskFormat(info_dict):
   if info_dict.get('lz4_ramdisks') == 'true':
     ramdisk_format = RamdiskFormat.LZ4
@@ -663,6 +675,7 @@ def _GetRamdiskFormat(info_dict):
     ramdisk_format = RamdiskFormat.GZ
   return ramdisk_format

+
 def LoadInfoDict(input_file, repacking=False):
   """Loads the key/value pairs from the given input target_files.

@@ -781,7 +794,8 @@ def LoadInfoDict(input_file, repacking=False):
   for partition in PARTITIONS_WITH_BUILD_PROP:
     fingerprint = build_info.GetPartitionFingerprint(partition)
     if fingerprint:
-      d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
+      d["avb_{}_salt".format(partition)] = sha256(
+          fingerprint.encode()).hexdigest()
   try:
     d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
   except KeyError:
@@ -789,7 +803,6 @@ def LoadInfoDict(input_file, repacking=False):
   return d


-
 def LoadListFromFile(file_path):
   with open(file_path) as f:
     return f.read().splitlines()
@@ -859,7 +872,8 @@ class PartitionBuildProps(object):
     """Loads the build.prop file and builds the attributes."""

     if name == "boot":
-      data = PartitionBuildProps._ReadBootPropFile(input_file, ramdisk_format=ramdisk_format)
+      data = PartitionBuildProps._ReadBootPropFile(
+          input_file, ramdisk_format=ramdisk_format)
     else:
       data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

@@ -1106,7 +1120,7 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
     return " ".join(sorted(combined))

   if (framework_dict.get("use_dynamic_partitions") !=
           "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
     raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

   merged_dict = {"use_dynamic_partitions": "true"}
@@ -1371,7 +1385,8 @@ def AppendGkiSigningArgs(cmd):

   # Checks key_path exists, before appending --gki_signing_* args.
   if not os.path.exists(key_path):
-    raise ExternalError('gki_signing_key_path: "{}" not found'.format(key_path))
+    raise ExternalError(
+        'gki_signing_key_path: "{}" not found'.format(key_path))

   algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
   if key_path and algorithm:
@@ -1588,7 +1603,7 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
   RunAndCheckOutput(cmd)

   if (info_dict.get("boot_signer") == "true" and
       info_dict.get("verity_key")):
     # Hard-code the path as "/boot" for two-step special recovery image (which
     # will be loaded into /boot during the two-step OTA).
     if two_step_image:
@@ -1753,14 +1768,17 @@ def _BuildVendorBootImage(sourcedir, info_dict=None):
   if os.access(fn, os.F_OK):
     ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
     for ramdisk_fragment in ramdisk_fragments:
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "mkbootimg_args")
       cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "prebuilt_ramdisk")
       # Use prebuilt image if found, else create ramdisk from supplied files.
       if os.access(fn, os.F_OK):
         ramdisk_fragment_pathname = fn
       else:
-        ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+        ramdisk_fragment_root = os.path.join(
+            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
         ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
                                             ramdisk_format=ramdisk_format)
       ramdisk_fragment_imgs.append(ramdisk_fragment_img)
@@ -3533,7 +3551,7 @@ class DynamicPartitionsDifference(object):

     for g in tgt_groups:
       for p in shlex.split(info_dict.get(
               "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in target super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3541,7 +3559,7 @@ class DynamicPartitionsDifference(object):

     for g in src_groups:
       for p in shlex.split(source_info_dict.get(
               "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in source super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3650,7 +3668,7 @@ class DynamicPartitionsDifference(object):
       if u.src_size is not None and u.tgt_size is None:
         append('remove_group %s' % g)
       if (u.src_size is not None and u.tgt_size is not None and
           u.src_size > u.tgt_size):
         comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))

@@ -3659,7 +3677,7 @@ class DynamicPartitionsDifference(object):
         comment('Add group %s with maximum size %d' % (g, u.tgt_size))
         append('add_group %s %d' % (g, u.tgt_size))
       if (u.src_size is not None and u.tgt_size is not None and
           u.src_size < u.tgt_size):
         comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))

@@ -3693,7 +3711,8 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
   """
   tmp_dir = MakeTempDir('boot_', suffix='.img')
   try:
-    RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
+    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
+                       boot_img, '--out', tmp_dir])
     ramdisk = os.path.join(tmp_dir, 'ramdisk')
     if not os.path.isfile(ramdisk):
       logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
@@ -3704,7 +3723,8 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
     elif ramdisk_format == RamdiskFormat.GZ:
       with open(ramdisk, 'rb') as input_stream:
         with open(uncompressed_ramdisk, 'wb') as output_stream:
-          p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(), stdout=output_stream.fileno())
+          p2 = Run(['minigzip', '-d'], stdin=input_stream.fileno(),
+                   stdout=output_stream.fileno())
           p2.wait()
     else:
       logger.error('Only support lz4 or minigzip ramdisk format.')
@@ -3715,13 +3735,14 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
     # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
     # the host environment.
     RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
                       cwd=extracted_ramdisk)

     for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
       prop_file = os.path.join(extracted_ramdisk, search_path)
       if os.path.isfile(prop_file):
         return prop_file
-      logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
+      logger.warning(
+          'Unable to get boot image timestamp: no %s in ramdisk', search_path)

     return None

@@ -3754,7 +3775,8 @@ def GetBootImageTimestamp(boot_img):
     timestamp = props.GetProp('ro.bootimage.build.date.utc')
     if timestamp:
       return int(timestamp)
-    logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+    logger.warning(
+        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
     return None

   except ExternalError as e:
tools/releasetools/ota_from_target_files.py

@@ -1051,15 +1051,18 @@ def GenerateAbOtaPackage(target_file, output_file, source_file=None):
         "META/ab_partitions.txt is required for ab_update."
     target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
     source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
-    vendor_prop = source_info.info_dict.get("vendor.build.prop")
-    vabc_used = vendor_prop and \
-        vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true" and \
-        not OPTIONS.disable_vabc
-    if vabc_used:
+    # If source supports VABC, delta_generator/update_engine will attempt to
+    # use VABC. This dangerous, as the target build won't have snapuserd to
+    # serve I/O request when device boots. Therefore, disable VABC if source
+    # build doesn't supports it.
+    if not source_info.is_vabc or not target_info.is_vabc:
+      OPTIONS.disable_vabc = True
+    if not OPTIONS.disable_vabc:
       # TODO(zhangkelvin) Remove this once FEC on VABC is supported
       logger.info("Virtual AB Compression enabled, disabling FEC")
       OPTIONS.disable_fec_computation = True
       OPTIONS.disable_verity_computation = True
+
   else:
     assert "ab_partitions" in OPTIONS.info_dict, \
         "META/ab_partitions.txt is required for ab_update."
|
Reference in New Issue
Block a user