releasetools: Switch to Python logging.

Converts the following files to Python logging.

add_img_to_target_files.py
blockimgdiff.py
build_image.py
check_ota_package_signature.py
common.py
img_from_target_files.py
make_recovery_patch.py
ota_from_target_files.py
sparse_img.py
verity_utils.py

This separates logging outputs from normal outputs, and allows easier
switching between different verbosity levels. It also supports adjusting
the logging config via the LOGGING_CONFIG environment variable.

Test: `m dist`
Test: `python -m unittest discover build/make/tools/releasetools`
Change-Id: Idfc3f7aff83e03f41f3c9b5067d64fd595d5192d
This commit is contained in:
Tao Bao
2018-10-12 10:30:39 -07:00
parent bcd3d85971
commit 32fcdab512
10 changed files with 247 additions and 182 deletions

View File

@@ -46,6 +46,7 @@ Usage: add_img_to_target_files [flag] target_files
from __future__ import print_function from __future__ import print_function
import datetime import datetime
import logging
import os import os
import shlex import shlex
import shutil import shutil
@@ -62,8 +63,9 @@ if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr) print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1) sys.exit(1)
OPTIONS = common.OPTIONS logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
OPTIONS.add_missing = False OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False OPTIONS.rebuild_recovery = False
OPTIONS.replace_updated_files_list = [] OPTIONS.replace_updated_files_list = []
@@ -127,7 +129,7 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("system.img already exists; no need to rebuild...") logger.info("system.img already exists; no need to rebuild...")
return img.input_name return img.input_name
def output_sink(fn, data): def output_sink(fn, data):
@@ -142,7 +144,7 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None):
common.ZipWrite(output_zip, ofile.name, arc_name) common.ZipWrite(output_zip, ofile.name, arc_name)
if OPTIONS.rebuild_recovery: if OPTIONS.rebuild_recovery:
print("Building new recovery patch") logger.info("Building new recovery patch")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img, common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict) boot_img, info_dict=OPTIONS.info_dict)
@@ -159,7 +161,7 @@ def AddSystemOther(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system_other.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("system_other.img already exists; no need to rebuild...") logger.info("system_other.img already exists; no need to rebuild...")
return return
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img) CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
@@ -171,7 +173,7 @@ def AddVendor(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("vendor.img already exists; no need to rebuild...") logger.info("vendor.img already exists; no need to rebuild...")
return img.input_name return img.input_name
block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map") block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
@@ -186,7 +188,7 @@ def AddProduct(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "product.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("product.img already exists; no need to rebuild...") logger.info("product.img already exists; no need to rebuild...")
return img.input_name return img.input_name
block_list = OutputFile( block_list = OutputFile(
@@ -204,7 +206,7 @@ def AddProductServices(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES",
"product_services.img") "product_services.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("product_services.img already exists; no need to rebuild...") logger.info("product_services.img already exists; no need to rebuild...")
return img.input_name return img.input_name
block_list = OutputFile( block_list = OutputFile(
@@ -220,7 +222,7 @@ def AddOdm(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "odm.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("odm.img already exists; no need to rebuild...") logger.info("odm.img already exists; no need to rebuild...")
return img.input_name return img.input_name
block_list = OutputFile( block_list = OutputFile(
@@ -239,7 +241,7 @@ def AddDtbo(output_zip):
""" """
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "dtbo.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("dtbo.img already exists; no need to rebuild...") logger.info("dtbo.img already exists; no need to rebuild...")
return img.input_name return img.input_name
dtbo_prebuilt_path = os.path.join( dtbo_prebuilt_path = os.path.join(
@@ -269,7 +271,7 @@ def AddDtbo(output_zip):
def CreateImage(input_dir, info_dict, what, output_file, block_list=None): def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
print("creating " + what + ".img...") logger.info("creating " + what + ".img...")
image_props = build_image.ImagePropFromGlobalDict(info_dict, what) image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"] fstab = info_dict["fstab"]
@@ -340,7 +342,7 @@ def AddUserdata(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("userdata.img already exists; no need to rebuild...") logger.info("userdata.img already exists; no need to rebuild...")
return return
# Skip userdata.img if no size. # Skip userdata.img if no size.
@@ -348,7 +350,7 @@ def AddUserdata(output_zip):
if not image_props.get("partition_size"): if not image_props.get("partition_size"):
return return
print("creating userdata.img...") logger.info("creating userdata.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -411,7 +413,7 @@ def AddVBMeta(output_zip, partitions, name, needed_partitions):
img = OutputFile( img = OutputFile(
output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name)) output_zip, OPTIONS.input_tmp, "IMAGES", "{}.img".format(name))
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("{}.img already exists; not rebuilding...".format(name)) logger.info("%s.img already exists; not rebuilding...", name)
return img.input_name return img.input_name
avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"] avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
@@ -495,7 +497,7 @@ def AddCache(output_zip):
img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img") img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
if os.path.exists(img.input_name): if os.path.exists(img.input_name):
print("cache.img already exists; no need to rebuild...") logger.info("cache.img already exists; no need to rebuild...")
return return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache") image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -503,7 +505,7 @@ def AddCache(output_zip):
if "fs_type" not in image_props: if "fs_type" not in image_props:
return return
print("creating cache.img...") logger.info("creating cache.img...")
image_props["timestamp"] = FIXED_FILE_TIMESTAMP image_props["timestamp"] = FIXED_FILE_TIMESTAMP
@@ -580,8 +582,7 @@ def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
present_props = [x for x in prop_name_list if x in build_props] present_props = [x for x in prop_name_list if x in build_props]
if not present_props: if not present_props:
print("Warning: fingerprint is not present for partition {}". logger.warning("fingerprint is not present for partition %s", partition)
format(partition))
property_id, fingerprint = "unknown", "unknown" property_id, fingerprint = "unknown", "unknown"
else: else:
property_id = present_props[0] property_id = present_props[0]
@@ -633,7 +634,7 @@ def AddPackRadioImages(output_zip, images):
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name) prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
if os.path.exists(prebuilt_path): if os.path.exists(prebuilt_path):
print("%s already exists, no need to overwrite..." % (img_name,)) logger.info("%s already exists, no need to overwrite...", img_name)
continue continue
img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name) img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -698,7 +699,7 @@ def AddImagesToTargetFiles(filename):
if not OPTIONS.add_missing: if not OPTIONS.add_missing:
if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")): if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
print("target_files appears to already contain images.") logger.warning("target_files appears to already contain images.")
sys.exit(1) sys.exit(1)
OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True) OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.input_tmp, repacking=True)
@@ -748,7 +749,7 @@ def AddImagesToTargetFiles(filename):
partitions = dict() partitions = dict()
def banner(s): def banner(s):
print("\n\n++++ " + s + " ++++\n\n") logger.info("\n\n++++ " + s + " ++++\n\n")
banner("boot") banner("boot")
# common.GetBootableImage() returns the image directly if present. # common.GetBootableImage() returns the image directly if present.
@@ -912,20 +913,21 @@ def main(argv):
"is_signing"], "is_signing"],
extra_option_handler=option_handler) extra_option_handler=option_handler)
if len(args) != 1: if len(args) != 1:
common.Usage(__doc__) common.Usage(__doc__)
sys.exit(1) sys.exit(1)
common.InitLogging()
AddImagesToTargetFiles(args[0]) AddImagesToTargetFiles(args[0])
print("done.") logger.info("done.")
if __name__ == '__main__': if __name__ == '__main__':
try: try:
common.CloseInheritedPipes() common.CloseInheritedPipes()
main(sys.argv[1:]) main(sys.argv[1:])
except common.ExternalError as e: except common.ExternalError:
print("\n ERROR: %s\n" % (e,)) logger.exception("\n ERROR:\n")
sys.exit(1) sys.exit(1)
finally: finally:
common.Cleanup() common.Cleanup()

View File

@@ -19,6 +19,7 @@ import copy
import functools import functools
import heapq import heapq
import itertools import itertools
import logging
import multiprocessing import multiprocessing
import os import os
import os.path import os.path
@@ -33,6 +34,8 @@ from rangelib import RangeSet
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"] __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
logger = logging.getLogger(__name__)
def compute_patch(srcfile, tgtfile, imgdiff=False): def compute_patch(srcfile, tgtfile, imgdiff=False):
patchfile = common.MakeTempFile(prefix='patch-') patchfile = common.MakeTempFile(prefix='patch-')
@@ -304,8 +307,8 @@ class ImgdiffStats(object):
"""Prints a report of the collected imgdiff stats.""" """Prints a report of the collected imgdiff stats."""
def print_header(header, separator): def print_header(header, separator):
print(header) logger.info(header)
print(separator * len(header) + '\n') logger.info(separator * len(header) + '\n')
print_header(' Imgdiff Stats Report ', '=') print_header(' Imgdiff Stats Report ', '=')
for key in self.REASONS: for key in self.REASONS:
@@ -314,7 +317,7 @@ class ImgdiffStats(object):
values = self.stats[key] values = self.stats[key]
section_header = ' {} (count: {}) '.format(key, len(values)) section_header = ' {} (count: {}) '.format(key, len(values))
print_header(section_header, '-') print_header(section_header, '-')
print(''.join([' {}\n'.format(name) for name in values])) logger.info(''.join([' {}\n'.format(name) for name in values]))
class BlockImageDiff(object): class BlockImageDiff(object):
@@ -482,7 +485,7 @@ class BlockImageDiff(object):
self.WriteTransfers(prefix) self.WriteTransfers(prefix)
# Report the imgdiff stats. # Report the imgdiff stats.
if common.OPTIONS.verbose and not self.disable_imgdiff: if not self.disable_imgdiff:
self.imgdiff_stats.Report() self.imgdiff_stats.Report()
def WriteTransfers(self, prefix): def WriteTransfers(self, prefix):
@@ -692,16 +695,17 @@ class BlockImageDiff(object):
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
if OPTIONS.cache_size is not None: if OPTIONS.cache_size is not None:
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
print("max stashed blocks: %d (%d bytes), " logger.info(
"limit: %d bytes (%.2f%%)\n" % ( "max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n",
max_stashed_blocks, self._max_stashed_size, max_allowed, max_stashed_blocks, self._max_stashed_size, max_allowed,
self._max_stashed_size * 100.0 / max_allowed)) self._max_stashed_size * 100.0 / max_allowed)
else: else:
print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % ( logger.info(
max_stashed_blocks, self._max_stashed_size)) "max stashed blocks: %d (%d bytes), limit: <unknown>\n",
max_stashed_blocks, self._max_stashed_size)
def ReviseStashSize(self): def ReviseStashSize(self):
print("Revising stash size...") logger.info("Revising stash size...")
stash_map = {} stash_map = {}
# Create the map between a stash and its def/use points. For example, for a # Create the map between a stash and its def/use points. For example, for a
@@ -746,7 +750,7 @@ class BlockImageDiff(object):
# that will use this stash and replace the command with "new". # that will use this stash and replace the command with "new".
use_cmd = stash_map[stash_raw_id][2] use_cmd = stash_map[stash_raw_id][2]
replaced_cmds.append(use_cmd) replaced_cmds.append(use_cmd)
print("%10d %9s %s" % (sr.size(), "explicit", use_cmd)) logger.info("%10d %9s %s", sr.size(), "explicit", use_cmd)
else: else:
# Update the stashes map. # Update the stashes map.
if sh in stashes: if sh in stashes:
@@ -762,7 +766,7 @@ class BlockImageDiff(object):
if xf.src_ranges.overlaps(xf.tgt_ranges): if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed: if stashed_blocks + xf.src_ranges.size() > max_allowed:
replaced_cmds.append(xf) replaced_cmds.append(xf)
print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf)) logger.info("%10d %9s %s", xf.src_ranges.size(), "implicit", xf)
# Replace the commands in replaced_cmds with "new"s. # Replace the commands in replaced_cmds with "new"s.
for cmd in replaced_cmds: for cmd in replaced_cmds:
@@ -788,28 +792,29 @@ class BlockImageDiff(object):
stashes.pop(sh) stashes.pop(sh)
num_of_bytes = new_blocks * self.tgt.blocksize num_of_bytes = new_blocks * self.tgt.blocksize
print(" Total %d blocks (%d bytes) are packed as new blocks due to " logger.info(
"insufficient cache size." % (new_blocks, num_of_bytes)) " Total %d blocks (%d bytes) are packed as new blocks due to "
"insufficient cache size.", new_blocks, num_of_bytes)
return new_blocks return new_blocks
def ComputePatches(self, prefix): def ComputePatches(self, prefix):
print("Reticulating splines...") logger.info("Reticulating splines...")
diff_queue = [] diff_queue = []
patch_num = 0 patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f: with open(prefix + ".new.dat", "wb") as new_f:
for index, xf in enumerate(self.transfers): for index, xf in enumerate(self.transfers):
if xf.style == "zero": if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % ( logger.info(
tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name, "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
str(xf.tgt_ranges))) xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "new": elif xf.style == "new":
self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f) self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % ( logger.info(
tgt_size, tgt_size, 100.0, xf.style, "%10d %10d (%6.2f%%) %7s %s %s", tgt_size, tgt_size, 100.0,
xf.tgt_name, str(xf.tgt_ranges))) xf.style, xf.tgt_name, str(xf.tgt_ranges))
elif xf.style == "diff": elif xf.style == "diff":
# We can't compare src and tgt directly because they may have # We can't compare src and tgt directly because they may have
@@ -827,11 +832,12 @@ class BlockImageDiff(object):
xf.patch = None xf.patch = None
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
if xf.src_ranges != xf.tgt_ranges: if xf.src_ranges != xf.tgt_ranges:
print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % ( logger.info(
tgt_size, tgt_size, 100.0, xf.style, "%10d %10d (%6.2f%%) %7s %s %s (from %s)", tgt_size, tgt_size,
100.0, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else ( xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"), xf.tgt_name + " (from " + xf.src_name + ")"),
str(xf.tgt_ranges), str(xf.src_ranges))) str(xf.tgt_ranges), str(xf.src_ranges))
else: else:
if xf.patch: if xf.patch:
# We have already generated the patch with imgdiff, while # We have already generated the patch with imgdiff, while
@@ -850,9 +856,9 @@ class BlockImageDiff(object):
if diff_queue: if diff_queue:
if self.threads > 1: if self.threads > 1:
print("Computing patches (using %d threads)..." % (self.threads,)) logger.info("Computing patches (using %d threads)...", self.threads)
else: else:
print("Computing patches...") logger.info("Computing patches...")
diff_total = len(diff_queue) diff_total = len(diff_queue)
patches = [None] * diff_total patches = [None] * diff_total
@@ -874,13 +880,6 @@ class BlockImageDiff(object):
xf_index, imgdiff, patch_index = diff_queue.pop() xf_index, imgdiff, patch_index = diff_queue.pop()
xf = self.transfers[xf_index] xf = self.transfers[xf_index]
if sys.stdout.isatty():
diff_left = len(diff_queue)
progress = (diff_total - diff_left) * 100 / diff_total
# '\033[K' is to clear to EOL.
print(' [%3d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
sys.stdout.flush()
patch = xf.patch patch = xf.patch
if not patch: if not patch:
src_ranges = xf.src_ranges src_ranges = xf.src_ranges
@@ -918,13 +917,10 @@ class BlockImageDiff(object):
while threads: while threads:
threads.pop().join() threads.pop().join()
if sys.stdout.isatty():
print('\n')
if error_messages: if error_messages:
print('ERROR:') logger.error('ERROR:')
print('\n'.join(error_messages)) logger.error('\n'.join(error_messages))
print('\n\n\n') logger.error('\n\n\n')
sys.exit(1) sys.exit(1)
else: else:
patches = [] patches = []
@@ -938,14 +934,13 @@ class BlockImageDiff(object):
offset += xf.patch_len offset += xf.patch_len
patch_fd.write(patch) patch_fd.write(patch)
if common.OPTIONS.verbose: tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize logger.info(
print("%10d %10d (%6.2f%%) %7s %s %s %s" % ( "%10d %10d (%6.2f%%) %7s %s %s %s", xf.patch_len, tgt_size,
xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size, xf.patch_len * 100.0 / tgt_size, xf.style,
xf.style, xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name if xf.tgt_name == xf.src_name else ( xf.tgt_name + " (from " + xf.src_name + ")"),
xf.tgt_name + " (from " + xf.src_name + ")"), xf.tgt_ranges, xf.src_ranges)
xf.tgt_ranges, xf.src_ranges))
def AssertSha1Good(self): def AssertSha1Good(self):
"""Check the SHA-1 of the src & tgt blocks in the transfer list. """Check the SHA-1 of the src & tgt blocks in the transfer list.
@@ -1005,7 +1000,7 @@ class BlockImageDiff(object):
assert touched[i] == 1 assert touched[i] == 1
def ImproveVertexSequence(self): def ImproveVertexSequence(self):
print("Improving vertex order...") logger.info("Improving vertex order...")
# At this point our digraph is acyclic; we reversed any edges that # At this point our digraph is acyclic; we reversed any edges that
# were backwards in the heuristically-generated sequence. The # were backwards in the heuristically-generated sequence. The
@@ -1057,7 +1052,7 @@ class BlockImageDiff(object):
blocks will be written to the same stash slot in WriteTransfers(). blocks will be written to the same stash slot in WriteTransfers().
""" """
print("Reversing backward edges...") logger.info("Reversing backward edges...")
in_order = 0 in_order = 0
out_of_order = 0 out_of_order = 0
stash_raw_id = 0 stash_raw_id = 0
@@ -1089,15 +1084,15 @@ class BlockImageDiff(object):
xf.goes_after[u] = None # value doesn't matter xf.goes_after[u] = None # value doesn't matter
u.goes_before[xf] = None u.goes_before[xf] = None
print((" %d/%d dependencies (%.2f%%) were violated; " logger.info(
"%d source blocks stashed.") % " %d/%d dependencies (%.2f%%) were violated; %d source blocks "
(out_of_order, in_order + out_of_order, "stashed.", out_of_order, in_order + out_of_order,
(out_of_order * 100.0 / (in_order + out_of_order)) (out_of_order * 100.0 / (in_order + out_of_order)) if (
if (in_order + out_of_order) else 0.0, in_order + out_of_order) else 0.0,
stash_size)) stash_size)
def FindVertexSequence(self): def FindVertexSequence(self):
print("Finding vertex sequence...") logger.info("Finding vertex sequence...")
# This is based on "A Fast & Effective Heuristic for the Feedback # This is based on "A Fast & Effective Heuristic for the Feedback
# Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of # Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of
@@ -1210,7 +1205,7 @@ class BlockImageDiff(object):
self.transfers = new_transfers self.transfers = new_transfers
def GenerateDigraph(self): def GenerateDigraph(self):
print("Generating digraph...") logger.info("Generating digraph...")
# Each item of source_ranges will be: # Each item of source_ranges will be:
# - None, if that block is not used as a source, # - None, if that block is not used as a source,
@@ -1376,9 +1371,9 @@ class BlockImageDiff(object):
if tgt_changed < tgt_size * crop_threshold: if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size assert tgt_changed + tgt_skipped.size() == tgt_size
print('%10d %10d (%6.2f%%) %s' % ( logger.info(
tgt_skipped.size(), tgt_size, '%10d %10d (%6.2f%%) %s', tgt_skipped.size(), tgt_size,
tgt_skipped.size() * 100.0 / tgt_size, tgt_name)) tgt_skipped.size() * 100.0 / tgt_size, tgt_name)
AddSplitTransfers( AddSplitTransfers(
"%s-skipped" % (tgt_name,), "%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,), "%s-skipped" % (src_name,),
@@ -1519,7 +1514,7 @@ class BlockImageDiff(object):
split_src_ranges, split_src_ranges,
patch_content)) patch_content))
print("Finding transfers...") logger.info("Finding transfers...")
large_apks = [] large_apks = []
split_large_apks = [] split_large_apks = []

View File

@@ -26,6 +26,7 @@ Usage: build_image.py input_directory properties_file output_image \\
from __future__ import print_function from __future__ import print_function
import logging
import os import os
import os.path import os.path
import re import re
@@ -35,6 +36,8 @@ import sys
import common import common
import verity_utils import verity_utils
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE BLOCK_SIZE = common.BLOCK_SIZE
BYTES_IN_MB = 1024 * 1024 BYTES_IN_MB = 1024 * 1024
@@ -228,8 +231,8 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
"partition_size" not in prop_dict): "partition_size" not in prop_dict):
# If partition_size is not defined, use output of `du' + reserved_size. # If partition_size is not defined, use output of `du' + reserved_size.
size = GetDiskUsage(in_dir) size = GetDiskUsage(in_dir)
if OPTIONS.verbose: logger.info(
print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB)) "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
size += int(prop_dict.get("partition_reserved_size", 0)) size += int(prop_dict.get("partition_reserved_size", 0))
# Round this up to a multiple of 4K so that avbtool works # Round this up to a multiple of 4K so that avbtool works
size = common.RoundUpTo4K(size) size = common.RoundUpTo4K(size)
@@ -241,8 +244,8 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
lambda x: verity_utils.AVBCalcMaxImageSize( lambda x: verity_utils.AVBCalcMaxImageSize(
avbtool, avb_footer_type, x, avb_signing_args)) avbtool, avb_footer_type, x, avb_signing_args))
prop_dict["partition_size"] = str(size) prop_dict["partition_size"] = str(size)
if OPTIONS.verbose: logger.info(
print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file)) "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
prop_dict["image_size"] = prop_dict["partition_size"] prop_dict["image_size"] = prop_dict["partition_size"]
@@ -350,8 +353,8 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB) du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
# Suppress any errors from GetDiskUsage() to avoid hiding the real errors # Suppress any errors from GetDiskUsage() to avoid hiding the real errors
# from common.RunAndCheckOutput(). # from common.RunAndCheckOutput().
except Exception as e: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
print(e, file=sys.stderr) logger.exception("Failed to compute disk usage with du")
du_str = "unknown" du_str = "unknown"
print( print(
"Out of space? The tree size of {} is {}, with reserved space of {} " "Out of space? The tree size of {} is {}, with reserved space of {} "
@@ -664,6 +667,8 @@ def main(argv):
print(__doc__) print(__doc__)
sys.exit(1) sys.exit(1)
common.InitLogging()
in_dir = argv[0] in_dir = argv[0]
glob_dict_file = argv[1] glob_dict_file = argv[1]
out_file = argv[2] out_file = argv[2]
@@ -697,7 +702,7 @@ def main(argv):
elif image_filename == "product_services.img": elif image_filename == "product_services.img":
mount_point = "product_services" mount_point = "product_services"
else: else:
print("error: unknown image file name ", image_filename, file=sys.stderr) logger.error("Unknown image file name %s", image_filename)
sys.exit(1) sys.exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point) image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
@@ -705,14 +710,14 @@ def main(argv):
try: try:
BuildImage(in_dir, image_properties, out_file, target_out) BuildImage(in_dir, image_properties, out_file, target_out)
except: except:
print("Error: Failed to build {} from {}".format(out_file, in_dir), logger.error("Failed to build %s from %s", out_file, in_dir)
file=sys.stderr)
raise raise
if prop_file_out: if prop_file_out:
glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point) glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
SaveGlobalDict(prop_file_out, glob_dict_out) SaveGlobalDict(prop_file_out, glob_dict_out)
if __name__ == '__main__': if __name__ == '__main__':
try: try:
main(sys.argv[1:]) main(sys.argv[1:])

View File

@@ -21,16 +21,18 @@ Verify a given OTA package with the specifed certificate.
from __future__ import print_function from __future__ import print_function
import argparse import argparse
import logging
import re import re
import subprocess import subprocess
import sys import sys
import zipfile import zipfile
from hashlib import sha1 from hashlib import sha1
from hashlib import sha256 from hashlib import sha256
import common import common
logger = logging.getLogger(__name__)
def CertUsesSha256(cert): def CertUsesSha256(cert):
"""Check if the cert uses SHA-256 hashing algorithm.""" """Check if the cert uses SHA-256 hashing algorithm."""
@@ -181,6 +183,8 @@ def main():
parser.add_argument('package', help='The OTA package to be verified.') parser.add_argument('package', help='The OTA package to be verified.')
args = parser.parse_args() args = parser.parse_args()
common.InitLogging()
VerifyPackage(args.certificate, args.package) VerifyPackage(args.certificate, args.package)
VerifyAbOtaPayload(args.certificate, args.package) VerifyAbOtaPayload(args.certificate, args.package)

View File

@@ -20,6 +20,9 @@ import getopt
import getpass import getpass
import gzip import gzip
import imp import imp
import json
import logging
import logging.config
import os import os
import platform import platform
import re import re
@@ -37,6 +40,8 @@ from hashlib import sha1, sha256
import blockimgdiff import blockimgdiff
import sparse_img import sparse_img
logger = logging.getLogger(__name__)
class Options(object): class Options(object):
def __init__(self): def __init__(self):
@@ -121,13 +126,53 @@ class ExternalError(RuntimeError):
pass pass
def InitLogging():
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format':
'%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
},
'handlers': {
'default': {
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'WARNING',
'propagate': True,
}
}
}
env_config = os.getenv('LOGGING_CONFIG')
if env_config:
with open(env_config) as f:
config = json.load(f)
else:
config = DEFAULT_LOGGING_CONFIG
# Increase the logging level for verbose mode.
if OPTIONS.verbose:
config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
config['loggers']['']['level'] = 'INFO'
logging.config.dictConfig(config)
def Run(args, verbose=None, **kwargs): def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object. """Creates and returns a subprocess.Popen object.
Args: Args:
args: The command represented as a list of strings. args: The command represented as a list of strings.
verbose: Whether the commands should be shown (default to OPTIONS.verbose verbose: Whether the commands should be shown. Default to the global
if unspecified). verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env, kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them. subprocess.STDOUT respectively unless caller specifies any of them.
@@ -135,13 +180,12 @@ def Run(args, verbose=None, **kwargs):
Returns: Returns:
A subprocess.Popen object. A subprocess.Popen object.
""" """
if verbose is None:
verbose = OPTIONS.verbose
if 'stdout' not in kwargs and 'stderr' not in kwargs: if 'stdout' not in kwargs and 'stderr' not in kwargs:
kwargs['stdout'] = subprocess.PIPE kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT kwargs['stderr'] = subprocess.STDOUT
if verbose: # Don't log any if caller explicitly says so.
print(" Running: \"{}\"".format(" ".join(args))) if verbose != False:
logger.info(" Running: \"%s\"", " ".join(args))
return subprocess.Popen(args, **kwargs) return subprocess.Popen(args, **kwargs)
@@ -150,8 +194,8 @@ def RunAndCheckOutput(args, verbose=None, **kwargs):
Args: Args:
args: The command represented as a list of strings. args: The command represented as a list of strings.
verbose: Whether the commands should be shown (default to OPTIONS.verbose verbose: Whether the commands should be shown. Default to the global
if unspecified). verbosity if unspecified.
kwargs: Any additional args to be passed to subprocess.Popen(), such as env, kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
stdin, etc. stdout and stderr will default to subprocess.PIPE and stdin, etc. stdout and stderr will default to subprocess.PIPE and
subprocess.STDOUT respectively unless caller specifies any of them. subprocess.STDOUT respectively unless caller specifies any of them.
@@ -162,12 +206,11 @@ def RunAndCheckOutput(args, verbose=None, **kwargs):
Raises: Raises:
ExternalError: On non-zero exit from the command. ExternalError: On non-zero exit from the command.
""" """
if verbose is None:
verbose = OPTIONS.verbose
proc = Run(args, verbose=verbose, **kwargs) proc = Run(args, verbose=verbose, **kwargs)
output, _ = proc.communicate() output, _ = proc.communicate()
if verbose: # Don't log any if caller explicitly says so.
print("{}".format(output.rstrip())) if verbose != False:
logger.info("%s", output.rstrip())
if proc.returncode != 0: if proc.returncode != 0:
raise ExternalError( raise ExternalError(
"Failed to run command '{}' (exit code {}):\n{}".format( "Failed to run command '{}' (exit code {}):\n{}".format(
@@ -277,8 +320,8 @@ def LoadInfoDict(input_file, repacking=False):
if os.path.exists(system_base_fs_file): if os.path.exists(system_base_fs_file):
d["system_base_fs_file"] = system_base_fs_file d["system_base_fs_file"] = system_base_fs_file
else: else:
print("Warning: failed to find system base fs file: %s" % ( logger.warning(
system_base_fs_file,)) "Failed to find system base fs file: %s", system_base_fs_file)
del d["system_base_fs_file"] del d["system_base_fs_file"]
if "vendor_base_fs_file" in d: if "vendor_base_fs_file" in d:
@@ -287,8 +330,8 @@ def LoadInfoDict(input_file, repacking=False):
if os.path.exists(vendor_base_fs_file): if os.path.exists(vendor_base_fs_file):
d["vendor_base_fs_file"] = vendor_base_fs_file d["vendor_base_fs_file"] = vendor_base_fs_file
else: else:
print("Warning: failed to find vendor base fs file: %s" % ( logger.warning(
vendor_base_fs_file,)) "Failed to find vendor base fs file: %s", vendor_base_fs_file)
del d["vendor_base_fs_file"] del d["vendor_base_fs_file"]
def makeint(key): def makeint(key):
@@ -364,7 +407,7 @@ def LoadBuildProp(read_helper, prop_file):
try: try:
data = read_helper(prop_file) data = read_helper(prop_file)
except KeyError: except KeyError:
print("Warning: could not read %s" % (prop_file,)) logger.warning("Failed to read %s", prop_file)
data = "" data = ""
return LoadDictionaryFromLines(data.split("\n")) return LoadDictionaryFromLines(data.split("\n"))
@@ -394,7 +437,7 @@ def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
try: try:
data = read_helper(recovery_fstab_path) data = read_helper(recovery_fstab_path)
except KeyError: except KeyError:
print("Warning: could not find {}".format(recovery_fstab_path)) logger.warning("Failed to find %s", recovery_fstab_path)
data = "" data = ""
assert fstab_version == 2 assert fstab_version == 2
@@ -447,7 +490,7 @@ def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
def DumpInfoDict(d): def DumpInfoDict(d):
for k, v in sorted(d.items()): for k, v in sorted(d.items()):
print("%-25s = (%s) %s" % (k, type(v).__name__, v)) logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
def AppendAVBSigningArgs(cmd, partition): def AppendAVBSigningArgs(cmd, partition):
@@ -657,15 +700,15 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name) prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path): if os.path.exists(prebuilt_path):
print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)) logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path) return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name) prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path): if os.path.exists(prebuilt_path):
print("using prebuilt %s from IMAGES..." % (prebuilt_name,)) logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
return File.FromLocalFile(name, prebuilt_path) return File.FromLocalFile(name, prebuilt_path)
print("building image from target_files %s..." % (tree_subdir,)) logger.info("building image from target_files %s...", tree_subdir)
if info_dict is None: if info_dict is None:
info_dict = OPTIONS.info_dict info_dict = OPTIONS.info_dict
@@ -1001,9 +1044,9 @@ def CheckSize(data, target, info_dict):
if pct >= 99.0: if pct >= 99.0:
raise ExternalError(msg) raise ExternalError(msg)
elif pct >= 95.0: elif pct >= 95.0:
print("\n WARNING: %s\n" % (msg,)) logger.warning("\n WARNING: %s\n", msg)
elif OPTIONS.verbose: else:
print(" ", msg) logger.info(" %s", msg)
def ReadApkCerts(tf_zip): def ReadApkCerts(tf_zip):
@@ -1302,13 +1345,13 @@ class PasswordManager(object):
continue continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line) m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m: if not m:
print("failed to parse password file: ", line) logger.warning("Failed to parse password file: %s", line)
else: else:
result[m.group(2)] = m.group(1) result[m.group(2)] = m.group(1)
f.close() f.close()
except IOError as e: except IOError as e:
if e.errno != errno.ENOENT: if e.errno != errno.ENOENT:
print("error reading password file: ", str(e)) logger.exception("Error reading password file:")
return result return result
@@ -1452,10 +1495,10 @@ class DeviceSpecificParams(object):
if x == ".py": if x == ".py":
f = b f = b
info = imp.find_module(f, [d]) info = imp.find_module(f, [d])
print("loaded device-specific extensions from", path) logger.info("loaded device-specific extensions from %s", path)
self.module = imp.load_module("device_specific", *info) self.module = imp.load_module("device_specific", *info)
except ImportError: except ImportError:
print("unable to load device-specific module; assuming none") logger.info("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs): def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing """Call the named function in the device-specific module, passing
@@ -1597,7 +1640,7 @@ class Difference(object):
th.start() th.start()
th.join(timeout=300) # 5 mins th.join(timeout=300) # 5 mins
if th.is_alive(): if th.is_alive():
print("WARNING: diff command timed out") logger.warning("diff command timed out")
p.terminate() p.terminate()
th.join(5) th.join(5)
if th.is_alive(): if th.is_alive():
@@ -1605,8 +1648,7 @@ class Difference(object):
th.join() th.join()
if p.returncode != 0: if p.returncode != 0:
print("WARNING: failure running %s:\n%s\n" % ( logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
diff_program, "".join(err)))
self.patch = None self.patch = None
return None, None, None return None, None, None
diff = ptemp.read() diff = ptemp.read()
@@ -1630,7 +1672,7 @@ class Difference(object):
def ComputeDifferences(diffs): def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'.""" """Call ComputePatch on all the Difference objects in 'diffs'."""
print(len(diffs), "diffs to compute") logger.info("%d diffs to compute", len(diffs))
# Do the largest files first, to try and reduce the long-pole effect. # Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs] by_size = [(i.tf.size, i) for i in diffs]
@@ -1656,14 +1698,14 @@ def ComputeDifferences(diffs):
else: else:
name = "%s (%s)" % (tf.name, sf.name) name = "%s (%s)" % (tf.name, sf.name)
if patch is None: if patch is None:
print( logger.error("patching failed! %40s", name)
"patching failed! %s" % (name,))
else: else:
print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % ( logger.info(
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)) "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
tf.size, 100.0 * len(patch) / tf.size, name)
lock.release() lock.release()
except Exception as e: except Exception:
print(e) logger.exception("Failed to compute diff from worker")
raise raise
# start worker threads; wait for them all to finish. # start worker threads; wait for them all to finish.
@@ -2086,6 +2128,6 @@ fi
# in the L release. # in the L release.
sh_location = "bin/install-recovery.sh" sh_location = "bin/install-recovery.sh"
print("putting script in", sh_location) logger.info("putting script in %s", sh_location)
output_sink(sh_location, sh) output_sink(sh_location, sh)

View File

@@ -28,6 +28,7 @@ Usage: img_from_target_files [flags] input_target_files output_image_zip
from __future__ import print_function from __future__ import print_function
import logging
import os import os
import shutil import shutil
import sys import sys
@@ -39,6 +40,7 @@ if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr) print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1) sys.exit(1)
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
@@ -72,6 +74,8 @@ def main(argv):
common.Usage(__doc__) common.Usage(__doc__)
sys.exit(1) sys.exit(1)
common.InitLogging()
OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"]) OPTIONS.input_tmp = common.UnzipTemp(args[0], ["IMAGES/*", "OTA/*"])
output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED) output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
CopyInfo(output_zip) CopyInfo(output_zip)
@@ -90,11 +94,11 @@ def main(argv):
common.ZipWrite(output_zip, os.path.join(images_path, image), image) common.ZipWrite(output_zip, os.path.join(images_path, image), image)
finally: finally:
print("cleaning up...") logger.info("cleaning up...")
common.ZipClose(output_zip) common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp) shutil.rmtree(OPTIONS.input_tmp)
print("done.") logger.info("done.")
if __name__ == '__main__': if __name__ == '__main__':
@@ -102,5 +106,5 @@ if __name__ == '__main__':
common.CloseInheritedPipes() common.CloseInheritedPipes()
main(sys.argv[1:]) main(sys.argv[1:])
except common.ExternalError as e: except common.ExternalError as e:
print("\n ERROR: %s\n" % (e,)) logger.exception("\n ERROR:\n")
sys.exit(1) sys.exit(1)

View File

@@ -16,24 +16,27 @@
from __future__ import print_function from __future__ import print_function
import logging
import os
import sys import sys
import common
if sys.hexversion < 0x02070000: if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr) print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1) sys.exit(1)
import os logger = logging.getLogger(__name__)
import common
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
def main(argv):
# def option_handler(o, a):
# return False
def main(argv):
args = common.ParseOptions(argv, __doc__) args = common.ParseOptions(argv, __doc__)
input_dir, output_dir = args input_dir, output_dir = args
common.InitLogging()
OPTIONS.info_dict = common.LoadInfoDict(input_dir) OPTIONS.info_dict = common.LoadInfoDict(input_dir)
recovery_img = common.GetBootableImage("recovery.img", "recovery.img", recovery_img = common.GetBootableImage("recovery.img", "recovery.img",

View File

@@ -164,6 +164,7 @@ A/B OTA specific options
from __future__ import print_function from __future__ import print_function
import logging
import multiprocessing import multiprocessing
import os.path import os.path
import shlex import shlex
@@ -181,6 +182,7 @@ if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr) print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1) sys.exit(1)
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
OPTIONS.package_key = None OPTIONS.package_key = None
@@ -576,11 +578,11 @@ def _WriteRecoveryImageToBoot(script, output_zip):
OPTIONS.input_tmp, "RECOVERY") OPTIONS.input_tmp, "RECOVERY")
common.ZipWriteStr( common.ZipWriteStr(
output_zip, recovery_two_step_img_name, recovery_two_step_img.data) output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
print("two-step package: using %s in stage 1/3" % ( logger.info(
recovery_two_step_img_name,)) "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name) script.WriteRawImage("/boot", recovery_two_step_img_name)
else: else:
print("two-step package: using recovery.img in stage 1/3") logger.info("two-step package: using recovery.img in stage 1/3")
# The "recovery.img" entry has been written into package earlier. # The "recovery.img" entry has been written into package earlier.
script.WriteRawImage("/boot", "recovery.img") script.WriteRawImage("/boot", "recovery.img")
@@ -1344,8 +1346,8 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
target_api_version = target_info["recovery_api_version"] target_api_version = target_info["recovery_api_version"]
source_api_version = source_info["recovery_api_version"] source_api_version = source_info["recovery_api_version"]
if source_api_version == 0: if source_api_version == 0:
print("WARNING: generating edify script for a source that " logger.warning(
"can't install it.") "Generating edify script for a source that can't install it.")
script = edify_generator.EdifyGenerator( script = edify_generator.EdifyGenerator(
source_api_version, target_info, fstab=source_info["fstab"]) source_api_version, target_info, fstab=source_info["fstab"])
@@ -1523,8 +1525,9 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
else: else:
include_full_boot = False include_full_boot = False
print("boot target: %d source: %d diff: %d" % ( logger.info(
target_boot.size, source_boot.size, len(d))) "boot target: %d source: %d diff: %d", target_boot.size,
source_boot.size, len(d))
common.ZipWriteStr(output_zip, "boot.img.p", d) common.ZipWriteStr(output_zip, "boot.img.p", d)
@@ -1574,19 +1577,19 @@ else
if OPTIONS.two_step: if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data) common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img") script.WriteRawImage("/boot", "boot.img")
print("writing full boot image (forced by two-step mode)") logger.info("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step: if not OPTIONS.two_step:
if updating_boot: if updating_boot:
if include_full_boot: if include_full_boot:
print("boot image changed; including full.") logger.info("boot image changed; including full.")
script.Print("Installing boot image...") script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img") script.WriteRawImage("/boot", "boot.img")
else: else:
# Produce the boot image by applying a patch to the current # Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the # contents of the boot partition, and write it back to the
# partition. # partition.
print("boot image changed; including patch.") logger.info("boot image changed; including patch.")
script.Print("Patching boot image...") script.Print("Patching boot image...")
script.ShowProgress(0.1, 10) script.ShowProgress(0.1, 10)
script.PatchPartition( script.PatchPartition(
@@ -1596,7 +1599,7 @@ else
boot_type, boot_device, source_boot.size, source_boot.sha1), boot_type, boot_device, source_boot.size, source_boot.sha1),
'boot.img.p') 'boot.img.p')
else: else:
print("boot image unchanged; skipping.") logger.info("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image). # Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd() device_specific.IncrementalOTA_InstallEnd()
@@ -1787,7 +1790,7 @@ def WriteABOTAPackageWithBrilloScript(target_file, output_file,
common.ZipWriteStr(output_zip, care_map_name, care_map_data, common.ZipWriteStr(output_zip, care_map_name, care_map_data,
compress_type=zipfile.ZIP_STORED) compress_type=zipfile.ZIP_STORED)
else: else:
print("Warning: cannot find care map file in target_file package") logger.warning("Cannot find care map file in target_file package")
AddCompatibilityArchiveIfTrebleEnabled( AddCompatibilityArchiveIfTrebleEnabled(
target_zip, output_zip, target_info, source_info) target_zip, output_zip, target_info, source_info)
@@ -1903,6 +1906,8 @@ def main(argv):
common.Usage(__doc__) common.Usage(__doc__)
sys.exit(1) sys.exit(1)
common.InitLogging()
if OPTIONS.downgrade: if OPTIONS.downgrade:
# We should only allow downgrading incrementals (as opposed to full). # We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full # Otherwise the device may go back from arbitrary build with this full
@@ -1923,9 +1928,8 @@ def main(argv):
with zipfile.ZipFile(args[0], 'r') as input_zip: with zipfile.ZipFile(args[0], 'r') as input_zip:
OPTIONS.info_dict = common.LoadInfoDict(input_zip) OPTIONS.info_dict = common.LoadInfoDict(input_zip)
if OPTIONS.verbose: logger.info("--- target info ---")
print("--- target info ---") common.DumpInfoDict(OPTIONS.info_dict)
common.DumpInfoDict(OPTIONS.info_dict)
# Load the source build dict if applicable. # Load the source build dict if applicable.
if OPTIONS.incremental_source is not None: if OPTIONS.incremental_source is not None:
@@ -1933,9 +1937,8 @@ def main(argv):
with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip: with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip) OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
if OPTIONS.verbose: logger.info("--- source info ---")
print("--- source info ---") common.DumpInfoDict(OPTIONS.source_info_dict)
common.DumpInfoDict(OPTIONS.source_info_dict)
# Load OEM dicts if provided. # Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source) OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1959,7 +1962,7 @@ def main(argv):
output_file=args[1], output_file=args[1],
source_file=OPTIONS.incremental_source) source_file=OPTIONS.incremental_source)
print("done.") logger.info("done.")
return return
# Sanity check the loaded info dicts first. # Sanity check the loaded info dicts first.
@@ -1970,7 +1973,7 @@ def main(argv):
# Non-A/B OTAs rely on /cache partition to store temporary files. # Non-A/B OTAs rely on /cache partition to store temporary files.
cache_size = OPTIONS.info_dict.get("cache_size") cache_size = OPTIONS.info_dict.get("cache_size")
if cache_size is None: if cache_size is None:
print("--- can't determine the cache partition size ---") logger.warning("--- can't determine the cache partition size ---")
OPTIONS.cache_size = cache_size OPTIONS.cache_size = cache_size
if OPTIONS.extra_script is not None: if OPTIONS.extra_script is not None:
@@ -1979,7 +1982,7 @@ def main(argv):
if OPTIONS.extracted_input is not None: if OPTIONS.extracted_input is not None:
OPTIONS.input_tmp = OPTIONS.extracted_input OPTIONS.input_tmp = OPTIONS.extracted_input
else: else:
print("unzipping target target-files...") logger.info("unzipping target target-files...")
OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN) OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
OPTIONS.target_tmp = OPTIONS.input_tmp OPTIONS.target_tmp = OPTIONS.input_tmp
@@ -1991,7 +1994,7 @@ def main(argv):
if OPTIONS.device_specific is None: if OPTIONS.device_specific is None:
from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py") from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
if os.path.exists(from_input): if os.path.exists(from_input):
print("(using device-specific extensions from target_files)") logger.info("(using device-specific extensions from target_files)")
OPTIONS.device_specific = from_input OPTIONS.device_specific = from_input
else: else:
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions") OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
@@ -2008,7 +2011,7 @@ def main(argv):
# Generate an incremental OTA. # Generate an incremental OTA.
else: else:
print("unzipping source target-files...") logger.info("unzipping source target-files...")
OPTIONS.source_tmp = common.UnzipTemp( OPTIONS.source_tmp = common.UnzipTemp(
OPTIONS.incremental_source, UNZIP_PATTERN) OPTIONS.incremental_source, UNZIP_PATTERN)
with zipfile.ZipFile(args[0], 'r') as input_zip, \ with zipfile.ZipFile(args[0], 'r') as input_zip, \
@@ -2024,15 +2027,15 @@ def main(argv):
target_files_diff.recursiveDiff( target_files_diff.recursiveDiff(
'', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file) '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
print("done.") logger.info("done.")
if __name__ == '__main__': if __name__ == '__main__':
try: try:
common.CloseInheritedPipes() common.CloseInheritedPipes()
main(sys.argv[1:]) main(sys.argv[1:])
except common.ExternalError as e: except common.ExternalError:
print("\n ERROR: %s\n" % (e,)) logger.exception("\n ERROR:\n")
sys.exit(1) sys.exit(1)
finally: finally:
common.Cleanup() common.Cleanup()

View File

@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import bisect import bisect
import logging
import os import os
import struct import struct
import threading import threading
@@ -20,6 +21,8 @@ from hashlib import sha1
import rangelib import rangelib
logger = logging.getLogger(__name__)
class SparseImage(object): class SparseImage(object):
"""Wraps a sparse image file into an image object. """Wraps a sparse image file into an image object.
@@ -61,8 +64,9 @@ class SparseImage(object):
raise ValueError("Chunk header size was expected to be 12, but is %u." % raise ValueError("Chunk header size was expected to be 12, but is %u." %
(chunk_hdr_sz,)) (chunk_hdr_sz,))
print("Total of %u %u-byte output blocks in %u input chunks." logger.info(
% (total_blks, blk_sz, total_chunks)) "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
blk_sz, total_chunks)
if not build_map: if not build_map:
assert not hashtree_info_generator, \ assert not hashtree_info_generator, \

View File

@@ -16,6 +16,7 @@
from __future__ import print_function from __future__ import print_function
import logging
import os.path import os.path
import shlex import shlex
import struct import struct
@@ -24,6 +25,8 @@ import common
import sparse_img import sparse_img
from rangelib import RangeSet from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7" FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
@@ -71,7 +74,7 @@ def GetSimgSize(image_file):
def ZeroPadSimg(image_file, pad_size): def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE blocks = pad_size // BLOCK_SIZE
print("Padding %d blocks (%d bytes)" % (blocks, pad_size)) logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False) simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks) simg.AppendFillChunk(0, blocks)
@@ -114,9 +117,9 @@ def AdjustPartitionSizeForVerity(partition_size, fec_supported):
else: else:
hi = i hi = i
if OPTIONS.verbose: logger.info(
print("Adjusted partition size for verity, partition_size: {}," "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
" verity_size: {}".format(result, verity_size)) result, verity_size)
AdjustPartitionSizeForVerity.results[key] = (result, verity_size) AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
return (result, verity_size) return (result, verity_size)
@@ -326,9 +329,9 @@ def AVBCalcMinPartitionSize(image_size, size_calculator):
else: else:
lo = mid + BLOCK_SIZE lo = mid + BLOCK_SIZE
if OPTIONS.verbose: logger.info(
print("AVBCalcMinPartitionSize({}): partition_size: {}.".format( "AVBCalcMinPartitionSize(%d): partition_size: %d.",
image_size, partition_size)) image_size, partition_size)
return partition_size return partition_size
@@ -514,9 +517,9 @@ class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
salt, self.hashtree_info.salt) salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash: if root_hash != self.hashtree_info.root_hash:
print( logger.warning(
"Calculated root hash {} doesn't match the one in metadata {}".format( "Calculated root hash %s doesn't match the one in metadata %s",
root_hash, self.hashtree_info.root_hash)) root_hash, self.hashtree_info.root_hash)
return False return False
# Reads the generated hash tree and checks if it has the exact same bytes # Reads the generated hash tree and checks if it has the exact same bytes