Merge "full support for OTA of vendor partitions"

Author: Doug Zongker
Date: 2014-06-17 15:07:15 +00:00
Committed by: Android (Google) Code Review
5 changed files with 386 additions and 272 deletions

View File

@@ -1335,8 +1335,10 @@ endif
$(hide) ./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
@# Zip everything up, preserving symlinks
$(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .)
@# Run fs_config on all the system, boot ramdisk, and recovery ramdisk files in the zip, and save the output
@# Run fs_config on all the system, vendor, boot ramdisk,
@# and recovery ramdisk files in the zip, and save the output
$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
$(hide) (cd $(zip_root) && zip -q ../$(notdir $@) META/*filesystem_config.txt)
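For illustration only, not part of the commit: the new VENDOR line above can be mirrored in Python to show what ends up in META/vendor_filesystem_config.txt. The zip path, the location of the fs_config host binary, and the file_contexts path are assumptions.

import subprocess
import zipfile

def write_vendor_fs_config(target_files_zip, fs_config_bin, selinux_fc, out_path):
    # Mirrors: zipinfo -1 $@ | awk '/^VENDOR\// {print "vendor/" $2}' | fs_config -C -S <file_contexts>
    with zipfile.ZipFile(target_files_zip) as z:
        names = ["vendor/" + n[len("VENDOR/"):]
                 for n in z.namelist() if n.startswith("VENDOR/")]
    with open(out_path, "w") as out:
        p = subprocess.Popen([fs_config_bin, "-C", "-S", selinux_fc],
                             stdin=subprocess.PIPE, stdout=out)
        p.communicate("\n".join(names) + "\n")
    assert p.returncode == 0, "fs_config failed"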

View File

@@ -1008,14 +1008,14 @@ def XZ(path):
p.communicate()
assert p.returncode == 0, "Couldn't compress patch"
def MakeSystemPatch(source_file, target_file):
def MakePartitionPatch(source_file, target_file, partition):
with tempfile.NamedTemporaryFile() as output_file:
XDelta3(source_file.name, target_file.name, output_file.name)
XZ(output_file.name)
with open(output_file.name + ".xz") as patch_file:
patch_data = patch_file.read()
os.unlink(patch_file.name)
return File("system.muimg.p", patch_data)
return File(partition + ".muimg.p", patch_data)
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
info_dict=None):
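A minimal usage sketch for the renamed helper, assuming two vendor images on disk (the filenames are hypothetical) and xdelta3/xz on the PATH, as the existing XDelta3 and XZ helpers already require:

import tempfile
import common   # build/tools/releasetools/common.py

with tempfile.NamedTemporaryFile() as src, tempfile.NamedTemporaryFile() as tgt:
    src.write(open("vendor_source.img", "rb").read()); src.flush()
    tgt.write(open("vendor_target.img", "rb").read()); tgt.flush()
    patch = common.MakePartitionPatch(src, tgt, "vendor")
    # patch.name is "vendor.muimg.p"; patch.data holds the xz-compressed
    # xdelta3 delta, ready for patch.AddToZip(output_zip).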

View File

@@ -203,11 +203,10 @@ class EdifyGenerator(object):
p.device, p.length, p.mount_point))
def WipeBlockDevice(self, partition):
if partition != "/system":
raise ValueError(("WipeBlockDevice currently only works "
"on /system, not %s\n") % (partition,))
if partition not in ("/system", "/vendor"):
raise ValueError(("WipeBlockDevice doesn't work on %s\n") % (partition,))
fstab = self.info.get("fstab", None)
size = self.info.get("system_size", None)
size = self.info.get(partition.lstrip("/") + "_size", None)
device = fstab[partition].device
self.script.append('wipe_block_device("%s", %s);' % (device, size))
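A hedged usage sketch: with the check relaxed, a generator whose info dict carries a "vendor_size" entry and whose fstab has a "/vendor" line can emit a vendor wipe too (script here is an existing EdifyGenerator instance):

script.WipeBlockDevice("/vendor")
# appends: wipe_block_device("<vendor block device>", <vendor_size>);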

View File

@@ -59,9 +59,21 @@ def AddSystem(output_zip, sparse=True):
data = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict, sparse=sparse)
common.ZipWriteStr(output_zip, "system.img", data)
def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
print "creating system.img..."
return CreateImage(input_dir, info_dict, "system",
sparse=sparse, map_file=map_file)
def AddVendor(output_zip, sparse=True):
data = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict, sparse=sparse)
common.ZipWriteStr(output_zip, "vendor.img", data)
def BuildVendor(input_dir, info_dict, sparse=True, map_file=None):
return CreateImage(input_dir, info_dict, "vendor",
sparse=sparse, map_file=map_file)
def CreateImage(input_dir, info_dict, what, sparse=True, map_file=None):
print "creating " + what + ".img..."
img = tempfile.NamedTemporaryFile()
@@ -69,8 +81,8 @@ def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
# mkyaffs2image. It wants "system" but we have a directory named
# "SYSTEM", so create a symlink.
try:
os.symlink(os.path.join(input_dir, "SYSTEM"),
os.path.join(input_dir, "system"))
os.symlink(os.path.join(input_dir, what.upper()),
os.path.join(input_dir, what))
except OSError, e:
# bogus error on my mac version?
# File "./build/tools/releasetools/img_from_target_files", line 86, in AddSystem
@@ -79,22 +91,28 @@ def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
if (e.errno == errno.EEXIST):
pass
image_props = build_image.ImagePropFromGlobalDict(info_dict, "system")
image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
fstab = info_dict["fstab"]
if fstab:
image_props["fs_type" ] = fstab["/system"].fs_type
image_props["fs_type" ] = fstab["/" + what].fs_type
fs_config = os.path.join(input_dir, "META/filesystem_config.txt")
if what == "system":
fs_config_prefix = ""
else:
fs_config_prefix = what + "_"
fs_config = os.path.join(
input_dir, "META/" + fs_config_prefix + "filesystem_config.txt")
if not os.path.exists(fs_config): fs_config = None
fc_config = os.path.join(input_dir, "BOOT/RAMDISK/file_contexts")
if not os.path.exists(fc_config): fc_config = None
succ = build_image.BuildImage(os.path.join(input_dir, "system"),
succ = build_image.BuildImage(os.path.join(input_dir, what),
image_props, img.name,
fs_config=fs_config,
fc_config=fc_config)
assert succ, "build system.img image failed"
assert succ, "build " + what + ".img image failed"
mapdata = None
@@ -104,7 +122,7 @@ def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
else:
success, name = build_image.UnsparseImage(img.name, replace=False)
if not success:
assert False, "unsparsing system.img failed"
assert False, "unsparsing " + what + ".img failed"
if map_file:
mmap = tempfile.NamedTemporaryFile()
@@ -131,45 +149,6 @@ def BuildSystem(input_dir, info_dict, sparse=True, map_file=None):
return mapdata, data
def AddVendor(output_zip):
"""Turn the contents of VENDOR into vendor.img and store it in
output_zip."""
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
"vendor")
# The build system has to explicitly request for vendor.img.
if "fs_type" not in image_props:
return
print "creating vendor.img..."
img = tempfile.NamedTemporaryFile()
# The name of the directory it is making an image out of matters to
# mkyaffs2image. It wants "vendor" but we have a directory named
# "VENDOR", so create a symlink or an empty directory if VENDOR does not
# exist.
if not os.path.exists(os.path.join(OPTIONS.input_tmp, "vendor")):
if os.path.exists(os.path.join(OPTIONS.input_tmp, "VENDOR")):
os.symlink(os.path.join(OPTIONS.input_tmp, "VENDOR"),
os.path.join(OPTIONS.input_tmp, "vendor"))
else:
os.mkdir(os.path.join(OPTIONS.input_tmp, "vendor"))
img = tempfile.NamedTemporaryFile()
fstab = OPTIONS.info_dict["fstab"]
if fstab:
image_props["fs_type" ] = fstab["/vendor"].fs_type
succ = build_image.BuildImage(os.path.join(OPTIONS.input_tmp, "vendor"),
image_props, img.name)
assert succ, "build vendor.img image failed"
common.CheckSize(img.name, "vendor.img", OPTIONS.info_dict)
output_zip.write(img.name, "vendor.img")
img.close()
def AddUserdata(output_zip):
"""Create an empty userdata image and store it in output_zip."""
@@ -287,10 +266,21 @@ def main(argv):
if recovery_image:
recovery_image.AddToZip(output_zip)
def banner(s):
print "\n\n++++ " + s + " ++++\n\n"
if not bootable_only:
banner("AddSystem")
AddSystem(output_zip)
AddVendor(output_zip)
try:
input_zip.getinfo("VENDOR/")
banner("AddVendor")
AddVendor(output_zip)
except KeyError:
pass # no vendor partition for this device
banner("AddUserdata")
AddUserdata(output_zip)
banner("AddCache")
AddCache(output_zip)
CopyInfo(output_zip)
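Rough sketch of the refactor's payoff: BuildSystem and BuildVendor are now thin wrappers over CreateImage, so callers (including the block-based OTA path shown later) can build either image the same way. The variable names below are assumptions.

# Builds VENDOR/ from an unpacked target-files directory into an unsparsed
# image plus a block map, reading META/vendor_filesystem_config.txt if present
# and fstab["/vendor"].fs_type from the info dict.
mapdata, data = BuildVendor(unpacked_dir, info_dict, sparse=False, map_file=True)
common.ZipWriteStr(output_zip, "vendor.map", mapdata)
common.ZipWriteStr(output_zip, "vendor.muimg", data)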

View File

@@ -159,50 +159,21 @@ def ClosestFileMatch(src, tgtfiles, existing):
return result
return None
class Item:
"""Items represent the metadata (user, group, mode) of files and
directories in the system image."""
ITEMS = {}
def __init__(self, name, dir=False):
self.name = name
self.uid = None
self.gid = None
self.mode = None
self.selabel = None
self.capabilities = None
self.dir = dir
class ItemSet:
def __init__(self, partition, fs_config):
self.partition = partition
self.fs_config = fs_config
self.ITEMS = {}
if name:
self.parent = Item.Get(os.path.dirname(name), dir=True)
self.parent.children.append(self)
else:
self.parent = None
if dir:
self.children = []
def Dump(self, indent=0):
if self.uid is not None:
print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
else:
print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
if self.dir:
print "%s%s" % (" "*indent, self.descendants)
print "%s%s" % (" "*indent, self.best_subtree)
for i in self.children:
i.Dump(indent=indent+1)
@classmethod
def Get(cls, name, dir=False):
if name not in cls.ITEMS:
cls.ITEMS[name] = Item(name, dir=dir)
return cls.ITEMS[name]
@classmethod
def GetMetadata(cls, input_zip):
def Get(self, name, dir=False):
if name not in self.ITEMS:
self.ITEMS[name] = Item(self, name, dir=dir)
return self.ITEMS[name]
def GetMetadata(self, input_zip):
# The target_files contains a record of what the uid,
# gid, and mode are supposed to be.
output = input_zip.read("META/filesystem_config.txt")
output = input_zip.read(self.fs_config)
for line in output.split("\n"):
if not line: continue
@@ -220,7 +191,7 @@ class Item:
if key == "capabilities":
capabilities = value
i = cls.ITEMS.get(name, None)
i = self.ITEMS.get(name, None)
if i is not None:
i.uid = int(uid)
i.gid = int(gid)
@@ -231,11 +202,44 @@ class Item:
i.children.sort(key=lambda i: i.name)
# set metadata for the files generated by this script.
i = cls.ITEMS.get("system/recovery-from-boot.p", None)
i = self.ITEMS.get("system/recovery-from-boot.p", None)
if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0644, None, None
i = cls.ITEMS.get("system/etc/install-recovery.sh", None)
i = self.ITEMS.get("system/etc/install-recovery.sh", None)
if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0544, None, None
class Item:
"""Items represent the metadata (user, group, mode) of files and
directories in the system image."""
def __init__(self, itemset, name, dir=False):
self.itemset = itemset
self.name = name
self.uid = None
self.gid = None
self.mode = None
self.selabel = None
self.capabilities = None
self.dir = dir
if name:
self.parent = itemset.Get(os.path.dirname(name), dir=True)
self.parent.children.append(self)
else:
self.parent = None
if dir:
self.children = []
def Dump(self, indent=0):
if self.uid is not None:
print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
else:
print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
if self.dir:
print "%s%s" % (" "*indent, self.descendants)
print "%s%s" % (" "*indent, self.best_subtree)
for i in self.children:
i.Dump(indent=indent+1)
def CountChildMetadata(self):
"""Count up the (uid, gid, mode, selabel, capabilities) tuples for
all children and determine the best strategy for using set_perm_recursive and
@@ -320,9 +324,8 @@ class Item:
recurse(self, (-1, -1, -1, -1, None, None))
def CopySystemFiles(input_zip, output_zip=None,
substitute=None):
"""Copies files underneath system/ in the input zip to the output
def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
"""Copies files for the partition in the input zip to the output
zip. Populates the Item class with their metadata, and returns a
list of symlinks. output_zip may be None, in which case the copy is
skipped (but the other side effects still happen). substitute is an
@@ -332,15 +335,17 @@ def CopySystemFiles(input_zip, output_zip=None,
symlinks = []
partition = itemset.partition
for info in input_zip.infolist():
if info.filename.startswith("SYSTEM/"):
if info.filename.startswith(partition.upper() + "/"):
basefilename = info.filename[7:]
if IsSymlink(info):
symlinks.append((input_zip.read(info.filename),
"/system/" + basefilename))
"/" + partition + "/" + basefilename))
else:
info2 = copy.copy(info)
fn = info2.filename = "system/" + basefilename
fn = info2.filename = partition + "/" + basefilename
if substitute and fn in substitute and substitute[fn] is None:
continue
if output_zip is not None:
@@ -350,9 +355,9 @@ def CopySystemFiles(input_zip, output_zip=None,
data = input_zip.read(info.filename)
output_zip.writestr(info2, data)
if fn.endswith("/"):
Item.Get(fn[:-1], dir=True)
itemset.Get(fn[:-1], dir=True)
else:
Item.Get(fn, dir=False)
itemset.Get(fn, dir=False)
symlinks.sort()
return symlinks
@@ -387,6 +392,13 @@ def HasRecoveryPatch(target_files_zip):
except KeyError:
return False
def HasVendorPartition(target_files_zip):
try:
target_files_zip.getinfo("VENDOR/")
return True
except KeyError:
return False
def GetOemProperty(name, oem_props, oem_dict, info_dict):
if oem_props is not None and name in oem_props:
return oem_dict[name]
@@ -489,10 +501,13 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
if OPTIONS.wipe_user_data:
system_progress -= 0.1
if HasVendorPartition(input_zip):
system_progress -= 0.1
if "selinux_fc" in OPTIONS.info_dict:
WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)
system_items = ItemSet("system", "META/filesystem_config.txt")
script.ShowProgress(system_progress, 0)
if block_based:
mapdata, data = img_from_target_files.BuildSystem(
@@ -510,7 +525,7 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
script.UnpackPackageDir("recovery", "/system")
script.UnpackPackageDir("system", "/system")
symlinks = CopySystemFiles(input_zip, output_zip)
symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
script.MakeSymlinks(symlinks)
boot_img = common.GetBootableImage("boot.img", "boot.img",
@@ -519,13 +534,37 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
if not block_based:
def output_sink(fn, data):
common.ZipWriteStr(output_zip, "recovery/" + fn, data)
Item.Get("system/" + fn, dir=False)
system_items.Get("system/" + fn, dir=False)
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
recovery_img, boot_img)
Item.GetMetadata(input_zip)
Item.Get("system").SetPermissions(script)
system_items.GetMetadata(input_zip)
system_items.Get("system").SetPermissions(script)
if HasVendorPartition(input_zip):
vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
script.ShowProgress(0.1, 0)
if block_based:
mapdata, data = img_from_target_files.BuildVendor(
OPTIONS.input_tmp, OPTIONS.info_dict,
sparse=False, map_file=True)
common.ZipWriteStr(output_zip, "vendor.map", mapdata)
common.ZipWriteStr(output_zip, "vendor.muimg", data)
script.WipeBlockDevice("/vendor")
script.WriteRawImage("/vendor", "vendor.muimg", mapfn="vendor.map")
else:
script.FormatPartition("/vendor")
script.Mount("/vendor")
script.UnpackPackageDir("vendor", "/vendor")
symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
script.MakeSymlinks(symlinks)
vendor_items.GetMetadata(input_zip)
vendor_items.Get("vendor").SetPermissions(script)
common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
@@ -544,7 +583,7 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
if OPTIONS.wipe_user_data:
script.ShowProgress(0.1, 10)
script.FormatPartition("/data")
if OPTIONS.two_step:
script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
@@ -571,14 +610,15 @@ def WriteMetadata(metadata, output_zip):
"".join(["%s=%s\n" % kv
for kv in sorted(metadata.iteritems())]))
def LoadSystemFiles(z):
"""Load all the files from SYSTEM/... in a given target-files
def LoadPartitionFiles(z, partition):
"""Load all the files from the given partition in a given target-files
ZipFile, and return a dict of {filename: File object}."""
out = {}
prefix = partition.upper() + "/"
for info in z.infolist():
if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
if info.filename.startswith(prefix) and not IsSymlink(info):
basefilename = info.filename[7:]
fn = "system/" + basefilename
fn = partition + "/" + basefilename
data = z.read(info.filename)
out[fn] = common.File(fn, data)
return out
@@ -602,6 +642,45 @@ def AddToKnownPaths(filename, known_paths):
known_paths.add(path)
dirs.pop()
class BlockDifference:
def __init__(self, partition, builder, output_zip):
with tempfile.NamedTemporaryFile() as src_file:
with tempfile.NamedTemporaryFile() as tgt_file:
print "building source " + partition + " image..."
src_file = tempfile.NamedTemporaryFile()
src_mapdata, src_data = builder(OPTIONS.source_tmp,
OPTIONS.source_info_dict,
sparse=False, map_file=True)
self.src_sha1 = sha1(src_data).hexdigest()
print "source " + partition + " sha1:", self.src_sha1
src_file.write(src_data)
print "building target " + partition + " image..."
tgt_file = tempfile.NamedTemporaryFile()
tgt_mapdata, tgt_data = builder(OPTIONS.target_tmp,
OPTIONS.target_info_dict,
sparse=False, map_file=True)
self.tgt_sha1 = sha1(tgt_data).hexdigest()
print "target " + partition + " sha1:", self.tgt_sha1
tgt_len = len(tgt_data)
tgt_file.write(tgt_data)
system_type, self.device = common.GetTypeAndDevice("/" + partition,
OPTIONS.info_dict)
self.patch = common.MakePartitionPatch(src_file, tgt_file, partition)
TestBlockPatch(src_data, src_mapdata, self.patch.data,
tgt_mapdata, self.tgt_sha1)
src_data = None
tgt_data = None
self.patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
self.src_mapfilename = self.patch.name + ".src.map"
common.ZipWriteStr(output_zip, self.src_mapfilename, src_mapdata)
self.tgt_mapfilename = self.patch.name + ".tgt.map"
common.ZipWriteStr(output_zip, self.tgt_mapfilename, tgt_mapdata)
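Condensed from the class above, a hedged sketch of what one BlockDifference contributes for /vendor; the attribute and entry names come straight from the constructor, only the local variable name is new.

vendor_diff = BlockDifference("vendor", img_from_target_files.BuildVendor,
                              output_zip)
# The OTA zip now carries, stored uncompressed (ZIP_STORED):
#   vendor.muimg.p           xz-compressed xdelta3 patch between the images
#   vendor.muimg.p.src.map   block map of the source vendor image
#   vendor.muimg.p.tgt.map   block map of the target vendor image
# and vendor_diff exposes .device, .src_sha1, .tgt_sha1, .patch,
# .src_mapfilename and .tgt_mapfilename for the later Syspatch call.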
def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
source_version = OPTIONS.source_info_dict["recovery_api_version"]
target_version = OPTIONS.target_info_dict["recovery_api_version"]
@@ -648,40 +727,13 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
updating_recovery = (source_recovery.data != target_recovery.data)
with tempfile.NamedTemporaryFile() as src_file:
with tempfile.NamedTemporaryFile() as tgt_file:
print "building source system image..."
src_file = tempfile.NamedTemporaryFile()
src_mapdata, src_data = img_from_target_files.BuildSystem(
OPTIONS.source_tmp, OPTIONS.source_info_dict,
sparse=False, map_file=True)
src_sys_sha1 = sha1(src_data).hexdigest()
print "source system sha1:", src_sys_sha1
src_file.write(src_data)
print "building target system image..."
tgt_file = tempfile.NamedTemporaryFile()
tgt_mapdata, tgt_data = img_from_target_files.BuildSystem(
OPTIONS.target_tmp, OPTIONS.target_info_dict,
sparse=False, map_file=True)
tgt_sys_sha1 = sha1(tgt_data).hexdigest()
print "target system sha1:", tgt_sys_sha1
tgt_sys_len = len(tgt_data)
tgt_file.write(tgt_data)
system_type, system_device = common.GetTypeAndDevice("/system", OPTIONS.info_dict)
system_patch = common.MakeSystemPatch(src_file, tgt_file)
TestBlockPatch(src_data, src_mapdata, system_patch.data, tgt_mapdata, tgt_sys_sha1)
src_data = None
tgt_data = None
system_patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
src_mapfilename = system_patch.name + ".src.map"
common.ZipWriteStr(output_zip, src_mapfilename, src_mapdata)
tgt_mapfilename = system_patch.name + ".tgt.map"
common.ZipWriteStr(output_zip, tgt_mapfilename, tgt_mapdata)
system_diff = BlockDifference("system", img_from_target_files.BuildSystem,
output_zip)
if HasVendorPartition(target_zip):
if not HasVendorPartition(source_zip):
raise RuntimeError("can't generate incremental that adds /vendor")
vendor_diff = BlockDifference("vendor", img_from_target_files.BuildVendor,
output_zip)
oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
oem_dict = None
@@ -774,12 +826,23 @@ else
device_specific.IncrementalOTA_InstallBegin()
if HasVendorPartition(target_zip):
script.Print("Patching vendor image...")
script.ShowProgress(0.1, 0)
script.Syspatch(vendor_diff.device,
vendor_diff.tgt_mapfilename, vendor_diff.tgt_sha1,
vendor_diff.src_mapfilename, vendor_diff.src_sha1,
vendor_diff.patch.name)
sys_progress = 0.8
else:
sys_progress = 0.9
script.Print("Patching system image...")
script.ShowProgress(0.9, 0)
script.Syspatch(system_device,
tgt_mapfilename, tgt_sys_sha1,
src_mapfilename, src_sys_sha1,
system_patch.name)
script.ShowProgress(sys_progress, 0)
script.Syspatch(system_diff.device,
system_diff.tgt_mapfilename, system_diff.tgt_sha1,
system_diff.src_mapfilename, system_diff.src_sha1,
system_diff.patch.name)
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
@@ -881,6 +944,127 @@ def TestBlockPatch(src_muimg, src_map, patch_data, tgt_map, tgt_sha1):
print "test of system image patch succeeded"
class FileDifference:
def __init__(self, partition, source_zip, target_zip, output_zip):
print "Loading target..."
self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
print "Loading source..."
self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
self.verbatim_targets = verbatim_targets = []
self.patch_list = patch_list = []
diffs = []
self.renames = renames = {}
known_paths = set()
largest_source_size = 0
matching_file_cache = {}
for fn, sf in source_data.items():
assert fn == sf.name
matching_file_cache["path:" + fn] = sf
if fn in target_data.keys():
AddToKnownPaths(fn, known_paths)
# Only allow eligibility for filename/sha matching
# if there isn't a perfect path match.
if target_data.get(sf.name) is None:
matching_file_cache["file:" + fn.split("/")[-1]] = sf
matching_file_cache["sha:" + sf.sha1] = sf
for fn in sorted(target_data.keys()):
tf = target_data[fn]
assert fn == tf.name
sf = ClosestFileMatch(tf, matching_file_cache, renames)
if sf is not None and sf.name != tf.name:
print "File has moved from " + sf.name + " to " + tf.name
renames[sf.name] = tf
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
print "send", fn, "verbatim"
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
if fn in target_data.keys():
AddToKnownPaths(fn, known_paths)
elif tf.sha1 != sf.sha1:
# File is different; consider sending as a patch
diffs.append(common.Difference(tf, sf))
else:
# Target file data identical to source (may still be renamed)
pass
common.ComputeDifferences(diffs)
for diff in diffs:
tf, sf, d = diff.GetPatch()
path = "/".join(tf.name.split("/")[:-1])
if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
path not in known_paths:
# patch is almost as big as the file; don't bother patching
# or a patch + rename cannot take place due to the target
# directory not existing
tf.AddToZip(output_zip)
verbatim_targets.append((tf.name, tf.size))
if sf.name in renames:
del renames[sf.name]
AddToKnownPaths(tf.name, known_paths)
else:
common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
largest_source_size = max(largest_source_size, sf.size)
self.largest_source_size = largest_source_size
def EmitVerification(self, script):
so_far = 0
for tf, sf, size, patch_sha in self.patch_list:
if tf.name != sf.name:
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
so_far += sf.size
return so_far
def RemoveUnneededFiles(self, script, extras=()):
script.DeleteFiles(["/"+i[0] for i in self.verbatim_targets] +
["/"+i for i in sorted(self.source_data)
if i not in self.target_data and
i not in self.renames] +
list(extras))
def TotalPatchSize(self):
return sum(i[1].size for i in self.patch_list)
def EmitPatches(self, script, total_patch_size, so_far):
self.deferred_patch_list = deferred_patch_list = []
for item in self.patch_list:
tf, sf, size, _ = item
if tf.name == "system/build.prop":
deferred_patch_list.append(item)
continue
if (sf.name != tf.name):
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
so_far += tf.size
script.SetProgress(so_far / total_patch_size)
return so_far
def EmitDeferredPatches(self, script):
for item in self.deferred_patch_list:
tf, sf, size, _ = item
script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
def EmitRenames(self, script):
if len(self.renames) > 0:
script.Print("Renaming files...")
for src, tgt in self.renames.iteritems():
print "Renaming " + src + " to " + tgt.name
script.RenameFile(src, tgt.name)
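A hedged summary of how the new class gets driven, condensed from the incremental path further down; the ordering comments are the only additions, and total_patch_size is assumed to have been computed as in that path.

system_diff = FileDifference("system", source_zip, target_zip, output_zip)
vendor_diff = (FileDifference("vendor", source_zip, target_zip, output_zip)
               if HasVendorPartition(target_zip) else None)

so_far = system_diff.EmitVerification(script)          # sha1-check patch sources
if vendor_diff: so_far += vendor_diff.EmitVerification(script)

system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
if vendor_diff: vendor_diff.RemoveUnneededFiles(script)

so_far = system_diff.EmitPatches(script, total_patch_size, 0)
if vendor_diff: so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)

system_diff.EmitRenames(script)
if vendor_diff: vendor_diff.EmitRenames(script)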
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
target_has_recovery_patch = HasRecoveryPatch(target_zip)
source_has_recovery_patch = HasRecoveryPatch(source_zip)
@@ -923,75 +1107,13 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
metadata=metadata,
info_dict=OPTIONS.info_dict)
print "Loading target..."
target_data = LoadSystemFiles(target_zip)
print "Loading source..."
source_data = LoadSystemFiles(source_zip)
verbatim_targets = []
patch_list = []
diffs = []
renames = {}
known_paths = set()
largest_source_size = 0
matching_file_cache = {}
for fn, sf in source_data.items():
assert fn == sf.name
matching_file_cache["path:" + fn] = sf
if fn in target_data.keys():
AddToKnownPaths(fn, known_paths)
# Only allow eligibility for filename/sha matching
# if there isn't a perfect path match.
if target_data.get(sf.name) is None:
matching_file_cache["file:" + fn.split("/")[-1]] = sf
matching_file_cache["sha:" + sf.sha1] = sf
for fn in sorted(target_data.keys()):
tf = target_data[fn]
assert fn == tf.name
sf = ClosestFileMatch(tf, matching_file_cache, renames)
if sf is not None and sf.name != tf.name:
print "File has moved from " + sf.name + " to " + tf.name
renames[sf.name] = tf
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
print "send", fn, "verbatim"
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
if fn in target_data.keys():
AddToKnownPaths(fn, known_paths)
elif tf.sha1 != sf.sha1:
# File is different; consider sending as a patch
diffs.append(common.Difference(tf, sf))
else:
# Target file data identical to source (may still be renamed)
pass
common.ComputeDifferences(diffs)
for diff in diffs:
tf, sf, d = diff.GetPatch()
path = "/".join(tf.name.split("/")[:-1])
if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
path not in known_paths:
# patch is almost as big as the file; don't bother patching
# or a patch + rename cannot take place due to the target
# directory not existing
tf.AddToZip(output_zip)
verbatim_targets.append((tf.name, tf.size))
if sf.name in renames:
del renames[sf.name]
AddToKnownPaths(tf.name, known_paths)
else:
common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
largest_source_size = max(largest_source_size, sf.size)
system_diff = FileDifference("system", source_zip, target_zip, output_zip)
script.Mount("/system")
if HasVendorPartition(target_zip):
vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
script.Mount("/vendor")
else:
vendor_diff = None
target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict)
source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict)
@@ -1075,13 +1197,9 @@ else if get_stage("%(bcb_dev)s", "stage") != "3/3" then
device_specific.IncrementalOTA_VerifyBegin()
script.ShowProgress(0.1, 0)
so_far = 0
for tf, sf, size, patch_sha in patch_list:
if tf.name != sf.name:
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
so_far += sf.size
so_far = system_diff.EmitVerification(script)
if vendor_diff:
so_far += vendor_diff.EmitVerification(script)
if updating_boot:
d = common.Difference(target_boot, source_boot)
@@ -1099,8 +1217,12 @@ else if get_stage("%(bcb_dev)s", "stage") != "3/3" then
target_boot.size, target_boot.sha1))
so_far += source_boot.size
if patch_list or updating_recovery or updating_boot:
script.CacheFreeSpaceCheck(largest_source_size)
size = []
if system_diff.patch_list: size.append(system_diff.largest_source_size)
if vendor_diff:
if vendor_diff.patch_list: size.append(vendor_diff.largest_source_size)
if size or updating_recovery or updating_boot:
script.CacheFreeSpaceCheck(max(size))
device_specific.IncrementalOTA_VerifyEnd()
@@ -1122,30 +1244,22 @@ else
print "writing full boot image (forced by two-step mode)"
script.Print("Removing unneeded files...")
script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
["/"+i for i in sorted(source_data)
if i not in target_data and
i not in renames] +
["/system/recovery.img"])
system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
if vendor_diff:
vendor_diff.RemoveUnneededFiles(script)
script.ShowProgress(0.8, 0)
total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
total_patch_size = 1.0 + system_diff.TotalPatchSize()
if vendor_diff:
total_patch_size += vendor_diff.TotalPatchSize()
if updating_boot:
total_patch_size += target_boot.size
so_far = 0
script.Print("Patching system files...")
deferred_patch_list = []
for item in patch_list:
tf, sf, size, _ = item
if tf.name == "system/build.prop":
deferred_patch_list.append(item)
continue
if (sf.name != tf.name):
script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
so_far += tf.size
script.SetProgress(so_far / total_patch_size)
so_far = system_diff.EmitPatches(script, total_patch_size, 0)
if vendor_diff:
script.Print("Patching vendor files...")
so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
if not OPTIONS.two_step:
if updating_boot:
@@ -1166,6 +1280,10 @@ else
else:
print "boot image unchanged; skipping."
system_items = ItemSet("system", "META/filesystem_config.txt")
if vendor_diff:
vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
if updating_recovery:
# Recovery is generated as a patch using both the boot image
# (which contains the same linux kernel as recovery) and the file
@@ -1179,7 +1297,7 @@ else
if not target_has_recovery_patch:
def output_sink(fn, data):
common.ZipWriteStr(output_zip, "recovery/" + fn, data)
Item.Get("system/" + fn, dir=False)
system_items.Get("system/" + fn, dir=False)
common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
target_recovery, target_boot)
@@ -1191,16 +1309,24 @@ else
script.ShowProgress(0.1, 10)
target_symlinks = CopySystemFiles(target_zip, None)
target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
if vendor_diff:
target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
temp_script = script.MakeTemporary()
system_items.GetMetadata(target_zip)
system_items.Get("system").SetPermissions(temp_script)
if vendor_diff:
vendor_items.GetMetadata(target_zip)
vendor_items.Get("vendor").SetPermissions(temp_script)
# Note that this call will mess up the trees of Items, so make sure
# we're done with them.
source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
if vendor_diff:
source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
temp_script = script.MakeTemporary()
Item.GetMetadata(target_zip)
Item.Get("system").SetPermissions(temp_script)
# Note that this call will mess up the tree of Items, so make sure
# we're done with it.
source_symlinks = CopySystemFiles(source_zip, None)
source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
# Delete all the symlinks in source that aren't in target. This
@@ -1212,20 +1338,20 @@ else
to_delete.append(link)
script.DeleteFiles(to_delete)
if verbatim_targets:
script.Print("Unpacking new files...")
if system_diff.verbatim_targets:
script.Print("Unpacking new system files...")
script.UnpackPackageDir("system", "/system")
if vendor_diff and vendor_diff.verbatim_targets:
script.Print("Unpacking new vendor files...")
script.UnpackPackageDir("vendor", "/vendor")
if updating_recovery and not target_has_recovery_patch:
script.Print("Unpacking new recovery...")
script.UnpackPackageDir("recovery", "/system")
if len(renames) > 0:
script.Print("Renaming files...")
for src in renames:
print "Renaming " + src + " to " + renames[src].name
script.RenameFile(src, renames[src].name)
system_diff.EmitRenames(script)
if vendor_diff:
vendor_diff.EmitRenames(script)
script.Print("Symlinks and permissions...")
@@ -1256,10 +1382,7 @@ else
# device can still come up, it appears to be the old build and will
# get set the OTA package again to retry.
script.Print("Patching remaining system files...")
for item in deferred_patch_list:
tf, sf, size, _ = item
script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
system_diff.EmitDeferredPatches(script)
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")