@@ -159,50 +159,21 @@ def ClosestFileMatch(src, tgtfiles, existing):
     return result
 
   return None
 
 
-class Item:
-  """Items represent the metadata (user, group, mode) of files and
-  directories in the system image."""
-  ITEMS = {}
-
-  def __init__(self, name, dir=False):
-    self.name = name
-    self.uid = None
-    self.gid = None
-    self.mode = None
-    self.selabel = None
-    self.capabilities = None
-    self.dir = dir
+class ItemSet:
+  def __init__(self, partition, fs_config):
+    self.partition = partition
+    self.fs_config = fs_config
+    self.ITEMS = {}
 
-    if name:
-      self.parent = Item.Get(os.path.dirname(name), dir=True)
-      self.parent.children.append(self)
-    else:
-      self.parent = None
-    if dir:
-      self.children = []
-
-  def Dump(self, indent=0):
-    if self.uid is not None:
-      print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
-    else:
-      print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
-    if self.dir:
-      print "%s%s" % (" "*indent, self.descendants)
-      print "%s%s" % (" "*indent, self.best_subtree)
-      for i in self.children:
-        i.Dump(indent=indent+1)
-
-  @classmethod
-  def Get(cls, name, dir=False):
-    if name not in cls.ITEMS:
-      cls.ITEMS[name] = Item(name, dir=dir)
-    return cls.ITEMS[name]
-
-  @classmethod
-  def GetMetadata(cls, input_zip):
+  def Get(self, name, dir=False):
+    if name not in self.ITEMS:
+      self.ITEMS[name] = Item(self, name, dir=dir)
+    return self.ITEMS[name]
 
+  def GetMetadata(self, input_zip):
     # The target_files contains a record of what the uid,
     # gid, and mode are supposed to be.
-    output = input_zip.read("META/filesystem_config.txt")
+    output = input_zip.read(self.fs_config)
 
     for line in output.split("\n"):
       if not line: continue
@@ -220,7 +191,7 @@ class Item:
         if key == "capabilities":
           capabilities = value
 
-      i = cls.ITEMS.get(name, None)
+      i = self.ITEMS.get(name, None)
       if i is not None:
         i.uid = int(uid)
         i.gid = int(gid)
@@ -231,11 +202,44 @@ class Item:
         i.children.sort(key=lambda i: i.name)
 
     # set metadata for the files generated by this script.
-    i = cls.ITEMS.get("system/recovery-from-boot.p", None)
+    i = self.ITEMS.get("system/recovery-from-boot.p", None)
     if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0644, None, None
-    i = cls.ITEMS.get("system/etc/install-recovery.sh", None)
+    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
     if i: i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0544, None, None
 
+
+class Item:
+  """Items represent the metadata (user, group, mode) of files and
+  directories in the system image."""
+  def __init__(self, itemset, name, dir=False):
+    self.itemset = itemset
+    self.name = name
+    self.uid = None
+    self.gid = None
+    self.mode = None
+    self.selabel = None
+    self.capabilities = None
+    self.dir = dir
+
+    if name:
+      self.parent = itemset.Get(os.path.dirname(name), dir=True)
+      self.parent.children.append(self)
+    else:
+      self.parent = None
+    if dir:
+      self.children = []
+
+  def Dump(self, indent=0):
+    if self.uid is not None:
+      print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
+    else:
+      print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
+    if self.dir:
+      print "%s%s" % (" "*indent, self.descendants)
+      print "%s%s" % (" "*indent, self.best_subtree)
+      for i in self.children:
+        i.Dump(indent=indent+1)
+
   def CountChildMetadata(self):
     """Count up the (uid, gid, mode, selabel, capabilities) tuples for
     all children and determine the best strategy for using set_perm_recursive and
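
Taken together, the hunks above replace the old module-level singleton (the class attribute Item.ITEMS plus the Item.Get and Item.GetMetadata classmethods) with per-instance state on ItemSet, so the same metadata machinery can track the system and vendor trees independently. A minimal sketch of the changed call pattern, using only names from this patch (input_zip stands for any open target-files zip, script for an edify generator):

    # Before: one global registry, hardwired to META/filesystem_config.txt.
    #   Item.GetMetadata(input_zip)
    #   Item.Get("system").SetPermissions(script)
    # After: one registry per partition, each with its own fs_config source.
    system_items = ItemSet("system", "META/filesystem_config.txt")
    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
    system_items.GetMetadata(input_zip)   # parses uid/gid/mode/selabel per path
    system_items.Get("system").SetPermissions(script)
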
@@ -320,9 +324,8 @@ class Item:
 
     recurse(self, (-1, -1, -1, -1, None, None))
 
-def CopySystemFiles(input_zip, output_zip=None,
-                    substitute=None):
-  """Copies files underneath system/ in the input zip to the output
+def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
+  """Copies files for the partition in the input zip to the output
   zip. Populates the Item class with their metadata, and returns a
   list of symlinks. output_zip may be None, in which case the copy is
   skipped (but the other side effects still happen). substitute is an
@@ -332,15 +335,17 @@ def CopySystemFiles(input_zip, output_zip=None,
 
   symlinks = []
 
+  partition = itemset.partition
+
   for info in input_zip.infolist():
-    if info.filename.startswith("SYSTEM/"):
+    if info.filename.startswith(partition.upper() + "/"):
       basefilename = info.filename[7:]
       if IsSymlink(info):
         symlinks.append((input_zip.read(info.filename),
-                         "/system/" + basefilename))
+                         "/" + partition + "/" + basefilename))
       else:
         info2 = copy.copy(info)
-        fn = info2.filename = "system/" + basefilename
+        fn = info2.filename = partition + "/" + basefilename
         if substitute and fn in substitute and substitute[fn] is None:
           continue
         if output_zip is not None:
@@ -350,9 +355,9 @@ def CopySystemFiles(input_zip, output_zip=None,
           data = input_zip.read(info.filename)
           output_zip.writestr(info2, data)
         if fn.endswith("/"):
-          Item.Get(fn[:-1], dir=True)
+          itemset.Get(fn[:-1], dir=True)
         else:
-          Item.Get(fn, dir=False)
+          itemset.Get(fn, dir=False)
 
   symlinks.sort()
   return symlinks
@@ -387,6 +392,13 @@ def HasRecoveryPatch(target_files_zip):
   except KeyError:
     return False
 
+def HasVendorPartition(target_files_zip):
+  try:
+    target_files_zip.getinfo("VENDOR/")
+    return True
+  except KeyError:
+    return False
+
 def GetOemProperty(name, oem_props, oem_dict, info_dict):
   if oem_props is not None and name in oem_props:
     return oem_dict[name]
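
HasVendorPartition mirrors the existing HasRecoveryPatch probe: a partition counts as present when the target-files zip contains its directory entry, which ZipFile.getinfo reports by raising KeyError when the name is absent. A self-contained generalization (HasPartition is a hypothetical helper, not part of this patch):

    import zipfile

    def HasPartition(target_files_zip, partition):
      try:
        # Directory entries in a target-files zip end with "/".
        target_files_zip.getinfo(partition.upper() + "/")
        return True
      except KeyError:
        return False

    z = zipfile.ZipFile("target-files.zip")
    print HasPartition(z, "vendor")   # equivalent to HasVendorPartition(z)
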
@@ -489,10 +501,13 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
   if OPTIONS.wipe_user_data:
     system_progress -= 0.1
+  if HasVendorPartition(input_zip):
+    system_progress -= 0.1
 
   if "selinux_fc" in OPTIONS.info_dict:
     WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)
 
+  system_items = ItemSet("system", "META/filesystem_config.txt")
   script.ShowProgress(system_progress, 0)
 
   if block_based:
     mapdata, data = img_from_target_files.BuildSystem(
@@ -510,7 +525,7 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
     script.UnpackPackageDir("recovery", "/system")
     script.UnpackPackageDir("system", "/system")
 
-    symlinks = CopySystemFiles(input_zip, output_zip)
+    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
     script.MakeSymlinks(symlinks)
 
   boot_img = common.GetBootableImage("boot.img", "boot.img",
@@ -519,13 +534,37 @@ else if get_stage("%(bcb_dev)s", "stage") == "3/3" then
   if not block_based:
     def output_sink(fn, data):
       common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-      Item.Get("system/" + fn, dir=False)
+      system_items.Get("system/" + fn, dir=False)
 
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
                              recovery_img, boot_img)
 
-  Item.GetMetadata(input_zip)
-  Item.Get("system").SetPermissions(script)
+  system_items.GetMetadata(input_zip)
+  system_items.Get("system").SetPermissions(script)
+
+  if HasVendorPartition(input_zip):
+    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
+    script.ShowProgress(0.1, 0)
+
+    if block_based:
+      mapdata, data = img_from_target_files.BuildVendor(
+          OPTIONS.input_tmp, OPTIONS.info_dict,
+          sparse=False, map_file=True)
+
+      common.ZipWriteStr(output_zip, "vendor.map", mapdata)
+      common.ZipWriteStr(output_zip, "vendor.muimg", data)
+      script.WipeBlockDevice("/vendor")
+      script.WriteRawImage("/vendor", "vendor.muimg", mapfn="vendor.map")
+    else:
+      script.FormatPartition("/vendor")
+      script.Mount("/vendor")
+      script.UnpackPackageDir("vendor", "/vendor")
+
+      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
+      script.MakeSymlinks(symlinks)
+
+      vendor_items.GetMetadata(input_zip)
+      vendor_items.Get("vendor").SetPermissions(script)
 
   common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
   common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
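
With this hunk, the full-OTA path runs the same build/copy/permissions sequence once per partition; the patch spells the vendor copy out longhand, but the file-based branch has the same shape in both cases. An illustrative loop form (a sketch, not code from the patch):

    # Each partition gets: copy files, make symlinks, apply fs_config metadata.
    for items in [system_items, vendor_items]:
      symlinks = CopyPartitionFiles(items, input_zip, output_zip)
      script.MakeSymlinks(symlinks)
      items.GetMetadata(input_zip)
      items.Get(items.partition).SetPermissions(script)
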
@@ -571,14 +610,15 @@ def WriteMetadata(metadata, output_zip):
                      "".join(["%s=%s\n" % kv
                               for kv in sorted(metadata.iteritems())]))
 
-def LoadSystemFiles(z):
-  """Load all the files from SYSTEM/... in a given target-files
+def LoadPartitionFiles(z, partition):
+  """Load all the files from the given partition in a given target-files
   ZipFile, and return a dict of {filename: File object}."""
   out = {}
+  prefix = partition.upper() + "/"
   for info in z.infolist():
-    if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
+    if info.filename.startswith(prefix) and not IsSymlink(info):
       basefilename = info.filename[7:]
-      fn = "system/" + basefilename
+      fn = partition + "/" + basefilename
       data = z.read(info.filename)
       out[fn] = common.File(fn, data)
   return out
@@ -602,6 +642,45 @@ def AddToKnownPaths(filename, known_paths):
     known_paths.add(path)
     dirs.pop()
 
+class BlockDifference:
+  def __init__(self, partition, builder, output_zip):
+    with tempfile.NamedTemporaryFile() as src_file:
+      with tempfile.NamedTemporaryFile() as tgt_file:
+        print "building source " + partition + " image..."
+        src_file = tempfile.NamedTemporaryFile()
+        src_mapdata, src_data = builder(OPTIONS.source_tmp,
+                                        OPTIONS.source_info_dict,
+                                        sparse=False, map_file=True)
+
+        self.src_sha1 = sha1(src_data).hexdigest()
+        print "source " + partition + " sha1:", self.src_sha1
+        src_file.write(src_data)
+
+        print "building target " + partition + " image..."
+        tgt_file = tempfile.NamedTemporaryFile()
+        tgt_mapdata, tgt_data = builder(OPTIONS.target_tmp,
+                                        OPTIONS.target_info_dict,
+                                        sparse=False, map_file=True)
+        self.tgt_sha1 = sha1(tgt_data).hexdigest()
+        print "target " + partition + " sha1:", self.tgt_sha1
+        tgt_len = len(tgt_data)
+        tgt_file.write(tgt_data)
+
+        system_type, self.device = common.GetTypeAndDevice("/" + partition,
+                                                           OPTIONS.info_dict)
+        self.patch = common.MakePartitionPatch(src_file, tgt_file, partition)
+
+        TestBlockPatch(src_data, src_mapdata, self.patch.data,
+                       tgt_mapdata, self.tgt_sha1)
+        src_data = None
+        tgt_data = None
+
+        self.patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
+        self.src_mapfilename = self.patch.name + ".src.map"
+        common.ZipWriteStr(output_zip, self.src_mapfilename, src_mapdata)
+        self.tgt_mapfilename = self.patch.name + ".tgt.map"
+        common.ZipWriteStr(output_zip, self.tgt_mapfilename, tgt_mapdata)
+
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
   source_version = OPTIONS.source_info_dict["recovery_api_version"]
   target_version = OPTIONS.target_info_dict["recovery_api_version"]
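
BlockDifference moves the whole per-partition block-diff pipeline into a constructor: build the source and target images with the supplied builder, hash both, compute and self-test the patch, then store the patch and its .src.map/.tgt.map entries in the output zip. A sketch of how the following hunks consume it:

    system_diff = BlockDifference("system", img_from_target_files.BuildSystem,
                                  output_zip)
    vendor_diff = BlockDifference("vendor", img_from_target_files.BuildVendor,
                                  output_zip)
    # The updater script later reads the fields recorded on each object:
    script.Syspatch(system_diff.device,
                    system_diff.tgt_mapfilename, system_diff.tgt_sha1,
                    system_diff.src_mapfilename, system_diff.src_sha1,
                    system_diff.patch.name)
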
@@ -648,40 +727,13 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
   updating_recovery = (source_recovery.data != target_recovery.data)
 
-  with tempfile.NamedTemporaryFile() as src_file:
-    with tempfile.NamedTemporaryFile() as tgt_file:
-      print "building source system image..."
-      src_file = tempfile.NamedTemporaryFile()
-      src_mapdata, src_data = img_from_target_files.BuildSystem(
-          OPTIONS.source_tmp, OPTIONS.source_info_dict,
-          sparse=False, map_file=True)
-
-      src_sys_sha1 = sha1(src_data).hexdigest()
-      print "source system sha1:", src_sys_sha1
-      src_file.write(src_data)
-
-      print "building target system image..."
-      tgt_file = tempfile.NamedTemporaryFile()
-      tgt_mapdata, tgt_data = img_from_target_files.BuildSystem(
-          OPTIONS.target_tmp, OPTIONS.target_info_dict,
-          sparse=False, map_file=True)
-      tgt_sys_sha1 = sha1(tgt_data).hexdigest()
-      print "target system sha1:", tgt_sys_sha1
-      tgt_sys_len = len(tgt_data)
-      tgt_file.write(tgt_data)
-
-      system_type, system_device = common.GetTypeAndDevice("/system", OPTIONS.info_dict)
-      system_patch = common.MakeSystemPatch(src_file, tgt_file)
-
-      TestBlockPatch(src_data, src_mapdata, system_patch.data, tgt_mapdata, tgt_sys_sha1)
-      src_data = None
-      tgt_data = None
-
-      system_patch.AddToZip(output_zip, compression=zipfile.ZIP_STORED)
-      src_mapfilename = system_patch.name + ".src.map"
-      common.ZipWriteStr(output_zip, src_mapfilename, src_mapdata)
-      tgt_mapfilename = system_patch.name + ".tgt.map"
-      common.ZipWriteStr(output_zip, tgt_mapfilename, tgt_mapdata)
+  system_diff = BlockDifference("system", img_from_target_files.BuildSystem,
+                                output_zip)
+  if HasVendorPartition(target_zip):
+    if not HasVendorPartition(source_zip):
+      raise RuntimeError("can't generate incremental that adds /vendor")
+    vendor_diff = BlockDifference("vendor", img_from_target_files.BuildVendor,
+                                  output_zip)
 
   oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
   oem_dict = None
@@ -774,12 +826,23 @@ else
 
   device_specific.IncrementalOTA_InstallBegin()
 
+  if HasVendorPartition(target_zip):
+    script.Print("Patching vendor image...")
+    script.ShowProgress(0.1, 0)
+    script.Syspatch(vendor_diff.device,
+                    vendor_diff.tgt_mapfilename, vendor_diff.tgt_sha1,
+                    vendor_diff.src_mapfilename, vendor_diff.src_sha1,
+                    vendor_diff.patch.name)
+    sys_progress = 0.8
+  else:
+    sys_progress = 0.9
+
   script.Print("Patching system image...")
-  script.ShowProgress(0.9, 0)
-  script.Syspatch(system_device,
-                  tgt_mapfilename, tgt_sys_sha1,
-                  src_mapfilename, src_sys_sha1,
-                  system_patch.name)
+  script.ShowProgress(sys_progress, 0)
+  script.Syspatch(system_diff.device,
+                  system_diff.tgt_mapfilename, system_diff.tgt_sha1,
+                  system_diff.src_mapfilename, system_diff.src_sha1,
+                  system_diff.patch.name)
 
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
@@ -881,6 +944,127 @@ def TestBlockPatch(src_muimg, src_map, patch_data, tgt_map, tgt_sha1):
 
   print "test of system image patch succeeded"
 
+
+class FileDifference:
+  def __init__(self, partition, source_zip, target_zip, output_zip):
+    print "Loading target..."
+    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
+    print "Loading source..."
+    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
+
+    self.verbatim_targets = verbatim_targets = []
+    self.patch_list = patch_list = []
+    diffs = []
+    self.renames = renames = {}
+    known_paths = set()
+    largest_source_size = 0
+
+    matching_file_cache = {}
+    for fn, sf in source_data.items():
+      assert fn == sf.name
+      matching_file_cache["path:" + fn] = sf
+      if fn in target_data.keys():
+        AddToKnownPaths(fn, known_paths)
+      # Only allow eligibility for filename/sha matching
+      # if there isn't a perfect path match.
+      if target_data.get(sf.name) is None:
+        matching_file_cache["file:" + fn.split("/")[-1]] = sf
+        matching_file_cache["sha:" + sf.sha1] = sf
+
+    for fn in sorted(target_data.keys()):
+      tf = target_data[fn]
+      assert fn == tf.name
+      sf = ClosestFileMatch(tf, matching_file_cache, renames)
+      if sf is not None and sf.name != tf.name:
+        print "File has moved from " + sf.name + " to " + tf.name
+        renames[sf.name] = tf
+
+      if sf is None or fn in OPTIONS.require_verbatim:
+        # This file should be included verbatim
+        if fn in OPTIONS.prohibit_verbatim:
+          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
+        print "send", fn, "verbatim"
+        tf.AddToZip(output_zip)
+        verbatim_targets.append((fn, tf.size))
+        if fn in target_data.keys():
+          AddToKnownPaths(fn, known_paths)
+      elif tf.sha1 != sf.sha1:
+        # File is different; consider sending as a patch
+        diffs.append(common.Difference(tf, sf))
+      else:
+        # Target file data identical to source (may still be renamed)
+        pass
+
+    common.ComputeDifferences(diffs)
+
+    for diff in diffs:
+      tf, sf, d = diff.GetPatch()
+      path = "/".join(tf.name.split("/")[:-1])
+      if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
+          path not in known_paths:
+        # patch is almost as big as the file; don't bother patching
+        # or a patch + rename cannot take place due to the target
+        # directory not existing
+        tf.AddToZip(output_zip)
+        verbatim_targets.append((tf.name, tf.size))
+        if sf.name in renames:
+          del renames[sf.name]
+        AddToKnownPaths(tf.name, known_paths)
+      else:
+        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
+        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
+        largest_source_size = max(largest_source_size, sf.size)
+
+    self.largest_source_size = largest_source_size
+
+  def EmitVerification(self, script):
+    so_far = 0
+    for tf, sf, size, patch_sha in self.patch_list:
+      if tf.name != sf.name:
+        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
+      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
+      so_far += sf.size
+    return so_far
+
+  def RemoveUnneededFiles(self, script, extras=()):
+    script.DeleteFiles(["/"+i[0] for i in self.verbatim_targets] +
+                       ["/"+i for i in sorted(self.source_data)
+                        if i not in self.target_data and
+                        i not in self.renames] +
+                       list(extras))
+
+  def TotalPatchSize(self):
+    return sum(i[1].size for i in self.patch_list)
+
+  def EmitPatches(self, script, total_patch_size, so_far):
+    self.deferred_patch_list = deferred_patch_list = []
+    for item in self.patch_list:
+      tf, sf, size, _ = item
+      if tf.name == "system/build.prop":
+        deferred_patch_list.append(item)
+        continue
+      if (sf.name != tf.name):
+        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
+      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
+      so_far += tf.size
+      script.SetProgress(so_far / total_patch_size)
+    return so_far
+
+  def EmitDeferredPatches(self, script):
+    for item in self.deferred_patch_list:
+      tf, sf, size, _ = item
+      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
+    script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
+
+  def EmitRenames(self, script):
+    if len(self.renames) > 0:
+      script.Print("Renaming files...")
+      for src, tgt in self.renames.iteritems():
+        print "Renaming " + src + " to " + tgt.name
+        script.RenameFile(src, tgt.name)
+
+
 def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
   target_has_recovery_patch = HasRecoveryPatch(target_zip)
   source_has_recovery_patch = HasRecoveryPatch(source_zip)
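
FileDifference is the analogous factoring for file-based incrementals: the constructor computes the verbatim, patch, and rename sets for one partition, and the Emit* methods replay them into the edify script in the order WriteIncrementalOTAPackage uses below. A sketch of one partition's lifecycle (total_patch_size as computed later in the patch):

    diff = FileDifference("system", source_zip, target_zip, output_zip)
    so_far = diff.EmitVerification(script)      # PatchCheck each patched file
    diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
    total_patch_size = 1.0 + diff.TotalPatchSize()
    so_far = diff.EmitPatches(script, total_patch_size, 0)  # defers build.prop
    diff.EmitRenames(script)                    # RenameFile for moved files
    diff.EmitDeferredPatches(script)            # patch build.prop last
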
@@ -923,75 +1107,13 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
                                        metadata=metadata,
                                        info_dict=OPTIONS.info_dict)
 
-  print "Loading target..."
-  target_data = LoadSystemFiles(target_zip)
-  print "Loading source..."
-  source_data = LoadSystemFiles(source_zip)
-
-  verbatim_targets = []
-  patch_list = []
-  diffs = []
-  renames = {}
-  known_paths = set()
-  largest_source_size = 0
-
-  matching_file_cache = {}
-  for fn, sf in source_data.items():
-    assert fn == sf.name
-    matching_file_cache["path:" + fn] = sf
-    if fn in target_data.keys():
-      AddToKnownPaths(fn, known_paths)
-    # Only allow eligibility for filename/sha matching
-    # if there isn't a perfect path match.
-    if target_data.get(sf.name) is None:
-      matching_file_cache["file:" + fn.split("/")[-1]] = sf
-      matching_file_cache["sha:" + sf.sha1] = sf
-
-  for fn in sorted(target_data.keys()):
-    tf = target_data[fn]
-    assert fn == tf.name
-    sf = ClosestFileMatch(tf, matching_file_cache, renames)
-    if sf is not None and sf.name != tf.name:
-      print "File has moved from " + sf.name + " to " + tf.name
-      renames[sf.name] = tf
-
-    if sf is None or fn in OPTIONS.require_verbatim:
-      # This file should be included verbatim
-      if fn in OPTIONS.prohibit_verbatim:
-        raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-      print "send", fn, "verbatim"
-      tf.AddToZip(output_zip)
-      verbatim_targets.append((fn, tf.size))
-      if fn in target_data.keys():
-        AddToKnownPaths(fn, known_paths)
-    elif tf.sha1 != sf.sha1:
-      # File is different; consider sending as a patch
-      diffs.append(common.Difference(tf, sf))
-    else:
-      # Target file data identical to source (may still be renamed)
-      pass
-
-  common.ComputeDifferences(diffs)
-
-  for diff in diffs:
-    tf, sf, d = diff.GetPatch()
-    path = "/".join(tf.name.split("/")[:-1])
-    if d is None or len(d) > tf.size * OPTIONS.patch_threshold or \
-        path not in known_paths:
-      # patch is almost as big as the file; don't bother patching
-      # or a patch + rename cannot take place due to the target
-      # directory not existing
-      tf.AddToZip(output_zip)
-      verbatim_targets.append((tf.name, tf.size))
-      if sf.name in renames:
-        del renames[sf.name]
-      AddToKnownPaths(tf.name, known_paths)
-    else:
-      common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
-      patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
-      largest_source_size = max(largest_source_size, sf.size)
+  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
+  script.Mount("/system")
+  if HasVendorPartition(target_zip):
+    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
+    script.Mount("/vendor")
+  else:
+    vendor_diff = None
 
   target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict)
   source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict)
@@ -1075,13 +1197,9 @@ else if get_stage("%(bcb_dev)s", "stage") != "3/3" then
   device_specific.IncrementalOTA_VerifyBegin()
 
   script.ShowProgress(0.1, 0)
-  so_far = 0
-
-  for tf, sf, size, patch_sha in patch_list:
-    if tf.name != sf.name:
-      script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-    script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
-    so_far += sf.size
+  so_far = system_diff.EmitVerification(script)
+  if vendor_diff:
+    so_far += vendor_diff.EmitVerification(script)
 
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
@@ -1099,8 +1217,12 @@ else if get_stage("%(bcb_dev)s", "stage") != "3/3" then
                            target_boot.size, target_boot.sha1))
     so_far += source_boot.size
 
-  if patch_list or updating_recovery or updating_boot:
-    script.CacheFreeSpaceCheck(largest_source_size)
+  size = []
+  if system_diff.patch_list: size.append(system_diff.largest_source_size)
+  if vendor_diff:
+    if vendor_diff.patch_list: size.append(vendor_diff.largest_source_size)
+  if size or updating_recovery or updating_boot:
+    script.CacheFreeSpaceCheck(max(size))
 
   device_specific.IncrementalOTA_VerifyEnd()
 
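
One edge case in the rewritten free-space check: size can be an empty list while updating_recovery or updating_boot is true, and max([]) raises ValueError. The old code could not hit this, since largest_source_size was always an int (possibly 0). A defensive variant, hypothetical and not part of this patch, would be:

    if size or updating_recovery or updating_boot:
      script.CacheFreeSpaceCheck(max(size) if size else 0)
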
@@ -1122,30 +1244,22 @@ else
     print "writing full boot image (forced by two-step mode)"
 
   script.Print("Removing unneeded files...")
-  script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
-                     ["/"+i for i in sorted(source_data)
-                      if i not in target_data and
-                      i not in renames] +
-                     ["/system/recovery.img"])
+  system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
+  if vendor_diff:
+    vendor_diff.RemoveUnneededFiles(script)
 
   script.ShowProgress(0.8, 0)
-  total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
+  total_patch_size = 1.0 + system_diff.TotalPatchSize()
+  if vendor_diff:
+    total_patch_size += vendor_diff.TotalPatchSize()
   if updating_boot:
     total_patch_size += target_boot.size
-  so_far = 0
 
   script.Print("Patching system files...")
-  deferred_patch_list = []
-  for item in patch_list:
-    tf, sf, size, _ = item
-    if tf.name == "system/build.prop":
-      deferred_patch_list.append(item)
-      continue
-    if (sf.name != tf.name):
-      script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-    script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
-    so_far += tf.size
-    script.SetProgress(so_far / total_patch_size)
+  so_far = system_diff.EmitPatches(script, total_patch_size, 0)
+  if vendor_diff:
+    script.Print("Patching vendor files...")
+    so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
 
   if not OPTIONS.two_step:
     if updating_boot:
@@ -1166,6 +1280,10 @@ else
     else:
       print "boot image unchanged; skipping."
 
+  system_items = ItemSet("system", "META/filesystem_config.txt")
+  if vendor_diff:
+    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
+
   if updating_recovery:
     # Recovery is generated as a patch using both the boot image
     # (which contains the same linux kernel as recovery) and the file
@@ -1179,7 +1297,7 @@ else
     if not target_has_recovery_patch:
       def output_sink(fn, data):
         common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-        Item.Get("system/" + fn, dir=False)
+        system_items.Get("system/" + fn, dir=False)
 
     common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
                              target_recovery, target_boot)
@@ -1191,16 +1309,24 @@ else
 
   script.ShowProgress(0.1, 10)
 
-  target_symlinks = CopySystemFiles(target_zip, None)
+  target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
+  if vendor_diff:
+    target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
+
+  temp_script = script.MakeTemporary()
+  system_items.GetMetadata(target_zip)
+  system_items.Get("system").SetPermissions(temp_script)
+  if vendor_diff:
+    vendor_items.GetMetadata(target_zip)
+    vendor_items.Get("vendor").SetPermissions(temp_script)
+
+  # Note that this call will mess up the trees of Items, so make sure
+  # we're done with them.
+  source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
+  if vendor_diff:
+    source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
+
   target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
-  temp_script = script.MakeTemporary()
-  Item.GetMetadata(target_zip)
-  Item.Get("system").SetPermissions(temp_script)
-
-  # Note that this call will mess up the tree of Items, so make sure
-  # we're done with it.
-  source_symlinks = CopySystemFiles(source_zip, None)
   source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
 
   # Delete all the symlinks in source that aren't in target.  This
@@ -1212,20 +1338,20 @@ else
         to_delete.append(link)
   script.DeleteFiles(to_delete)
 
-  if verbatim_targets:
-    script.Print("Unpacking new files...")
+  if system_diff.verbatim_targets:
+    script.Print("Unpacking new system files...")
     script.UnpackPackageDir("system", "/system")
+  if vendor_diff and vendor_diff.verbatim_targets:
+    script.Print("Unpacking new vendor files...")
+    script.UnpackPackageDir("vendor", "/vendor")
 
   if updating_recovery and not target_has_recovery_patch:
     script.Print("Unpacking new recovery...")
     script.UnpackPackageDir("recovery", "/system")
 
-  if len(renames) > 0:
-    script.Print("Renaming files...")
-
-    for src in renames:
-      print "Renaming " + src + " to " + renames[src].name
-      script.RenameFile(src, renames[src].name)
+  system_diff.EmitRenames(script)
+  if vendor_diff:
+    vendor_diff.EmitRenames(script)
 
   script.Print("Symlinks and permissions...")
@@ -1256,10 +1382,7 @@ else
   # device can still come up, it appears to be the old build and will
   # get set the OTA package again to retry.
   script.Print("Patching remaining system files...")
-  for item in deferred_patch_list:
-    tf, sf, size, _ = item
-    script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1, "patch/"+sf.name+".p")
-  script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)
+  system_diff.EmitDeferredPatches(script)
 
   if OPTIONS.wipe_user_data:
     script.Print("Erasing user data...")