imgdiff now understands the zip file format and can produce smaller patches by doing the patching in uncompressed space. Start taking advantage of this for .zip, .apk, and .jar files.
#!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Given a target-files zipfile, produces an OTA package that installs
that build. An incremental OTA is produced if -i is given, otherwise
a full OTA is produced.

Usage: ota_from_target_files [flags] input_target_files output_ota_package

  -b (--board_config) <file>
      Deprecated.

  -k (--package_key) <key>
      Key to use to sign the package (default is
      "build/target/product/security/testkey").

  -i (--incremental_from) <file>
      Generate an incremental OTA using the given target-files zip as
      the starting build.

  -w (--wipe_user_data)
      Generate an OTA package that will wipe the user data partition
      when installed.

  -n (--no_prereq)
      Omit the timestamp prereq check normally included at the top of
      the build scripts (used for developer OTA packages which
      legitimately need to go back and forth).

  -e (--extra_script) <file>
      Insert the contents of file at the end of the update script.

  -m (--script_mode) <mode>
      Specify 'amend' or 'edify' scripts, or 'auto' to pick
      automatically (this is the default).

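For example (the target-files and output file names below are only
placeholders), a full OTA and an incremental OTA could be built with:

  ota_from_target_files new-target-files.zip full-ota.zip
  ota_from_target_files -i old-target-files.zip new-target-files.zip incremental-ota.zip
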
"""
|
|
|
|
import sys
|
|
|
|
if sys.hexversion < 0x02040000:
|
|
print >> sys.stderr, "Python 2.4 or newer is required."
|
|
sys.exit(1)
|
|
|
|
import copy
import os
import re
import sha
import subprocess
import tempfile
import time
import zipfile

import common
import amend_generator
import edify_generator
import both_generator

OPTIONS = common.OPTIONS
OPTIONS.package_key = "build/target/product/security/testkey"
OPTIONS.incremental_source = None
OPTIONS.require_verbatim = set()
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.omit_prereq = False
OPTIONS.extra_script = None
OPTIONS.script_mode = 'auto'
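# A note on the knobs above, as they are used later in this script:
# files named in require_verbatim are always copied into the package
# as-is, files named in prohibit_verbatim may never be (an error is
# raised if one of them would have to be sent verbatim), and
# patch_threshold is the cutoff used when diffing -- if a patch turns
# out to be larger than 95% of the target file's size, the file is
# shipped verbatim instead of as a patch.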

def MostPopularKey(d, default):
  """Given a dict, return the key corresponding to the largest
  value. Returns 'default' if the dict is empty."""
  x = [(v, k) for (k, v) in d.iteritems()]
  if not x: return default
  x.sort()
  return x[-1][1]
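# Illustrative example (not executed): MostPopularKey({"a": 1, "b": 3,
# "c": 3}, None) returns "c" -- entries are compared as (value, key)
# tuples, so ties on the value are broken in favor of the larger key.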


def IsSymlink(info):
  """Return true if the zipfile.ZipInfo object passed in represents a
  symlink."""
  return (info.external_attr >> 16) == 0120777
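# The constant above is the Unix mode of a symlink: the upper 16 bits
# of a ZipInfo's external_attr hold the st_mode bits when the entry was
# created on a Unix host, and 0120777 is S_IFLNK (0120000) plus 0777
# permission bits.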


class Item:
  """Items represent the metadata (user, group, mode) of files and
  directories in the system image."""
  ITEMS = {}
  def __init__(self, name, dir=False):
    self.name = name
    self.uid = None
    self.gid = None
    self.mode = None
    self.dir = dir

    if name:
      self.parent = Item.Get(os.path.dirname(name), dir=True)
      self.parent.children.append(self)
    else:
      self.parent = None
    if dir:
      self.children = []

  def Dump(self, indent=0):
    if self.uid is not None:
      print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
    else:
      print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
    if self.dir:
      print "%s%s" % (" "*indent, self.descendants)
      print "%s%s" % (" "*indent, self.best_subtree)
      for i in self.children:
        i.Dump(indent=indent+1)

  @classmethod
  def Get(cls, name, dir=False):
    if name not in cls.ITEMS:
      cls.ITEMS[name] = Item(name, dir=dir)
    return cls.ITEMS[name]

  @classmethod
  def GetMetadata(cls):
    """Run the external 'fs_config' program to determine the desired
    uid, gid, and mode for every Item object."""
    p = common.Run(["fs_config"], stdin=subprocess.PIPE,
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    suffix = { False: "", True: "/" }
    input = "".join(["%s%s\n" % (i.name, suffix[i.dir])
                     for i in cls.ITEMS.itervalues() if i.name])
    output, error = p.communicate(input)
    assert not error

    for line in output.split("\n"):
      if not line: continue
      name, uid, gid, mode = line.split()
      i = cls.ITEMS[name]
      i.uid = int(uid)
      i.gid = int(gid)
      i.mode = int(mode, 8)
      if i.dir:
        i.children.sort(key=lambda i: i.name)

  def CountChildMetadata(self):
    """Count up the (uid, gid, mode) tuples for all children and
    determine the best strategy for using set_perm_recursive and
    set_perm to correctly chown/chmod all the files to their desired
    values. Recursively calls itself for all descendants.

    Returns a dict of {(uid, gid, dmode, fmode): count} counting up
    all descendants of this node. (dmode or fmode may be None.) Also
    sets the best_subtree of each directory Item to the (uid, gid,
    dmode, fmode) tuple that will match the most descendants of that
    Item.
    """

    assert self.dir
    d = self.descendants = {(self.uid, self.gid, self.mode, None): 1}
    for i in self.children:
      if i.dir:
        for k, v in i.CountChildMetadata().iteritems():
          d[k] = d.get(k, 0) + v
      else:
        k = (i.uid, i.gid, None, i.mode)
        d[k] = d.get(k, 0) + 1

    # Find the (uid, gid, dmode, fmode) tuple that matches the most
    # descendants.

    # First, find the (uid, gid) pair that matches the most
    # descendants.
    ug = {}
    for (uid, gid, _, _), count in d.iteritems():
      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
    ug = MostPopularKey(ug, (0, 0))

    # Now find the dmode and fmode that match the most descendants
    # with that (uid, gid), and choose those.
    best_dmode = (0, 0755)
    best_fmode = (0, 0644)
    for k, count in d.iteritems():
      if k[:2] != ug: continue
      if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2])
      if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3])
    self.best_subtree = ug + (best_dmode[1], best_fmode[1])

    return d

  def SetPermissions(self, script):
    """Append set_perm/set_perm_recursive commands to 'script' to
    set all permissions, users, and groups for the tree of files
    rooted at 'self'."""

    self.CountChildMetadata()

    def recurse(item, current):
      # current is the (uid, gid, dmode, fmode) tuple that the current
      # item (and all its children) have already been set to. We only
      # need to issue set_perm/set_perm_recursive commands if we're
      # supposed to be something different.
      if item.dir:
        if current != item.best_subtree:
          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
          current = item.best_subtree

        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[2]:
          script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)

        for i in item.children:
          recurse(i, current)
      else:
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[3]:
          script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)

    recurse(self, (-1, -1, -1, -1))
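# For orientation, the Item tree built above is used in three steps by
# the package writers below: CopySystemFiles() registers every file and
# directory via Item.Get(), Item.GetMetadata() then asks fs_config for
# the desired uid/gid/mode of each entry, and finally
# Item.Get("system").SetPermissions(script) emits the set_perm /
# set_perm_recursive commands that restore that metadata on the device.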


def CopySystemFiles(input_zip, output_zip=None,
                    substitute=None):
  """Copies files underneath system/ in the input zip to the output
  zip. Populates the Item class with their metadata, and returns a
  list of symlinks. output_zip may be None, in which case the copy is
  skipped (but the other side effects still happen). substitute is an
  optional dict of {output filename: contents} to be output instead of
  certain input files.
  """

  symlinks = []

  for info in input_zip.infolist():
    if info.filename.startswith("SYSTEM/"):
      basefilename = info.filename[7:]
      if IsSymlink(info):
        symlinks.append((input_zip.read(info.filename),
                         "/system/" + basefilename))
      else:
        info2 = copy.copy(info)
        fn = info2.filename = "system/" + basefilename
        if substitute and fn in substitute and substitute[fn] is None:
          continue
        if output_zip is not None:
          if substitute and fn in substitute:
            data = substitute[fn]
          else:
            data = input_zip.read(info.filename)
          output_zip.writestr(info2, data)
        if fn.endswith("/"):
          Item.Get(fn[:-1], dir=True)
        else:
          Item.Get(fn, dir=False)

  symlinks.sort()
  return symlinks


def SignOutput(temp_zip_name, output_zip_name):
  key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
  pw = key_passwords[OPTIONS.package_key]

  common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
                  whole_file=True)


def AppendAssertions(script, input_zip):
  device = GetBuildProp("ro.product.device", input_zip)
  script.AssertDevice(device)


def MakeRecoveryPatch(output_zip, recovery_img, boot_img):
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.

  Returns an Item for the shell script, which must be made
  executable.
  """

  patch = Difference(recovery_img, boot_img, "imgdiff")
  common.ZipWriteStr(output_zip, "system/recovery-from-boot.p", patch)
  Item.Get("system/recovery-from-boot.p", dir=False)

  # Images with different content will have a different first page, so
  # we check to see if this recovery has already been installed by
  # testing just the first 2k.
  HEADER_SIZE = 2048
  header_sha1 = sha.sha(recovery_img.data[:HEADER_SIZE]).hexdigest()
  sh = """#!/system/bin/sh
if ! applypatch -c MTD:recovery:%(header_size)d:%(header_sha1)s; then
  log -t recovery "Installing new recovery image"
  applypatch MTD:boot:%(boot_size)d:%(boot_sha1)s MTD:recovery %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p
else
  log -t recovery "Recovery image already installed"
fi
""" % { 'boot_size': boot_img.size,
        'boot_sha1': boot_img.sha1,
        'header_size': HEADER_SIZE,
        'header_sha1': header_sha1,
        'recovery_size': recovery_img.size,
        'recovery_sha1': recovery_img.sha1 }
  common.ZipWriteStr(output_zip, "system/etc/install-recovery.sh", sh)
  return Item.Get("system/etc/install-recovery.sh", dir=False)
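# Judging by the comment above, the "applypatch -c" line in the
# generated install-recovery.sh succeeds when the first HEADER_SIZE
# (2k) bytes of the recovery partition already hash to header_sha1,
# i.e. the new recovery is already in place; otherwise the boot
# partition contents are used as the source to which
# recovery-from-boot.p is applied.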


def WriteFullOTAPackage(input_zip, output_zip):
  if OPTIONS.script_mode == "auto":
    script = both_generator.BothGenerator(2)
  elif OPTIONS.script_mode == "amend":
    script = amend_generator.AmendGenerator()
  else:
    # TODO: how to determine this? We don't know what version it will
    # be installed on top of. For now, we expect the API just won't
    # change very often.
    script = edify_generator.EdifyGenerator(2)

  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp)

  if not OPTIONS.omit_prereq:
    ts = GetBuildProp("ro.build.date.utc", input_zip)
    script.AssertOlderBuild(ts)

  AppendAssertions(script, input_zip)
  device_specific.FullOTA_Assertions()

  script.ShowProgress(0.5, 0)

  if OPTIONS.wipe_user_data:
    script.FormatPartition("userdata")

  script.FormatPartition("system")
  script.Mount("MTD", "system", "/system")
  script.UnpackPackageDir("system", "/system")

  symlinks = CopySystemFiles(input_zip, output_zip)
  script.MakeSymlinks(symlinks)

  boot_img = File("boot.img", common.BuildBootableImage(
      os.path.join(OPTIONS.input_tmp, "BOOT")))
  recovery_img = File("recovery.img", common.BuildBootableImage(
      os.path.join(OPTIONS.input_tmp, "RECOVERY")))
  i = MakeRecoveryPatch(output_zip, recovery_img, boot_img)

  Item.GetMetadata()

  # GetMetadata uses the data in android_filesystem_config.h to assign
  # the uid/gid/mode of all files. We want to override that for the
  # recovery patching shell script to make it executable.
  i.uid = 0
  i.gid = 0
  i.mode = 0544
  Item.Get("system").SetPermissions(script)

  common.CheckSize(boot_img.data, "boot.img")
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
  script.ShowProgress(0.2, 0)

  script.ShowProgress(0.2, 10)
  script.WriteRawImage("boot", "boot.img")

  script.ShowProgress(0.1, 0)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.AddToZip(input_zip, output_zip)


class File(object):
  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha.sha(data).hexdigest()

  def WriteToTemp(self):
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z):
    common.ZipWriteStr(z, self.name, self.data)


def LoadSystemFiles(z):
  """Load all the files from SYSTEM/... in a given target-files
  ZipFile, and return a dict of {filename: File object}."""
  out = {}
  for info in z.infolist():
    if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
      fn = "system/" + info.filename[7:]
      data = z.read(info.filename)
      out[fn] = File(fn, data)
  return out


def Difference(tf, sf, diff_program):
  """Return the patch (as a string of data) needed to turn sf into tf.
  diff_program is the name of an external program (or list, if
  additional arguments are desired) to run to generate the diff.
  """

  ttemp = tf.WriteToTemp()
  stemp = sf.WriteToTemp()

  ext = os.path.splitext(tf.name)[1]

  try:
    ptemp = tempfile.NamedTemporaryFile()
    if isinstance(diff_program, list):
      cmd = copy.copy(diff_program)
    else:
      cmd = [diff_program]
    cmd.append(stemp.name)
    cmd.append(ttemp.name)
    cmd.append(ptemp.name)
    p = common.Run(cmd)
    _, err = p.communicate()
    if err or p.returncode != 0:
      print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
      return None
    diff = ptemp.read()
  finally:
    ptemp.close()
    stemp.close()
    ttemp.close()

  return diff
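# The external diff tool is invoked as "<diff_program> <source-file>
# <target-file> <patch-output>" (see the cmd list built above); with
# the DIFF_METHOD_BY_EXT table further down this ends up being either
# plain bsdiff or "imgdiff [-z]" depending on the target's extension.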


def GetBuildProp(property, z):
  """Return the value of the given build property from the build.prop
  file in a target-files ZipFile object (or the entire build.prop
  contents if 'property' is empty or None)."""
  bp = z.read("SYSTEM/build.prop")
  if not property:
    return bp
  m = re.search(re.escape(property) + r"=(.*)\n", bp)
  if not m:
    raise common.ExternalError("couldn't find %s in build.prop" % (property,))
  return m.group(1).strip()
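# build.prop is a plain key=value file, one property per line, so e.g.
# GetBuildProp("ro.product.device", z) returns whatever follows
# "ro.product.device=" on its line (the regex above matches on the
# property name and captures the rest of the line).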


def GetRecoveryAPIVersion(zip):
  """Returns the version of the recovery API. Version 0 is the older
  amend code (no separate binary)."""
  try:
    version = zip.read("META/recovery-api-version.txt")
    return int(version)
  except KeyError:
    try:
      # version one didn't have the recovery-api-version.txt file, but
      # it did include an updater binary.
      zip.getinfo("OTA/bin/updater")
      return 1
    except KeyError:
      return 0
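# Summary of the detection above: newer builds record their API version
# in META/recovery-api-version.txt; version 1 builds lack that file but
# ship an OTA/bin/updater binary; anything older is treated as version
# 0 (the amend-only recovery).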


DIFF_METHOD_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    }
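# Zip-based formats (.zip, .jar, .apk) are diffed with "imgdiff -z",
# which understands the zip file format and computes the patch against
# the uncompressed contents, producing much smaller patches than
# diffing the compressed archives directly; .gz images also go through
# imgdiff, and every other extension falls back to plain bsdiff (see
# the .get(ext, "bsdiff") lookup in WriteIncrementalOTAPackage below).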


def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
  source_version = GetRecoveryAPIVersion(source_zip)

  if OPTIONS.script_mode == 'amend':
    script = amend_generator.AmendGenerator()
  elif OPTIONS.script_mode == 'edify':
    if source_version == 0:
      print ("WARNING: generating edify script for a source that "
             "can't install it.")
    script = edify_generator.EdifyGenerator(source_version)
  elif OPTIONS.script_mode == 'auto':
    if source_version > 0:
      script = edify_generator.EdifyGenerator(source_version)
    else:
      script = amend_generator.AmendGenerator()
  else:
    raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      target_zip=target_zip,
      output_zip=output_zip,
      script=script)

  print "Loading target..."
  target_data = LoadSystemFiles(target_zip)
  print "Loading source..."
  source_data = LoadSystemFiles(source_zip)

  verbatim_targets = []
  patch_list = []
  largest_source_size = 0
  for fn in sorted(target_data.keys()):
    tf = target_data[fn]
    sf = source_data.get(fn, None)

    if sf is None or fn in OPTIONS.require_verbatim:
      # This file should be included verbatim
      if fn in OPTIONS.prohibit_verbatim:
        raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
      print "send", fn, "verbatim"
      tf.AddToZip(output_zip)
      verbatim_targets.append((fn, tf.size))
    elif tf.sha1 != sf.sha1:
      # File is different; consider sending as a patch
      ext = os.path.splitext(tf.name)[1]
      diff_method = DIFF_METHOD_BY_EXT.get(ext, "bsdiff")
      d = Difference(tf, sf, diff_method)
      if d is not None:
        print fn, tf.size, len(d), (float(len(d)) / tf.size)
      if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
        # patch is almost as big as the file; don't bother patching
        tf.AddToZip(output_zip)
        verbatim_targets.append((fn, tf.size))
      else:
        common.ZipWriteStr(output_zip, "patch/" + fn + ".p", d)
        patch_list.append((fn, tf, sf, tf.size))
        largest_source_size = max(largest_source_size, sf.size)
    else:
      # Target file identical to source.
      pass

  total_verbatim_size = sum([i[1] for i in verbatim_targets])
  total_patched_size = sum([i[3] for i in patch_list])

  source_fp = GetBuildProp("ro.build.fingerprint", source_zip)
  target_fp = GetBuildProp("ro.build.fingerprint", target_zip)

  script.Mount("MTD", "system", "/system")
  script.AssertSomeFingerprint(source_fp, target_fp)

  source_boot = File("/tmp/boot.img",
                     common.BuildBootableImage(
                         os.path.join(OPTIONS.source_tmp, "BOOT")))
  target_boot = File("/tmp/boot.img",
                     common.BuildBootableImage(
                         os.path.join(OPTIONS.target_tmp, "BOOT")))
  updating_boot = (source_boot.data != target_boot.data)

  source_recovery = File("system/recovery.img",
                         common.BuildBootableImage(
                             os.path.join(OPTIONS.source_tmp, "RECOVERY")))
  target_recovery = File("system/recovery.img",
                         common.BuildBootableImage(
                             os.path.join(OPTIONS.target_tmp, "RECOVERY")))
  updating_recovery = (source_recovery.data != target_recovery.data)

  # We reserve the last 0.3 of the progress bar for the
  # device-specific IncrementalOTA_InstallEnd() call at the end, which
  # will typically install a radio image.
  progress_bar_total = 0.7
  if updating_boot:
    progress_bar_total -= 0.1

  AppendAssertions(script, target_zip)
  device_specific.IncrementalOTA_Assertions()

  script.Print("Verifying current system...")

  pb_verify = progress_bar_total * 0.3 * \
              (total_patched_size /
               float(total_patched_size+total_verbatim_size+1))

  for i, (fn, tf, sf, size) in enumerate(patch_list):
    if i % 5 == 0:
      next_sizes = sum([i[3] for i in patch_list[i:i+5]])
      script.ShowProgress(next_sizes * pb_verify / (total_patched_size+1), 1)

    script.PatchCheck("/"+fn, tf.sha1, sf.sha1)

  if updating_boot:
    d = Difference(target_boot, source_boot, "imgdiff")
    print "boot target: %d source: %d diff: %d" % (
        target_boot.size, source_boot.size, len(d))

    common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

    script.PatchCheck("MTD:boot:%d:%s:%d:%s" %
                      (source_boot.size, source_boot.sha1,
                       target_boot.size, target_boot.sha1))

  if patch_list or updating_recovery or updating_boot:
    script.CacheFreeSpaceCheck(largest_source_size)
    script.Print("Unpacking patches...")
    script.UnpackPackageDir("patch", "/tmp/patchtmp")

  device_specific.IncrementalOTA_VerifyEnd()

  script.Comment("---- start making changes here ----")

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("userdata")

  script.Print("Removing unneeded files...")
  script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
                     ["/"+i for i in sorted(source_data)
                      if i not in target_data] +
                     ["/system/recovery.img"])

  if updating_boot:
    # Produce the boot image by applying a patch to the current
    # contents of the boot partition, and write it back to the
    # partition.
    script.Print("Patching boot image...")
    script.ApplyPatch("MTD:boot:%d:%s:%d:%s"
                      % (source_boot.size, source_boot.sha1,
                         target_boot.size, target_boot.sha1),
                      "-",
                      target_boot.size, target_boot.sha1,
                      source_boot.sha1, "/tmp/patchtmp/boot.img.p")
    print "boot image changed; including."
  else:
    print "boot image unchanged; skipping."

  if updating_recovery:
    # Is it better to generate recovery as a patch from the current
    # boot image, or from the previous recovery image? For large
    # updates with significant kernel changes, probably the former.
    # For small updates where the kernel hasn't changed, almost
    # certainly the latter. We pick the first option. Future
    # complicated schemes may let us effectively use both.
    #
    # A wacky possibility: as long as there is room in the boot
    # partition, include the binaries and image files from recovery in
    # the boot image (though not in the ramdisk) so they can be used
    # as fodder for constructing the recovery image.
    recovery_sh_item = MakeRecoveryPatch(output_zip,
                                         target_recovery, target_boot)
    print "recovery image changed; including as patch from boot."
  else:
    print "recovery image unchanged; skipping."

  script.Print("Patching system files...")
  pb_apply = progress_bar_total * 0.7 * \
             (total_patched_size /
              float(total_patched_size+total_verbatim_size+1))
  for i, (fn, tf, sf, size) in enumerate(patch_list):
    if i % 5 == 0:
      next_sizes = sum([i[3] for i in patch_list[i:i+5]])
      script.ShowProgress(next_sizes * pb_apply / (total_patched_size+1), 1)
    script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1,
                      sf.sha1, "/tmp/patchtmp/"+fn+".p")

  target_symlinks = CopySystemFiles(target_zip, None)

  target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
  temp_script = script.MakeTemporary()
  Item.GetMetadata()
  if updating_recovery:
    recovery_sh_item.uid = 0
    recovery_sh_item.gid = 0
    recovery_sh_item.mode = 0544
  Item.Get("system").SetPermissions(temp_script)

  # Note that this call will mess up the tree of Items, so make sure
  # we're done with it.
  source_symlinks = CopySystemFiles(source_zip, None)
  source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])

  # Delete all the symlinks in source that aren't in target. This
  # needs to happen before verbatim files are unpacked, in case a
  # symlink in the source is replaced by a real file in the target.
  to_delete = []
  for dest, link in source_symlinks:
    if link not in target_symlinks_d:
      to_delete.append(link)
  script.DeleteFiles(to_delete)

  if verbatim_targets:
    pb_verbatim = progress_bar_total * \
                  (total_verbatim_size /
                   float(total_patched_size+total_verbatim_size+1))
    script.ShowProgress(pb_verbatim, 5)
    script.Print("Unpacking new files...")
    script.UnpackPackageDir("system", "/system")

  script.Print("Symlinks and permissions...")

  # Create all the symlinks that don't already exist, or point to
  # somewhere different than what we want. Delete each symlink before
  # creating it, since the 'symlink' command won't overwrite.
  to_create = []
  for dest, link in target_symlinks:
    if link in source_symlinks_d:
      if dest != source_symlinks_d[link]:
        to_create.append((dest, link))
    else:
      to_create.append((dest, link))
  script.DeleteFiles([i[1] for i in to_create])
  script.MakeSymlinks(to_create)

  # Now that the symlinks are created, we can set all the
  # permissions.
  script.AppendScript(temp_script)

  # Write the radio image, if necessary.
  script.ShowProgress(0.3, 10)
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.AddToZip(target_zip, output_zip)
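# Note that the incremental package generated above is two-phase: all
# the PatchCheck calls and the cache free-space check are emitted
# before the "---- start making changes here ----" comment, so the
# device checks the files to be patched and the available working space
# before it starts modifying any partition.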


def main(argv):

  def option_handler(o, a):
    if o in ("-b", "--board_config"):
      pass  # deprecated
    elif o in ("-k", "--package_key"):
      OPTIONS.package_key = a
    elif o in ("-i", "--incremental_from"):
      OPTIONS.incremental_source = a
    elif o in ("-w", "--wipe_user_data"):
      OPTIONS.wipe_user_data = True
    elif o in ("-n", "--no_prereq"):
      OPTIONS.omit_prereq = True
    elif o in ("-e", "--extra_script"):
      OPTIONS.extra_script = a
    elif o in ("-m", "--script_mode"):
      OPTIONS.script_mode = a
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="b:k:i:d:wne:m:",
                             extra_long_opts=["board_config=",
                                              "package_key=",
                                              "incremental_from=",
                                              "wipe_user_data",
                                              "no_prereq",
                                              "extra_script=",
                                              "script_mode="],
                             extra_option_handler=option_handler)

  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  if OPTIONS.script_mode not in ("amend", "edify", "auto"):
    raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))

  if OPTIONS.extra_script is not None:
    OPTIONS.extra_script = open(OPTIONS.extra_script).read()

  print "unzipping target target-files..."
  OPTIONS.input_tmp = common.UnzipTemp(args[0])

  common.LoadMaxSizes()
  if not OPTIONS.max_image_size:
    print
    print "  WARNING: Failed to load max image sizes; will not enforce"
    print "  image size limits."
    print

  OPTIONS.target_tmp = OPTIONS.input_tmp
  input_zip = zipfile.ZipFile(args[0], "r")
  if OPTIONS.package_key:
    temp_zip_file = tempfile.NamedTemporaryFile()
    output_zip = zipfile.ZipFile(temp_zip_file, "w",
                                 compression=zipfile.ZIP_DEFLATED)
  else:
    output_zip = zipfile.ZipFile(args[1], "w",
                                 compression=zipfile.ZIP_DEFLATED)

  if OPTIONS.incremental_source is None:
    WriteFullOTAPackage(input_zip, output_zip)
  else:
    print "unzipping source target-files..."
    OPTIONS.source_tmp = common.UnzipTemp(OPTIONS.incremental_source)
    source_zip = zipfile.ZipFile(OPTIONS.incremental_source, "r")
    WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)

  output_zip.close()
  if OPTIONS.package_key:
    SignOutput(temp_zip_file.name, args[1])
    temp_zip_file.close()

  common.Cleanup()

  print "done."


if __name__ == '__main__':
  try:
    main(sys.argv[1:])
  except common.ExternalError, e:
    print
    print "  ERROR: %s" % (e,)
    print
    sys.exit(1)