Merge korg/donut into korg/master

Conflicts:

	core/apicheck_msg_current.txt
	core/combo/linux-arm.mk
	core/prelink-linux-arm.map
	tools/droiddoc/src/ClassInfo.java
This commit is contained in:
Jean-Baptiste Queru
2009-07-25 16:58:22 -07:00
68 changed files with 3580 additions and 1091 deletions

View File

@@ -12,18 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
ifneq ($(TARGET_SIMULATOR),true)
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
ifneq ($(TARGET_SIMULATOR),true)
LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c imgpatch.c
LOCAL_MODULE := libapplypatch
LOCAL_MODULE_TAGS := eng
LOCAL_C_INCLUDES += external/bzip2 external/zlib bootable/recovery
LOCAL_STATIC_LIBRARIES += libmtdutils libmincrypt libbz libz
LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c
include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := main.c
LOCAL_MODULE := applypatch
LOCAL_FORCE_STATIC_EXECUTABLE := true
LOCAL_MODULE_TAGS := eng
LOCAL_C_INCLUDES += external/bzip2
LOCAL_STATIC_LIBRARIES += libmincrypt libbz libc
LOCAL_STATIC_LIBRARIES += libapplypatch
LOCAL_STATIC_LIBRARIES += libmtdutils libmincrypt libbz libz
LOCAL_STATIC_LIBRARIES += libcutils libstdc++ libc
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := imgdiff.c
LOCAL_MODULE := imgdiff
LOCAL_FORCE_STATIC_EXECUTABLE := true
LOCAL_MODULE_TAGS := eng
LOCAL_C_INCLUDES += external/zlib
LOCAL_STATIC_LIBRARIES += libz
include $(BUILD_HOST_EXECUTABLE)
endif # !TARGET_SIMULATOR

View File

@@ -25,12 +25,25 @@
#include "mincrypt/sha.h"
#include "applypatch.h"
#include "mtdutils/mtdutils.h"
int SaveFileContents(const char* filename, FileContents file);
int LoadMTDContents(const char* filename, FileContents* file);
int ParseSha1(const char* str, uint8_t* digest);
static int mtd_partitions_scanned = 0;
// Read a file into memory; store it and its associated metadata in
// *file. Return 0 on success.
int LoadFileContents(const char* filename, FileContents* file) {
file->data = NULL;
// A special 'filename' beginning with "MTD:" means to load the
// contents of an MTD partition.
if (strncmp(filename, "MTD:", 4) == 0) {
return LoadMTDContents(filename, file);
}
if (stat(filename, &file->st) != 0) {
fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
return -1;
@@ -43,6 +56,7 @@ int LoadFileContents(const char* filename, FileContents* file) {
if (f == NULL) {
fprintf(stderr, "failed to open \"%s\": %s\n", filename, strerror(errno));
free(file->data);
file->data = NULL;
return -1;
}
@@ -51,6 +65,7 @@ int LoadFileContents(const char* filename, FileContents* file) {
fprintf(stderr, "short read of \"%s\" (%d bytes of %d)\n",
filename, bytes_read, file->size);
free(file->data);
file->data = NULL;
return -1;
}
fclose(f);
@@ -59,6 +74,182 @@ int LoadFileContents(const char* filename, FileContents* file) {
return 0;
}
static size_t* size_array;
// comparison function for qsort()ing an int array of indexes into
// size_array[].
static int compare_size_indices(const void* a, const void* b) {
int aa = *(int*)a;
int bb = *(int*)b;
if (size_array[aa] < size_array[bb]) {
return -1;
} else if (size_array[aa] > size_array[bb]) {
return 1;
} else {
return 0;
}
}
// Load the contents of an MTD partition into the provided
// FileContents. filename should be a string of the form
// "MTD:<partition_name>:<size_1>:<sha1_1>:<size_2>:<sha1_2>:...".
// The smallest size_n bytes for which that prefix of the mtd contents
// has the corresponding sha1 hash will be loaded. It is acceptable
// for a size value to be repeated with different sha1s. Will return
// 0 on success.
//
// This complexity is needed because if an OTA installation is
// interrupted, the partition might contain either the source or the
// target data, which might be of different lengths. We need to know
// the length in order to read from MTD (there is no "end-of-file"
// marker), so the caller must specify the possible lengths and the
// hash of the data, and we'll do the load expecting to find one of
// those hashes.
int LoadMTDContents(const char* filename, FileContents* file) {
char* copy = strdup(filename);
const char* magic = strtok(copy, ":");
if (strcmp(magic, "MTD") != 0) {
fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
filename);
return -1;
}
const char* partition = strtok(NULL, ":");
int i;
int colons = 0;
for (i = 0; filename[i] != '\0'; ++i) {
if (filename[i] == ':') {
++colons;
}
}
if (colons < 3 || colons%2 == 0) {
fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
filename);
}
int pairs = (colons-1)/2; // # of (size,sha1) pairs in filename
int* index = malloc(pairs * sizeof(int));
size_t* size = malloc(pairs * sizeof(size_t));
char** sha1sum = malloc(pairs * sizeof(char*));
for (i = 0; i < pairs; ++i) {
const char* size_str = strtok(NULL, ":");
size[i] = strtol(size_str, NULL, 10);
if (size[i] == 0) {
fprintf(stderr, "LoadMTDContents called with bad size (%s)\n", filename);
return -1;
}
sha1sum[i] = strtok(NULL, ":");
index[i] = i;
}
// sort the index[] array so it indexes the pairs in order of
// increasing size.
size_array = size;
qsort(index, pairs, sizeof(int), compare_size_indices);
if (!mtd_partitions_scanned) {
mtd_scan_partitions();
mtd_partitions_scanned = 1;
}
const MtdPartition* mtd = mtd_find_partition_by_name(partition);
if (mtd == NULL) {
fprintf(stderr, "mtd partition \"%s\" not found (loading %s)\n",
partition, filename);
return -1;
}
MtdReadContext* ctx = mtd_read_partition(mtd);
if (ctx == NULL) {
fprintf(stderr, "failed to initialize read of mtd partition \"%s\"\n",
partition);
return -1;
}
SHA_CTX sha_ctx;
SHA_init(&sha_ctx);
uint8_t parsed_sha[SHA_DIGEST_SIZE];
// allocate enough memory to hold the largest size.
file->data = malloc(size[index[pairs-1]]);
char* p = (char*)file->data;
file->size = 0; // # bytes read so far
for (i = 0; i < pairs; ++i) {
// Read enough additional bytes to get us up to the next size
// (again, we're trying the possibilities in order of increasing
// size).
size_t next = size[index[i]] - file->size;
size_t read = 0;
if (next > 0) {
read = mtd_read_data(ctx, p, next);
if (next != read) {
fprintf(stderr, "short read (%d bytes of %d) for partition \"%s\"\n",
read, next, partition);
free(file->data);
file->data = NULL;
return -1;
}
SHA_update(&sha_ctx, p, read);
file->size += read;
}
// Duplicate the SHA context and finalize the duplicate so we can
// check it against this pair's expected hash.
SHA_CTX temp_ctx;
memcpy(&temp_ctx, &sha_ctx, sizeof(SHA_CTX));
const uint8_t* sha_so_far = SHA_final(&temp_ctx);
if (ParseSha1(sha1sum[index[i]], parsed_sha) != 0) {
fprintf(stderr, "failed to parse sha1 %s in %s\n",
sha1sum[index[i]], filename);
free(file->data);
file->data = NULL;
return -1;
}
if (memcmp(sha_so_far, parsed_sha, SHA_DIGEST_SIZE) == 0) {
// we have a match. stop reading the partition; we'll return
// the data we've read so far.
printf("mtd read matched size %d sha %s\n",
size[index[i]], sha1sum[index[i]]);
break;
}
p += read;
}
mtd_read_close(ctx);
if (i == pairs) {
// Ran off the end of the list of (size,sha1) pairs without
// finding a match.
fprintf(stderr, "contents of MTD partition \"%s\" didn't match %s\n",
partition, filename);
free(file->data);
file->data = NULL;
return -1;
}
const uint8_t* sha_final = SHA_final(&sha_ctx);
for (i = 0; i < SHA_DIGEST_SIZE; ++i) {
file->sha1[i] = sha_final[i];
}
// Fake some stat() info.
file->st.st_mode = 0644;
file->st.st_uid = 0;
file->st.st_gid = 0;
free(copy);
free(index);
free(size);
free(sha1sum);
return 0;
}
// Save the contents of the given FileContents object under the given
// filename. Return 0 on success.
int SaveFileContents(const char* filename, FileContents file) {
@@ -91,6 +282,76 @@ int SaveFileContents(const char* filename, FileContents file) {
return 0;
}
// Copy the contents of source_file to target_mtd partition, a string
// of the form "MTD:<partition>[:...]". Return 0 on success.
int CopyToMTDPartition(const char* source_file, const char* target_mtd) {
char* partition = strchr(target_mtd, ':');
if (partition == NULL) {
fprintf(stderr, "bad MTD target name \"%s\"\n", target_mtd);
return -1;
}
++partition;
// Trim off anything after a colon, eg "MTD:boot:blah:blah:blah...".
// We want just the partition name "boot".
partition = strdup(partition);
char* end = strchr(partition, ':');
if (end != NULL)
*end = '\0';
FILE* f = fopen(source_file, "rb");
if (f == NULL) {
fprintf(stderr, "failed to open %s for reading: %s\n",
source_file, strerror(errno));
return -1;
}
if (!mtd_partitions_scanned) {
mtd_scan_partitions();
mtd_partitions_scanned = 1;
}
const MtdPartition* mtd = mtd_find_partition_by_name(partition);
if (mtd == NULL) {
fprintf(stderr, "mtd partition \"%s\" not found for writing\n", partition);
return -1;
}
MtdWriteContext* ctx = mtd_write_partition(mtd);
if (ctx == NULL) {
fprintf(stderr, "failed to init mtd partition \"%s\" for writing\n",
partition);
return -1;
}
const int buffer_size = 4096;
char buffer[buffer_size];
size_t read;
while ((read = fread(buffer, 1, buffer_size, f)) > 0) {
size_t written = mtd_write_data(ctx, buffer, read);
if (written != read) {
fprintf(stderr, "only wrote %d of %d bytes to MTD %s\n",
written, read, partition);
mtd_write_close(ctx);
return -1;
}
}
fclose(f);
if (mtd_erase_blocks(ctx, -1) < 0) {
fprintf(stderr, "error finishing mtd write of %s\n", partition);
mtd_write_close(ctx);
return -1;
}
if (mtd_write_close(ctx)) {
fprintf(stderr, "error closing mtd write of %s\n", partition);
return -1;
}
free(partition);
return 0;
}
// Take a string 'str' of 40 hex digits and parse it into the 20
// byte array 'digest'. 'str' may contain only the digest or be of
@@ -176,8 +437,13 @@ int CheckMode(int argc, char** argv) {
FileContents file;
file.data = NULL;
// It's okay to specify no sha1s; the check will pass if the
// LoadFileContents is successful. (Useful for reading MTD
// partitions, where the filename encodes the sha1s; no need to
// check them twice.)
if (LoadFileContents(argv[2], &file) != 0 ||
FindMatchingPatch(file.sha1, patches, num_patches) == NULL) {
(num_patches > 0 &&
FindMatchingPatch(file.sha1, patches, num_patches) == NULL)) {
fprintf(stderr, "file \"%s\" doesn't have any of expected "
"sha1 sums; checking cache\n", argv[2]);
@@ -226,27 +492,57 @@ size_t FreeSpaceForFile(const char* filename) {
// replacement for it) and idempotent (it's okay to run this program
// multiple times).
//
// - if the sha1 hash of <file> is <tgt-sha1>, does nothing and exits
// - if the sha1 hash of <tgt-file> is <tgt-sha1>, does nothing and exits
// successfully.
//
// - otherwise, if the sha1 hash of <file> is <src-sha1>, applies the
// bsdiff <patch> to <file> to produce a new file (the type of patch
// - otherwise, if the sha1 hash of <src-file> is <src-sha1>, applies the
// bsdiff <patch> to <src-file> to produce a new file (the type of patch
// is automatically detected from the file header). If that new
// file has sha1 hash <tgt-sha1>, moves it to replace <file>, and
// exits successfully.
// file has sha1 hash <tgt-sha1>, moves it to replace <tgt-file>, and
// exits successfully. Note that if <src-file> and <tgt-file> are
// not the same, <src-file> is NOT deleted on success. <tgt-file>
// may be the string "-" to mean "the same as src-file".
//
// - otherwise, or if any error is encountered, exits with non-zero
// status.
//
// <src-file> (or <file> in check mode) may refer to an MTD partition
// to read the source data. See the comments for the
// LoadMTDContents() function above for the format of such a filename.
//
//
// As you might guess from the arguments, this function used to be
// main(); it was split out this way so applypatch could be built as a
// static library and linked into other executables as well. In the
// future only the library form will exist; we will not need to build
// this as a standalone executable.
//
// The arguments to this function are just the command-line of the
// standalone executable:
//
// <src-file> <tgt-file> <tgt-sha1> <tgt-size> [<src-sha1>:<patch> ...]
// to apply a patch. Returns 0 on success, 1 on failure.
//
// "-c" <file> [<sha1> ...]
// to check a file's contents against zero or more sha1s. Returns
// 0 if it matches any of them, 1 if it doesn't.
//
// "-s" <bytes>
// returns 0 if enough free space is available on /cache; 1 if it
// does not.
//
// "-l"
// shows open-source license information and returns 0.
//
// This function returns 2 if the arguments are not understood (in the
// standalone executable, this causes the usage message to be
// printed).
//
// TODO: make the interface more sensible for use as a library.
int main(int argc, char** argv) {
int applypatch(int argc, char** argv) {
if (argc < 2) {
usage:
fprintf(stderr, "usage: %s <file> <tgt-sha1> <tgt-size> [<src-sha1>:<patch> ...]\n"
" or %s -c <file> [<sha1> ...]\n"
" or %s -s <bytes>\n"
" or %s -l\n",
argv[0], argv[0], argv[0], argv[0]);
return 1;
return 2;
}
if (strncmp(argv[1], "-l", 3) == 0) {
@@ -259,7 +555,7 @@ int main(int argc, char** argv) {
if (strncmp(argv[1], "-s", 3) == 0) {
if (argc != 3) {
goto usage;
return 2;
}
size_t bytes = strtol(argv[2], NULL, 10);
if (MakeFreeSpaceOnCache(bytes) < 0) {
@@ -273,26 +569,22 @@ int main(int argc, char** argv) {
uint8_t target_sha1[SHA_DIGEST_SIZE];
const char* source_filename = argv[1];
// assume that source_filename (eg "/system/app/Foo.apk") is located
// on the same filesystem as its top-level directory ("/system").
// We need something that exists for calling statfs().
char* source_fs = strdup(argv[1]);
char* slash = strchr(source_fs+1, '/');
if (slash != NULL) {
*slash = '\0';
const char* target_filename = argv[2];
if (target_filename[0] == '-' &&
target_filename[1] == '\0') {
target_filename = source_filename;
}
if (ParseSha1(argv[2], target_sha1) != 0) {
fprintf(stderr, "failed to parse tgt-sha1 \"%s\"\n", argv[2]);
if (ParseSha1(argv[3], target_sha1) != 0) {
fprintf(stderr, "failed to parse tgt-sha1 \"%s\"\n", argv[3]);
return 1;
}
unsigned long target_size = strtoul(argv[3], NULL, 0);
unsigned long target_size = strtoul(argv[4], NULL, 0);
int num_patches;
Patch* patches;
if (ParseShaArgs(argc-4, argv+4, &patches, &num_patches) < 0) { return 1; }
if (ParseShaArgs(argc-5, argv+5, &patches, &num_patches) < 0) { return 1; }
FileContents copy_file;
FileContents source_file;
@@ -300,15 +592,27 @@ int main(int argc, char** argv) {
const char* copy_patch_filename = NULL;
int made_copy = 0;
if (LoadFileContents(source_filename, &source_file) == 0) {
// We try to load the target file into the source_file object.
if (LoadFileContents(target_filename, &source_file) == 0) {
if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
// The early-exit case: the patch was already applied, this file
// has the desired hash, nothing for us to do.
fprintf(stderr, "\"%s\" is already target; no patch needed\n",
source_filename);
target_filename);
return 0;
}
}
if (source_file.data == NULL ||
(target_filename != source_filename &&
strcmp(target_filename, source_filename) != 0)) {
// Need to load the source file: either we failed to load the
// target file, or we did but it's different from the source file.
free(source_file.data);
LoadFileContents(source_filename, &source_file);
}
if (source_file.data != NULL) {
const Patch* to_use =
FindMatchingPatch(source_file.sha1, patches, num_patches);
if (to_use != NULL) {
@@ -339,30 +643,70 @@ int main(int argc, char** argv) {
}
}
// Is there enough room in the target filesystem to hold the patched file?
size_t free_space = FreeSpaceForFile(source_fs);
int enough_space = free_space > (target_size * 3 / 2); // 50% margin of error
printf("target %ld bytes; free space %ld bytes; enough %d\n",
(long)target_size, (long)free_space, enough_space);
// Is there enough room in the target filesystem to hold the patched
// file?
if (!enough_space && source_patch_filename != NULL) {
// Using the original source, but not enough free space. First
// copy the source file to cache, then delete it from the original
// location.
if (strncmp(target_filename, "MTD:", 4) == 0) {
// If the target is an MTD partition, we're actually going to
// write the output to /tmp and then copy it to the partition.
// statfs() always returns 0 blocks free for /tmp, so instead
// we'll just assume that /tmp has enough space to hold the file.
// We still write the original source to cache, in case the MTD
// write is interrupted.
if (MakeFreeSpaceOnCache(source_file.size) < 0) {
fprintf(stderr, "not enough free space on /cache\n");
return 1;
}
if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
fprintf(stderr, "failed to back up source file\n");
return 1;
}
made_copy = 1;
unlink(source_filename);
} else {
// assume that target_filename (eg "/system/app/Foo.apk") is located
// on the same filesystem as its top-level directory ("/system").
// We need something that exists for calling statfs().
char* target_fs = strdup(target_filename);
char* slash = strchr(target_fs+1, '/');
if (slash != NULL) {
*slash = '\0';
}
size_t free_space = FreeSpaceForFile(source_fs);
printf("(now %ld bytes free for source)\n", (long)free_space);
size_t free_space = FreeSpaceForFile(target_fs);
int enough_space =
free_space > (target_size * 3 / 2); // 50% margin of error
printf("target %ld bytes; free space %ld bytes; enough %d\n",
(long)target_size, (long)free_space, enough_space);
if (!enough_space && source_patch_filename != NULL) {
// Using the original source, but not enough free space. First
// copy the source file to cache, then delete it from the original
// location.
if (strncmp(source_filename, "MTD:", 4) == 0) {
// It's impossible to free space on the target filesystem by
// deleting the source if the source is an MTD partition. If
// we're ever in a state where we need to do this, fail.
fprintf(stderr, "not enough free space for target but source is MTD\n");
return 1;
}
if (MakeFreeSpaceOnCache(source_file.size) < 0) {
fprintf(stderr, "not enough free space on /cache\n");
return 1;
}
if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
fprintf(stderr, "failed to back up source file\n");
return 1;
}
made_copy = 1;
unlink(source_filename);
size_t free_space = FreeSpaceForFile(target_fs);
printf("(now %ld bytes free for target)\n", (long)free_space);
}
}
FileContents* source_to_use;
@@ -375,14 +719,19 @@ int main(int argc, char** argv) {
patch_filename = copy_patch_filename;
}
// We write the decoded output to "<file>.patch".
char* outname = (char*)malloc(strlen(source_filename) + 10);
strcpy(outname, source_filename);
strcat(outname, ".patch");
char* outname = NULL;
if (strncmp(target_filename, "MTD:", 4) == 0) {
outname = MTD_TARGET_TEMP_FILE;
} else {
// We write the decoded output to "<tgt-file>.patch".
outname = (char*)malloc(strlen(target_filename) + 10);
strcpy(outname, target_filename);
strcat(outname, ".patch");
}
FILE* output = fopen(outname, "wb");
if (output == NULL) {
fprintf(stderr, "failed to patch file %s: %s\n",
source_filename, strerror(errno));
fprintf(stderr, "failed to open output file %s: %s\n",
outname, strerror(errno));
return 1;
}
@@ -410,11 +759,19 @@ int main(int argc, char** argv) {
} else if (header_bytes_read >= 8 &&
memcmp(header, "BSDIFF40", 8) == 0) {
int result = ApplyBSDiffPatch(source_to_use->data, source_to_use->size,
patch_filename, output, &ctx);
patch_filename, 0, output, &ctx);
if (result != 0) {
fprintf(stderr, "ApplyBSDiffPatch failed\n");
return result;
}
} else if (header_bytes_read >= 8 &&
memcmp(header, "IMGDIFF1", 8) == 0) {
int result = ApplyImagePatch(source_to_use->data, source_to_use->size,
patch_filename, output, &ctx);
if (result != 0) {
fprintf(stderr, "ApplyImagePatch failed\n");
return result;
}
} else {
fprintf(stderr, "Unknown patch file format");
return 1;
@@ -430,22 +787,32 @@ int main(int argc, char** argv) {
return 1;
}
// Give the .patch file the same owner, group, and mode of the
// original source file.
if (chmod(outname, source_to_use->st.st_mode) != 0) {
fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
return 1;
}
if (chown(outname, source_to_use->st.st_uid, source_to_use->st.st_gid) != 0) {
fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
return 1;
}
if (strcmp(outname, MTD_TARGET_TEMP_FILE) == 0) {
// Copy the temp file to the MTD partition.
if (CopyToMTDPartition(outname, target_filename) != 0) {
fprintf(stderr, "copy of %s to %s failed\n", outname, target_filename);
return 1;
}
unlink(outname);
} else {
// Give the .patch file the same owner, group, and mode of the
// original source file.
if (chmod(outname, source_to_use->st.st_mode) != 0) {
fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
return 1;
}
if (chown(outname, source_to_use->st.st_uid,
source_to_use->st.st_gid) != 0) {
fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
return 1;
}
// Finally, rename the .patch file to replace the original source file.
if (rename(outname, source_filename) != 0) {
fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
source_filename, strerror(errno));
return 1;
// Finally, rename the .patch file to replace the target file.
if (rename(outname, target_filename) != 0) {
fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
target_filename, strerror(errno));
return 1;
}
}
// If this run of applypatch created the copy, and we're here, we

View File

@@ -17,6 +17,7 @@
#ifndef _APPLYPATCH_H
#define _APPLYPATCH_H
#include <sys/stat.h>
#include "mincrypt/sha.h"
typedef struct _Patch {
@@ -38,12 +39,26 @@ typedef struct _FileContents {
// and use it as the source instead.
#define CACHE_TEMP_SOURCE "/cache/saved.file"
// When writing to an MTD partition, we first put the output in this
// temp file, then copy it to the partition once the patching is
// finished (and the target sha1 verified).
#define MTD_TARGET_TEMP_FILE "/tmp/mtd-temp"
// applypatch.c
size_t FreeSpaceForFile(const char* filename);
int applypatch(int argc, char** argv);
// bsdiff.c
void ShowBSDiffLicense();
int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename, ssize_t offset,
FILE* output, SHA_CTX* ctx);
int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename, ssize_t patch_offset,
unsigned char** new_data, ssize_t* new_size);
// imgpatch.c
int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename,
FILE* output, SHA_CTX* ctx);

View File

@@ -24,16 +24,22 @@ WORK_DIR=/system
# partition that WORK_DIR is located on, without the leading slash
WORK_FS=system
# set to 0 to use a device instead
USE_EMULATOR=1
# ------------------------
tmpdir=$(mktemp -d)
emulator -wipe-data -noaudio -no-window -port $EMULATOR_PORT &
pid_emulator=$!
if [ "$USE_EMULATOR" == 1 ]; then
emulator -wipe-data -noaudio -no-window -port $EMULATOR_PORT &
pid_emulator=$!
ADB="adb -s emulator-$EMULATOR_PORT "
else
ADB="adb -d "
fi
ADB="adb -s emulator-$EMULATOR_PORT "
echo "emulator is $pid_emulator; waiting for startup"
echo "waiting to connect to device"
$ADB wait-for-device
echo "device is available"
$ADB remount
@@ -56,7 +62,8 @@ fail() {
echo
echo FAIL: $testname
echo
kill $pid_emulator
[ "$open_pid" == "" ] || kill $open_pid
[ "$pid_emulator" == "" ] || kill $pid_emulator
exit 1
}
@@ -68,6 +75,23 @@ free_space() {
run_command df | awk "/$1/ {print gensub(/K/, \"\", \"g\", \$6)}"
}
cleanup() {
# not necessary if we're about to kill the emulator, but nice for
# running on real devices or already-running emulators.
testname "removing test files"
run_command rm $WORK_DIR/bloat.dat
run_command rm $WORK_DIR/old.file
run_command rm $WORK_DIR/patch.bsdiff
run_command rm $WORK_DIR/applypatch
run_command rm $CACHE_TEMP_SOURCE
run_command rm /cache/bloat*.dat
[ "$pid_emulator" == "" ] || kill $pid_emulator
rm -rf $tmpdir
}
cleanup
$ADB push $ANDROID_PRODUCT_OUT/system/bin/applypatch $WORK_DIR/applypatch
@@ -146,16 +170,71 @@ if (( free_kb * 1024 < NEW_SIZE * 3 / 2 )); then
fi
testname "apply bsdiff patch"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
testname "reapply bsdiff patch"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
# --------------- apply patch in new location ----------------------
$ADB push $DATA_DIR/old.file $WORK_DIR
$ADB push $DATA_DIR/patch.bsdiff $WORK_DIR
# Check that the partition has enough space to apply the patch without
# copying. If it doesn't, we'll be testing the low-space condition
# when we intend to test the not-low-space condition.
testname "apply patch to new location (with enough space)"
free_kb=$(free_space $WORK_FS)
echo "${free_kb}kb free on /$WORK_FS."
if (( free_kb * 1024 < NEW_SIZE * 3 / 2 )); then
echo "Not enough space on /$WORK_FS to patch test file."
echo
echo "This doesn't mean that applypatch is necessarily broken;"
echo "just that /$WORK_FS doesn't have enough free space to"
echo "properly run this test."
exit 1
fi
run_command rm $WORK_DIR/new.file
run_command rm $CACHE_TEMP_SOURCE
testname "apply bsdiff patch to new location"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/new.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
testname "reapply bsdiff patch to new location"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/new.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
$ADB push $DATA_DIR/old.file $CACHE_TEMP_SOURCE
# put some junk in the old file
run_command dd if=/dev/urandom of=$WORK_DIR/old.file count=100 bs=1024 || fail
testname "apply bsdiff patch to new location with corrupted source"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo || fail
$ADB pull $WORK_DIR/new.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
# put some junk in the cache copy, too
run_command dd if=/dev/urandom of=$CACHE_TEMP_SOURCE count=100 bs=1024 || fail
run_command rm $WORK_DIR/new.file
testname "apply bsdiff patch to new location with corrupted source and copy (no new file)"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo && fail
# put some junk in the new file
run_command dd if=/dev/urandom of=$WORK_DIR/new.file count=100 bs=1024 || fail
testname "apply bsdiff patch to new location with corrupted source and copy (bad new file)"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo && fail
# --------------- apply patch with low space on /system ----------------------
$ADB push $DATA_DIR/old.file $WORK_DIR
@@ -169,12 +248,12 @@ free_kb=$(free_space $WORK_FS)
echo "${free_kb}kb free on /$WORK_FS now."
testname "apply bsdiff patch with low space"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
testname "reapply bsdiff patch with low space"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
@@ -213,7 +292,7 @@ run_command ls /cache/subdir/a.file || fail # wasn't deleted because
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
# should fail; not enough files can be deleted
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff && fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff && fail
run_command ls /cache/bloat_large.dat || fail # wasn't deleted because it was open
run_command ls /cache/subdir/a.file || fail # wasn't deleted because it's in a subdir
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
@@ -229,7 +308,7 @@ run_command ls /cache/subdir/a.file || fail # still wasn't deleted because i
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
# should succeed
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
run_command ls /cache/subdir/a.file || fail # still wasn't deleted because it's in a subdir
@@ -242,7 +321,7 @@ $ADB push $DATA_DIR/old.file $CACHE_TEMP_SOURCE
run_command dd if=/dev/urandom of=$WORK_DIR/old.file count=100 bs=1024 || fail
testname "apply bsdiff patch from cache (corrupted source) with low space"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
@@ -251,20 +330,14 @@ $ADB push $DATA_DIR/old.file $CACHE_TEMP_SOURCE
run_command rm $WORK_DIR/old.file
testname "apply bsdiff patch from cache (missing source) with low space"
run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
# --------------- cleanup ----------------------
# not necessary if we're about to kill the emulator, but nice for
# running on real devices or already-running emulators.
run_command rm /cache/bloat*.dat $WORK_DIR/bloat.dat $CACHE_TEMP_SOURCE $WORK_DIR/old.file $WORK_DIR/patch.xdelta3 $WORK_DIR/patch.bsdiff $WORK_DIR/applypatch
kill $pid_emulator
rm -rf $tmpdir
cleanup
echo
echo PASS

View File

@@ -29,6 +29,7 @@
#include <bzlib.h>
#include "mincrypt/sha.h"
#include "applypatch.h"
void ShowBSDiffLicense() {
puts("The bsdiff library used herein is:\n"
@@ -80,10 +81,34 @@ static off_t offtin(u_char *buf)
return y;
}
int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename,
const char* patch_filename, ssize_t patch_offset,
FILE* output, SHA_CTX* ctx) {
unsigned char* new_data;
ssize_t new_size;
if (ApplyBSDiffPatchMem(old_data, old_size, patch_filename, patch_offset,
&new_data, &new_size) != 0) {
return -1;
}
if (fwrite(new_data, 1, new_size, output) < new_size) {
fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
return 1;
}
if (ctx) {
SHA_update(ctx, new_data, new_size);
}
free(new_data);
return 0;
}
int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename, ssize_t patch_offset,
unsigned char** new_data, ssize_t* new_size) {
FILE* f;
if ((f = fopen(patch_filename, "rb")) == NULL) {
fprintf(stderr, "failed to open patch file\n");
@@ -102,6 +127,8 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
// from oldfile to x bytes from the diff block; copy y bytes from the
// extra block; seek forwards in oldfile by z bytes".
fseek(f, patch_offset, SEEK_SET);
unsigned char header[32];
if (fread(header, 1, 32, f) < 32) {
fprintf(stderr, "failed to read patch file header\n");
@@ -109,17 +136,16 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
}
if (memcmp(header, "BSDIFF40", 8) != 0) {
fprintf(stderr, "corrupt patch file header (magic number)\n");
fprintf(stderr, "corrupt bsdiff patch file header (magic number)\n");
return 1;
}
ssize_t ctrl_len, data_len;
ssize_t new_size;
ctrl_len = offtin(header+8);
data_len = offtin(header+16);
new_size = offtin(header+24);
*new_size = offtin(header+24);
if (ctrl_len < 0 || data_len < 0 || new_size < 0) {
if (ctrl_len < 0 || data_len < 0 || *new_size < 0) {
fprintf(stderr, "corrupt patch file header (data lengths)\n");
return 1;
}
@@ -135,7 +161,7 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
fprintf(stderr, "failed to open patch file\n"); \
return 1; \
} \
if (fseeko(f, offset, SEEK_SET)) { \
if (fseeko(f, offset+patch_offset, SEEK_SET)) { \
fprintf(stderr, "failed to seek in patch file\n"); \
return 1; \
} \
@@ -150,9 +176,10 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
#undef OPEN_AT
unsigned char* new_data = malloc(new_size);
if (new_data == NULL) {
fprintf(stderr, "failed to allocate memory for output file\n");
*new_data = malloc(*new_size);
if (*new_data == NULL) {
fprintf(stderr, "failed to allocate %d bytes of memory for output file\n",
(int)*new_size);
return 1;
}
@@ -161,7 +188,7 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
off_t len_read;
int i;
unsigned char buf[8];
while (newpos < new_size) {
while (newpos < *new_size) {
// Read control data
for (i = 0; i < 3; ++i) {
len_read = BZ2_bzRead(&bzerr, cpfbz2, buf, 8);
@@ -173,13 +200,13 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
}
// Sanity check
if (newpos + ctrl[0] > new_size) {
if (newpos + ctrl[0] > *new_size) {
fprintf(stderr, "corrupt patch (new file overrun)\n");
return 1;
}
// Read diff string
len_read = BZ2_bzRead(&bzerr, dpfbz2, new_data + newpos, ctrl[0]);
len_read = BZ2_bzRead(&bzerr, dpfbz2, *new_data + newpos, ctrl[0]);
if (len_read < ctrl[0] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
fprintf(stderr, "corrupt patch (read diff)\n");
return 1;
@@ -188,7 +215,7 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
// Add old data to diff string
for (i = 0; i < ctrl[0]; ++i) {
if ((oldpos+i >= 0) && (oldpos+i < old_size)) {
new_data[newpos+i] += old_data[oldpos+i];
(*new_data)[newpos+i] += old_data[oldpos+i];
}
}
@@ -197,13 +224,13 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
oldpos += ctrl[0];
// Sanity check
if (newpos + ctrl[1] > new_size) {
if (newpos + ctrl[1] > *new_size) {
fprintf(stderr, "corrupt patch (new file overrun)\n");
return 1;
}
// Read extra string
len_read = BZ2_bzRead(&bzerr, epfbz2, new_data + newpos, ctrl[1]);
len_read = BZ2_bzRead(&bzerr, epfbz2, *new_data + newpos, ctrl[1]);
if (len_read < ctrl[1] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
fprintf(stderr, "corrupt patch (read extra)\n");
return 1;
@@ -221,12 +248,5 @@ int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
fclose(dpf);
fclose(epf);
if (fwrite(new_data, 1, new_size, output) < new_size) {
fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
return 1;
}
SHA_update(ctx, new_data, new_size);
free(new_data);
return 0;
}

560
tools/applypatch/imgdiff.c Normal file
View File

@@ -0,0 +1,560 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This program constructs binary patches for images -- such as boot.img
* and recovery.img -- that consist primarily of large chunks of gzipped
* data interspersed with uncompressed data. Doing a naive bsdiff of
* these files is not useful because small changes in the data lead to
* large changes in the compressed bitstream; bsdiff patches of gzipped
* data are typically as large as the data itself.
*
* To patch these usefully, we break the source and target images up into
* chunks of two types: "normal" and "gzip". Normal chunks are simply
* patched using a plain bsdiff. Gzip chunks are first expanded, then a
* bsdiff is applied to the uncompressed data, then the patched data is
* gzipped using the same encoder parameters. Patched chunks are
* concatenated together to create the output file; the output image
* should be *exactly* the same series of bytes as the target image used
* originally to generate the patch.
*
* To work well with this tool, the gzipped sections of the target
* image must have been generated using the same deflate encoder that
* is available in applypatch, namely, the one in the zlib library.
* In practice this means that images should be compressed using the
* "minigzip" tool included in the zlib distribution, not the GNU gzip
* program.
*
* An "imgdiff" patch consists of a header describing the chunk structure
* of the file and any encoding parameters needed for the gzipped
* chunks, followed by N bsdiff patches, one per chunk.
*
* For a diff to be generated, the source and target images must have the
* same "chunk" structure: that is, the same number of gzipped and normal
* chunks in the same order. Android boot and recovery images currently
* consist of five chunks: a small normal header, a gzipped kernel, a
* small normal section, a gzipped ramdisk, and finally a small normal
* footer.
*
* Caveats: we locate gzipped sections within the source and target
* images by searching for the byte sequence 1f8b0800: 1f8b is the gzip
* magic number; 08 specifies the "deflate" encoding [the only encoding
* supported by the gzip standard]; and 00 is the flags byte. We do not
* currently support any extra header fields (which would be indicated by
* a nonzero flags byte). We also don't handle the case when that byte
* sequence appears spuriously in the file. (Note that it would have to
* occur spuriously within a normal chunk to be a problem.)
*
*
* The imgdiff patch header looks like this:
*
* "IMGDIFF1" (8) [magic number and version]
* chunk count (4)
* for each chunk:
* chunk type (4) [CHUNK_NORMAL or CHUNK_GZIP]
* source start (8)
* source len (8)
* bsdiff patch offset (8) [from start of patch file]
* if chunk type == CHUNK_GZIP:
* source expanded len (8) [size of uncompressed source]
* target expected len (8) [size of uncompressed target]
* gzip level (4)
* method (4)
* windowBits (4)
* memLevel (4)
* strategy (4)
* gzip header len (4)
* gzip header (gzip header len)
* gzip footer (8)
*
* All integers are little-endian. "source start" and "source len"
* specify the section of the input image that comprises this chunk,
* including the gzip header and footer for gzip chunks. "source
* expanded len" is the size of the uncompressed source data. "target
* expected len" is the size of the uncompressed data after applying
* the bsdiff patch. The next five parameters specify the zlib
* parameters to be used when compressing the patched data, and the
* next three specify the header and footer to be wrapped around the
* compressed data to create the output chunk (so that header contents
* like the timestamp are recreated exactly).
*
* After the header there are 'chunk count' bsdiff patches; the offset
* of each from the beginning of the file is specified in the header.
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "zlib.h"
#include "imgdiff.h"
// Description of one chunk of an image file.  ReadImage() produces an
// array of these; most pointers here point into the file image that
// ReadImage() returns, except that 'data' of a gzip chunk is a
// separately malloc'd buffer holding the uncompressed payload.
typedef struct {
  int type;             // CHUNK_NORMAL or CHUNK_GZIP
  size_t start;         // offset of chunk in original image file
  size_t len;           // number of bytes pointed to by 'data'
  unsigned char* data;  // data to be patched (ie, uncompressed, for
                        // gzip chunks)

  // everything else is for CHUNK_GZIP chunks only:

  size_t gzip_header_len;      // length of the gzip header in the image
  unsigned char* gzip_header;  // points at the header within the image
  unsigned char* gzip_footer;  // points at the 8-byte footer within the image

  // original (compressed) gzip data, including header and footer
  size_t gzip_len;
  unsigned char* gzip_data;

  // deflate encoder parameters (filled in by ReconstructGzipChunk)
  int level, method, windowBits, memLevel, strategy;
} ImageChunk;
/*
 * Read the given file and break it up into chunks, putting the number
 * of chunks and their info in *num_chunks and **chunks,
 * respectively.  Returns a malloc'd block of memory containing the
 * contents of the file; various pointers in the output chunk array
 * will point into this block of memory.  The caller should free the
 * return value when done with all the chunks.  Returns NULL on
 * failure.
 */
unsigned char* ReadImage(const char* filename,
                         int* num_chunks, ImageChunk** chunks) {
  struct stat st;
  if (stat(filename, &st) != 0) {
    fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
    return NULL;
  }

  // Allocate 4 extra bytes so the gzip-magic scan below can always
  // look at 4 bytes starting from any position in the file data.
  unsigned char* img = malloc(st.st_size + 4);
  if (img == NULL) {
    fprintf(stderr, "failed to allocate %d bytes for \"%s\"\n",
            (int)st.st_size + 4, filename);
    return NULL;
  }

  FILE* f = fopen(filename, "rb");
  if (f == NULL) {
    fprintf(stderr, "failed to open \"%s\": %s\n", filename, strerror(errno));
    free(img);
    return NULL;
  }
  if (fread(img, 1, st.st_size, f) != st.st_size) {
    fprintf(stderr, "failed to read \"%s\" %s\n", filename, strerror(errno));
    fclose(f);
    free(img);
    return NULL;
  }
  fclose(f);

  // append 4 zero bytes to the data so we can always search for the
  // four-byte string 1f8b0800 starting at any point in the actual
  // file data, without special-casing the end of the data.
  memset(img+st.st_size, 0, 4);

  size_t pos = 0;

  *num_chunks = 0;
  *chunks = NULL;

  while (pos < st.st_size) {
    unsigned char* p = img+pos;

    // Reallocate the list for every chunk; we expect the number of
    // chunks to be small (5 for typical boot and recovery images).
    ++*num_chunks;
    *chunks = realloc(*chunks, *num_chunks * sizeof(ImageChunk));
    ImageChunk* curr = *chunks + (*num_chunks-1);
    curr->start = pos;

    if (st.st_size - pos >= 4 &&
        p[0] == 0x1f && p[1] == 0x8b &&
        p[2] == 0x08 &&    // deflate compression
        p[3] == 0x00) {    // no header flags
      // 'pos' is the offset of the start of a gzip chunk.

      curr->type = CHUNK_GZIP;
      curr->gzip_header_len = GZIP_HEADER_LEN;
      curr->gzip_header = p;

      // We must decompress this chunk in order to discover where it
      // ends, and so we can put the uncompressed data and its length
      // into curr->data and curr->len;

      size_t allocated = 32768;
      curr->len = 0;
      curr->data = malloc(allocated);
      if (curr->data == NULL) {
        fprintf(stderr, "failed to allocate decompression buffer\n");
        free(img);
        return NULL;
      }
      curr->gzip_data = p;

      z_stream strm;
      strm.zalloc = Z_NULL;
      strm.zfree = Z_NULL;
      strm.opaque = Z_NULL;
      strm.avail_in = st.st_size - (pos + curr->gzip_header_len);
      strm.next_in = p + GZIP_HEADER_LEN;

      // -15 means we are decoding a 'raw' deflate stream; zlib will
      // not expect zlib headers.
      int ret = inflateInit2(&strm, -15);
      if (ret != Z_OK) {
        fprintf(stderr, "failed to initialize inflation: %d\n", ret);
        free(curr->data);
        free(img);
        return NULL;
      }

      do {
        strm.avail_out = allocated - curr->len;
        strm.next_out = curr->data + curr->len;
        ret = inflate(&strm, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
          // Bail out on a corrupt or truncated stream; without this
          // check a bad input could loop here forever.
          fprintf(stderr, "inflation of chunk failed: %d\n", ret);
          inflateEnd(&strm);
          free(curr->data);
          free(img);
          return NULL;
        }
        curr->len = allocated - strm.avail_out;
        if (strm.avail_out == 0) {
          allocated *= 2;
          curr->data = realloc(curr->data, allocated);
        }
      } while (ret != Z_STREAM_END);

      curr->gzip_len = st.st_size - strm.avail_in - pos + GZIP_FOOTER_LEN;
      pos = st.st_size - strm.avail_in;
      inflateEnd(&strm);

      // consume the gzip footer.
      curr->gzip_footer = img+pos;
      pos += GZIP_FOOTER_LEN;
      p = img+pos;

      // The footer (that we just skipped over) contains the size of
      // the uncompressed data.  Double-check to make sure that it
      // matches the size of the data we got when we actually did
      // the decompression.
      size_t footer_size = p[-4] + (p[-3] << 8) + (p[-2] << 16) + (p[-1] << 24);
      if (footer_size != curr->len) {
        fprintf(stderr, "Error: footer size %d != decompressed size %d\n",
                (int)footer_size, (int)curr->len);
        free(img);
        return NULL;
      }
    } else {
      // 'pos' is not the offset of the start of a gzip chunk, so scan
      // forward until we find a gzip header.
      curr->type = CHUNK_NORMAL;
      curr->data = p;

      for (curr->len = 0; curr->len < (st.st_size - pos); ++curr->len) {
        if (p[curr->len] == 0x1f &&
            p[curr->len+1] == 0x8b &&
            p[curr->len+2] == 0x08 &&
            p[curr->len+3] == 0x00) {
          break;
        }
      }
      pos += curr->len;
    }
  }

  return img;
}
#define BUFFER_SIZE 32768
/*
 * Takes the uncompressed data stored in the chunk, compresses it
 * using the zlib parameters stored in the chunk, and checks that it
 * matches exactly the compressed data we started with (also stored in
 * the chunk).  'out' must point to a scratch buffer of at least
 * BUFFER_SIZE bytes.  Return 0 on success.
 */
int TryReconstruction(ImageChunk* chunk, unsigned char* out) {
  size_t p = chunk->gzip_header_len;   // read cursor into the original compressed bytes

  z_stream strm;
  strm.zalloc = Z_NULL;
  strm.zfree = Z_NULL;
  strm.opaque = Z_NULL;
  strm.avail_in = chunk->len;
  strm.next_in = chunk->data;
  int ret;
  ret = deflateInit2(&strm, chunk->level, chunk->method, chunk->windowBits,
                     chunk->memLevel, chunk->strategy);
  if (ret != Z_OK) {
    // Unsupported parameter combination; count it as a mismatch so
    // the caller can try another one.
    fprintf(stderr, "failed to initialize deflation: %d\n", ret);
    return -1;
  }
  do {
    strm.avail_out = BUFFER_SIZE;
    strm.next_out = out;
    ret = deflate(&strm, Z_FINISH);
    size_t have = BUFFER_SIZE - strm.avail_out;

    // If we've produced more bytes than the original compressed data
    // contains, it cannot match; checking first also keeps memcmp
    // from reading past the end of chunk->gzip_data.
    if (p + have > chunk->gzip_len ||
        memcmp(out, chunk->gzip_data+p, have) != 0) {
      // mismatch; data isn't the same.
      deflateEnd(&strm);
      return -1;
    }
    p += have;
  } while (ret != Z_STREAM_END);
  deflateEnd(&strm);

  if (p + GZIP_FOOTER_LEN != chunk->gzip_len) {
    // mismatch; ran out of data before we should have.
    return -1;
  }
  return 0;
}
/*
 * Verify that we can reproduce exactly the same compressed data that
 * we started with.  Sets the level, method, windowBits, memLevel, and
 * strategy fields in the chunk to the encoding parameters needed to
 * produce the right output.  Returns 0 on success.
 */
int ReconstructGzipChunk(ImageChunk* chunk) {
  if (chunk->type != CHUNK_GZIP) {
    fprintf(stderr, "attempt to reconstruct non-gzip chunk\n");
    return -1;
  }

  unsigned char* scratch = malloc(BUFFER_SIZE);
  int found = 0;

  // We only check two combinations of encoder parameters: level 6
  // (the default) and level 9 (the maximum).
  chunk->level = 6;
  while (!found && chunk->level <= 9) {
    chunk->windowBits = -15;            // 32kb window; negative to indicate a raw stream.
    chunk->memLevel = 8;                // the default value.
    chunk->method = Z_DEFLATED;
    chunk->strategy = Z_DEFAULT_STRATEGY;

    if (TryReconstruction(chunk, scratch) == 0) {
      found = 1;
    } else {
      chunk->level += 3;
    }
  }

  free(scratch);
  return found ? 0 : -1;
}
/** Write a 4-byte value to f in little-endian order. */
void Write4(int value, FILE* f) {
  unsigned int v = (unsigned int)value;
  int shift;
  for (shift = 0; shift < 32; shift += 8) {
    fputc((int)((v >> shift) & 0xff), f);
  }
}
/** Write an 8-byte value to f in little-endian order. */
void Write8(long long value, FILE* f) {
  unsigned long long v = (unsigned long long)value;
  int shift;
  for (shift = 0; shift < 64; shift += 8) {
    fputc((int)((v >> shift) & 0xff), f);
  }
}
/*
 * Given source and target chunks, compute a bsdiff patch between them
 * by running bsdiff in a subprocess.  Return a malloc'd block of
 * patch data, placing its length in *size.  Return NULL on failure.
 * We expect the bsdiff program to be in the path.
 */
unsigned char* MakePatch(ImageChunk* src, ImageChunk* tgt, size_t* size) {
  char stemp[] = "/tmp/imgdiff-src-XXXXXX";
  char ttemp[] = "/tmp/imgdiff-tgt-XXXXXX";
  char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
  unsigned char* data = NULL;    // the return value

  // mkstemp() both fills in the names and creates the files; we
  // close the descriptors and reuse the paths with stdio below.
  // (The original code discarded the descriptors, leaking three
  // fds per call.)
  int sfd = mkstemp(stemp);
  int tfd = mkstemp(ttemp);
  int pfd = mkstemp(ptemp);
  if (sfd < 0 || tfd < 0 || pfd < 0) {
    fprintf(stderr, "failed to create temp files: %s\n", strerror(errno));
    if (sfd >= 0) close(sfd);
    if (tfd >= 0) close(tfd);
    if (pfd >= 0) close(pfd);
    return NULL;
  }
  close(sfd);
  close(tfd);
  close(pfd);

  // Write the source chunk data where bsdiff can see it.
  FILE* f = fopen(stemp, "wb");
  if (f == NULL) {
    fprintf(stderr, "failed to open src chunk %s: %s\n",
            stemp, strerror(errno));
    goto done;
  }
  if (fwrite(src->data, 1, src->len, f) != src->len) {
    fprintf(stderr, "failed to write src chunk to %s: %s\n",
            stemp, strerror(errno));
    fclose(f);
    goto done;
  }
  fclose(f);

  // Likewise for the target chunk data.
  f = fopen(ttemp, "wb");
  if (f == NULL) {
    fprintf(stderr, "failed to open tgt chunk %s: %s\n",
            ttemp, strerror(errno));
    goto done;
  }
  if (fwrite(tgt->data, 1, tgt->len, f) != tgt->len) {
    fprintf(stderr, "failed to write tgt chunk to %s: %s\n",
            ttemp, strerror(errno));
    fclose(f);
    goto done;
  }
  fclose(f);

  char cmd[200];
  snprintf(cmd, sizeof(cmd), "bsdiff %s %s %s", stemp, ttemp, ptemp);
  if (system(cmd) != 0) {
    fprintf(stderr, "failed to run bsdiff: %s\n", strerror(errno));
    goto done;
  }

  struct stat st;
  if (stat(ptemp, &st) != 0) {
    fprintf(stderr, "failed to stat patch file %s: %s\n",
            ptemp, strerror(errno));
    goto done;
  }

  data = malloc(st.st_size);
  if (data == NULL) {
    fprintf(stderr, "failed to allocate %d bytes for patch data\n",
            (int)st.st_size);
    goto done;
  }
  *size = st.st_size;

  f = fopen(ptemp, "rb");
  if (f == NULL) {
    fprintf(stderr, "failed to open patch %s: %s\n", ptemp, strerror(errno));
    free(data);
    data = NULL;
    goto done;
  }
  if (fread(data, 1, st.st_size, f) != st.st_size) {
    fprintf(stderr, "failed to read patch %s: %s\n", ptemp, strerror(errno));
    fclose(f);
    free(data);
    data = NULL;
    goto done;
  }
  fclose(f);

done:
  // Always remove the temp files, including on error paths (the
  // original code leaked them on every failure).
  unlink(stemp);
  unlink(ttemp);
  unlink(ptemp);
  return data;
}
/*
 * Cause a gzip chunk to be treated as a normal chunk (ie, as a blob
 * of uninterpreted data).  The resulting patch will likely be about
 * as big as the target file, but it lets us handle the case of images
 * where some gzip chunks are reconstructible but others aren't (by
 * treating the ones that aren't as normal chunks).
 */
void ChangeGzipChunkToNormal(ImageChunk* ch) {
  // Drop the uncompressed copy and point the chunk back at the raw
  // (compressed) bytes, which will now be patched as opaque data.
  free(ch->data);
  ch->data = ch->gzip_data;
  ch->len = ch->gzip_len;
  ch->type = CHUNK_NORMAL;
}
// Entry point: imgdiff <src-img> <tgt-img> <patch-file>.  Breaks both
// images into chunks, computes a bsdiff patch per chunk, and writes
// the imgdiff header plus the concatenated patches to <patch-file>.
int main(int argc, char** argv) {
  if (argc != 4) {
    fprintf(stderr, "usage: %s <src-img> <tgt-img> <patch-file>\n", argv[0]);
    return 2;
  }

  int num_src_chunks;
  ImageChunk* src_chunks;
  if (ReadImage(argv[1], &num_src_chunks, &src_chunks) == NULL) {
    fprintf(stderr, "failed to break apart source image\n");
    return 1;
  }

  int num_tgt_chunks;
  ImageChunk* tgt_chunks;
  if (ReadImage(argv[2], &num_tgt_chunks, &tgt_chunks) == NULL) {
    fprintf(stderr, "failed to break apart target image\n");
    return 1;
  }

  // Verify that the source and target images have the same chunk
  // structure (ie, the same sequence of gzip and normal chunks).

  if (num_src_chunks != num_tgt_chunks) {
    fprintf(stderr, "source and target don't have same number of chunks!\n");
    return 1;
  }
  int i;
  for (i = 0; i < num_src_chunks; ++i) {
    if (src_chunks[i].type != tgt_chunks[i].type) {
      fprintf(stderr, "source and target don't have same chunk "
              "structure! (chunk %d)\n", i);
      return 1;
    }
  }

  // Confirm that given the uncompressed chunk data in the target, we
  // can recompress it and get exactly the same bits as are in the
  // input target image.  If this fails, treat the chunk as a normal
  // non-gzipped chunk.
  for (i = 0; i < num_tgt_chunks; ++i) {
    if (tgt_chunks[i].type == CHUNK_GZIP) {
      if (ReconstructGzipChunk(tgt_chunks+i) < 0) {
        printf("failed to reconstruct target gzip chunk %d; "
               "treating as normal chunk\n", i);
        ChangeGzipChunkToNormal(tgt_chunks+i);
        ChangeGzipChunkToNormal(src_chunks+i);
      } else {
        printf("reconstructed target gzip chunk %d\n", i);
      }
    }
  }

  // Compute bsdiff patches for each chunk's data (the uncompressed
  // data, in the case of gzip chunks).
  unsigned char** patch_data = malloc(num_src_chunks * sizeof(unsigned char*));
  size_t* patch_size = malloc(num_src_chunks * sizeof(size_t));
  if (patch_data == NULL || patch_size == NULL) {
    fprintf(stderr, "failed to allocate patch arrays\n");
    return 1;
  }
  for (i = 0; i < num_src_chunks; ++i) {
    patch_data[i] = MakePatch(src_chunks+i, tgt_chunks+i, patch_size+i);
    if (patch_data[i] == NULL) {
      // MakePatch can fail (eg, bsdiff not in the path); previously
      // this was silently written out as a garbage patch.
      fprintf(stderr, "failed to generate patch for chunk %d\n", i);
      return 1;
    }
    printf("patch %d is %d bytes (of %d)\n", i, (int)patch_size[i],
           (int)(tgt_chunks[i].type == CHUNK_NORMAL ? tgt_chunks[i].len
                                                    : tgt_chunks[i].gzip_len));
  }

  // Figure out how big the imgdiff file header is going to be, so
  // that we can correctly compute the offset of each bsdiff patch
  // within the file.
  size_t total_header_size = 12;
  for (i = 0; i < num_src_chunks; ++i) {
    total_header_size += 4 + 8*3;
    if (src_chunks[i].type == CHUNK_GZIP) {
      total_header_size += 8*2 + 4*6 + tgt_chunks[i].gzip_header_len + 8;
    }
  }

  size_t offset = total_header_size;

  FILE* f = fopen(argv[3], "wb");
  if (f == NULL) {
    fprintf(stderr, "failed to open \"%s\": %s\n", argv[3], strerror(errno));
    return 1;
  }

  // Write out the headers.
  fwrite("IMGDIFF1", 1, 8, f);
  Write4(num_src_chunks, f);
  for (i = 0; i < num_tgt_chunks; ++i) {
    Write4(tgt_chunks[i].type, f);
    Write8(src_chunks[i].start, f);
    Write8(src_chunks[i].type == CHUNK_NORMAL ? src_chunks[i].len :
           (src_chunks[i].gzip_len + src_chunks[i].gzip_header_len + 8), f);
    Write8(offset, f);

    if (tgt_chunks[i].type == CHUNK_GZIP) {
      Write8(src_chunks[i].len, f);
      Write8(tgt_chunks[i].len, f);
      Write4(tgt_chunks[i].level, f);
      Write4(tgt_chunks[i].method, f);
      Write4(tgt_chunks[i].windowBits, f);
      Write4(tgt_chunks[i].memLevel, f);
      Write4(tgt_chunks[i].strategy, f);
      Write4(tgt_chunks[i].gzip_header_len, f);
      fwrite(tgt_chunks[i].gzip_header, 1, tgt_chunks[i].gzip_header_len, f);
      fwrite(tgt_chunks[i].gzip_footer, 1, GZIP_FOOTER_LEN, f);
    }

    offset += patch_size[i];
  }

  // Append each chunk's bsdiff patch, in order.
  for (i = 0; i < num_tgt_chunks; ++i) {
    fwrite(patch_data[i], 1, patch_size[i], f);
  }

  // fclose() flushes buffered output; check it so write errors
  // (eg, disk full) are not silently ignored.
  if (fclose(f) != 0) {
    fprintf(stderr, "failed to write \"%s\": %s\n", argv[3], strerror(errno));
    return 1;
  }
  return 0;
}

View File

@@ -0,0 +1,28 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Image patch chunk types
#define CHUNK_NORMAL 0
#define CHUNK_GZIP 1
// The gzip header size is actually variable, but we currently don't
// support gzipped data with any of the optional fields, so for now it
// will always be ten bytes. See RFC 1952 for the definition of the
// gzip format.
#define GZIP_HEADER_LEN 10
// The gzip footer size really is fixed.
#define GZIP_FOOTER_LEN 8

228
tools/applypatch/imgpatch.c Normal file
View File

@@ -0,0 +1,228 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// See imgdiff.c in this directory for a description of the patch file
// format.
#include <stdio.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include "zlib.h"
#include "mincrypt/sha.h"
#include "applypatch.h"
#include "imgdiff.h"
/** Decode a 4-byte little-endian integer starting at p. */
int Read4(unsigned char* p) {
  unsigned int v = p[0];
  v |= (unsigned int)p[1] << 8;
  v |= (unsigned int)p[2] << 16;
  v |= (unsigned int)p[3] << 24;
  return (int)v;
}
/** Decode an 8-byte little-endian integer starting at p. */
long long Read8(unsigned char* p) {
  unsigned long long v = 0;
  int i;
  for (i = 7; i >= 0; --i) {
    v = (v << 8) | p[i];
  }
  return (long long)v;
}
/*
 * Apply the patch given in 'patch_filename' to the source data given
 * by (old_data, old_size).  Write the patched output to the 'output'
 * file, and update the SHA context with the output data as well.
 * Return 0 on success.
 */
int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
                    const char* patch_filename,
                    FILE* output, SHA_CTX* ctx) {
  FILE* f;
  if ((f = fopen(patch_filename, "rb")) == NULL) {
    fprintf(stderr, "failed to open patch file\n");
    return -1;
  }

  unsigned char header[12];
  if (fread(header, 1, 12, f) != 12) {
    fprintf(stderr, "failed to read patch file header\n");
    fclose(f);
    return -1;
  }

  if (memcmp(header, "IMGDIFF1", 8) != 0) {
    fprintf(stderr, "corrupt patch file header (magic number)\n");
    fclose(f);
    return -1;
  }

  int num_chunks = Read4(header+8);

  int i;
  for (i = 0; i < num_chunks; ++i) {
    // each chunk's header record starts with 28 bytes (4 + 8*3).
    unsigned char chunk[28];
    if (fread(chunk, 1, 28, f) != 28) {
      fprintf(stderr, "failed to read chunk %d record\n", i);
      fclose(f);
      return -1;
    }

    int type = Read4(chunk);
    size_t src_start = Read8(chunk+4);
    size_t src_len = Read8(chunk+12);
    size_t patch_offset = Read8(chunk+20);

    if (type == CHUNK_NORMAL) {
      fprintf(stderr, "CHUNK %d: normal   patch offset %d\n",
              i, (int)patch_offset);

      // Propagate bsdiff failures (previously the return value was
      // silently discarded).
      if (ApplyBSDiffPatch(old_data + src_start, src_len,
                           patch_filename, patch_offset,
                           output, ctx) != 0) {
        fclose(f);
        return -1;
      }
    } else if (type == CHUNK_GZIP) {
      fprintf(stderr, "CHUNK %d: gzip     patch offset %d\n",
              i, (int)patch_offset);

      // gzip chunks have an additional 40 + gzip_header_len + 8 bytes
      // in their chunk header.
      unsigned char* gzip = malloc(40);
      if (gzip == NULL || fread(gzip, 1, 40, f) != 40) {
        fprintf(stderr, "failed to read chunk %d initial gzip data\n", i);
        free(gzip);
        fclose(f);
        return -1;
      }
      size_t gzip_header_len = Read4(gzip+36);
      unsigned char* enlarged = realloc(gzip, 40 + gzip_header_len + 8);
      if (enlarged == NULL) {
        fprintf(stderr, "failed to grow chunk %d gzip buffer\n", i);
        free(gzip);
        fclose(f);
        return -1;
      }
      gzip = enlarged;
      if (fread(gzip+40, 1, gzip_header_len+8, f) != gzip_header_len+8) {
        fprintf(stderr, "failed to read chunk %d remaining gzip data\n", i);
        free(gzip);
        fclose(f);
        return -1;
      }

      size_t expanded_len = Read8(gzip);
      // target expected len lives at offset 8 (the original code
      // mistakenly re-read offset 0); the value is currently unused.
      size_t target_len = Read8(gzip+8);
      (void)target_len;
      int gz_level = Read4(gzip+16);
      int gz_method = Read4(gzip+20);
      int gz_windowBits = Read4(gzip+24);
      int gz_memLevel = Read4(gzip+28);
      int gz_strategy = Read4(gzip+32);

      // Decompress the source data; the chunk header tells us exactly
      // how big we expect it to be when decompressed.

      unsigned char* expanded_source = malloc(expanded_len);
      if (expanded_source == NULL) {
        fprintf(stderr, "failed to allocate %d bytes for expanded_source\n",
                (int)expanded_len);
        free(gzip);
        fclose(f);
        return -1;
      }
      z_stream strm;
      strm.zalloc = Z_NULL;
      strm.zfree = Z_NULL;
      strm.opaque = Z_NULL;
      strm.avail_in = src_len - (gzip_header_len + 8);
      strm.next_in = (unsigned char*)(old_data + src_start + gzip_header_len);
      strm.avail_out = expanded_len;
      strm.next_out = expanded_source;

      int ret;
      ret = inflateInit2(&strm, -15);
      if (ret != Z_OK) {
        fprintf(stderr, "failed to init source inflation: %d\n", ret);
        free(expanded_source);
        free(gzip);
        fclose(f);
        return -1;
      }

      // Because we've provided enough room to accommodate the output
      // data, we expect one call to inflate() to suffice.
      ret = inflate(&strm, Z_SYNC_FLUSH);
      if (ret != Z_STREAM_END) {
        fprintf(stderr, "source inflation returned %d\n", ret);
        inflateEnd(&strm);
        free(expanded_source);
        free(gzip);
        fclose(f);
        return -1;
      }
      // We should have filled the output buffer exactly.
      if (strm.avail_out != 0) {
        fprintf(stderr, "source inflation short by %d bytes\n",
                (int)strm.avail_out);
        inflateEnd(&strm);
        free(expanded_source);
        free(gzip);
        fclose(f);
        return -1;
      }
      inflateEnd(&strm);

      // Next, apply the bsdiff patch (in memory) to the uncompressed
      // data.
      unsigned char* uncompressed_target_data;
      ssize_t uncompressed_target_size;
      if (ApplyBSDiffPatchMem(expanded_source, expanded_len,
                              patch_filename, patch_offset,
                              &uncompressed_target_data,
                              &uncompressed_target_size) != 0) {
        free(expanded_source);
        free(gzip);
        fclose(f);
        return -1;
      }

      // Now compress the target data and append it to the output.

      // start with the gzip header.
      fwrite(gzip+40, 1, gzip_header_len, output);
      SHA_update(ctx, gzip+40, gzip_header_len);

      // we're done with the expanded_source data buffer, so we'll
      // reuse that memory to receive the output of deflate.
      unsigned char* temp_data = expanded_source;
      ssize_t temp_size = expanded_len;
      if (temp_size < 32768) {
        // ... unless the buffer is too small, in which case we'll
        // allocate a fresh one.
        free(temp_data);
        temp_data = malloc(32768);
        temp_size = 32768;
        if (temp_data == NULL) {
          fprintf(stderr, "failed to allocate deflate buffer\n");
          free(uncompressed_target_data);
          free(gzip);
          fclose(f);
          return -1;
        }
      }

      // now the deflate stream
      strm.zalloc = Z_NULL;
      strm.zfree = Z_NULL;
      strm.opaque = Z_NULL;
      strm.avail_in = uncompressed_target_size;
      strm.next_in = uncompressed_target_data;
      ret = deflateInit2(&strm, gz_level, gz_method, gz_windowBits,
                         gz_memLevel, gz_strategy);
      do {
        strm.avail_out = temp_size;
        strm.next_out = temp_data;
        ret = deflate(&strm, Z_FINISH);
        size_t have = temp_size - strm.avail_out;

        if (fwrite(temp_data, 1, have, output) != have) {
          fprintf(stderr, "failed to write %d compressed bytes to output\n",
                  (int)have);
          deflateEnd(&strm);
          free(temp_data);
          free(uncompressed_target_data);
          free(gzip);
          fclose(f);
          return -1;
        }
        SHA_update(ctx, temp_data, have);
      } while (ret != Z_STREAM_END);
      deflateEnd(&strm);

      // lastly, the gzip footer.
      fwrite(gzip+40+gzip_header_len, 1, 8, output);
      SHA_update(ctx, gzip+40+gzip_header_len, 8);

      free(temp_data);
      free(uncompressed_target_data);
      free(gzip);
    } else {
      fprintf(stderr, "patch chunk %d is unknown type %d\n", i, type);
      fclose(f);
      return -1;
    }
  }

  // The original code never closed the patch file, leaking the
  // handle on every call (including success).
  fclose(f);
  return 0;
}

60
tools/applypatch/main.c Normal file
View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
extern int applypatch(int argc, char** argv);
// This program applies binary patches to files in a way that is safe
// (the original file is not touched until we have the desired
// replacement for it) and idempotent (it's okay to run this program
// multiple times).
//
// - if the sha1 hash of <tgt-file> is <tgt-sha1>, does nothing and exits
// successfully.
//
// - otherwise, if the sha1 hash of <src-file> is <src-sha1>, applies the
// bsdiff <patch> to <src-file> to produce a new file (the type of patch
// is automatically detected from the file header). If that new
// file has sha1 hash <tgt-sha1>, moves it to replace <tgt-file>, and
// exits successfully. Note that if <src-file> and <tgt-file> are
// not the same, <src-file> is NOT deleted on success. <tgt-file>
// may be the string "-" to mean "the same as src-file".
//
// - otherwise, or if any error is encountered, exits with non-zero
// status.
//
// <src-file> (or <file> in check mode) may refer to an MTD partition
// to read the source data. See the comments for the
// LoadMTDContents() function above for the format of such a filename.
// Thin command-line wrapper around the applypatch library entry
// point: forward the arguments, and print a usage message whenever it
// reports bad arguments (exit status 2).
int main(int argc, char** argv) {
  int status = applypatch(argc, argv);
  if (status != 2) {
    return status;
  }
  fprintf(stderr,
          "usage: %s <src-file> <tgt-file> <tgt-sha1> <tgt-size> "
          "[<src-sha1>:<patch> ...]\n"
          " or %s -c <file> [<sha1> ...]\n"
          " or %s -s <bytes>\n"
          " or %s -l\n"
          "\n"
          "Filenames may be of the form\n"
          " MTD:<partition>:<len_1>:<sha1_1>:<len_2>:<sha1_2>:...\n"
          "to specify reading from or writing to an MTD partition.\n\n",
          argv[0], argv[0], argv[0], argv[0]);
  return status;
}

View File

@@ -7,6 +7,7 @@ echo "ro.build.id=$BUILD_ID"
echo "ro.build.display.id=$BUILD_DISPLAY_ID"
echo "ro.build.version.incremental=$BUILD_NUMBER"
echo "ro.build.version.sdk=$PLATFORM_SDK_VERSION"
echo "ro.build.version.codename=$PLATFORM_VERSION_CODENAME"
echo "ro.build.version.release=$PLATFORM_VERSION"
echo "ro.build.date=`date`"
echo "ro.build.date.utc=`date +%s`"
@@ -19,6 +20,7 @@ echo "ro.product.brand=$PRODUCT_BRAND"
echo "ro.product.name=$PRODUCT_NAME"
echo "ro.product.device=$TARGET_DEVICE"
echo "ro.product.board=$TARGET_BOOTLOADER_BOARD_NAME"
echo "ro.product.cpu.abi=$TARGET_CPU_ABI"
echo "ro.product.manufacturer=$PRODUCT_MANUFACTURER"
echo "ro.product.locale.language=$PRODUCT_DEFAULT_LANGUAGE"
echo "ro.product.locale.region=$PRODUCT_DEFAULT_REGION"

View File

@@ -24,6 +24,6 @@ LOCAL_C_INCLUDES := external/zlib
LOCAL_SHARED_LIBRARIES := libz
LOCAL_MODULE := afar
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE_TAGS := optional
include $(BUILD_EXECUTABLE)

View File

@@ -31,6 +31,6 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_MODULE := dexopt-wrapper
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE_TAGS := optional
include $(BUILD_EXECUTABLE)

View File

@@ -47,6 +47,7 @@ public class Comment
"@deprecated",
"@undeprecate",
"@docRoot",
"@sdkCurrent",
"@inheritDoc",
"@more",
"@code",

View File

@@ -115,23 +115,30 @@ public class DocFile
TagInfo.makeHDF(hdf, "root.descr", tags);
hdf.setValue("commentText", commentText);
if (outfile.indexOf("sdk/") != -1) {
hdf.setValue("sdk", "true");
if (outfile.indexOf("index.html") != -1) {
ClearPage.write(hdf, "sdkpage.cs", outfile);
} else {
ClearPage.write(hdf, "docpage.cs", outfile);
}
} else if (outfile.indexOf("guide/") != -1){
hdf.setValue("guide", "true");
ClearPage.write(hdf, "docpage.cs", outfile);
} else if (outfile.indexOf("publish/") != -1){
hdf.setValue("publish", "true");
// write the page using the appropriate root template, based on the
// whichdoc value supplied by build
String fromWhichmodule = hdf.getValue("android.whichmodule", "");
if (fromWhichmodule.equals("online-pdk")) {
//leaving this in just for temporary compatibility with pdk doc
hdf.setValue("online-pdk", "true");
// add any conditional login for root template here (such as
// for custom left nav based on tab etc.
ClearPage.write(hdf, "docpage.cs", outfile);
} else {
ClearPage.write(hdf, "nosidenavpage.cs", outfile);
if (outfile.indexOf("sdk/") != -1) {
hdf.setValue("sdk", "true");
if (outfile.indexOf("index.html") != -1) {
ClearPage.write(hdf, "sdkpage.cs", outfile);
} else {
ClearPage.write(hdf, "docpage.cs", outfile);
}
} else if (outfile.indexOf("guide/") != -1) {
hdf.setValue("guide", "true");
ClearPage.write(hdf, "docpage.cs", outfile);
} else {
ClearPage.write(hdf, "nosidenavpage.cs", outfile);
}
}
}
} //writePage
}

View File

@@ -5,8 +5,8 @@ left nav (toc) that gets placed on all pages. ?>
def:custom_masthead() ?>
<div id="header">
<div id="headerLeft">
<a href="<?cs var:toroot ?>index.html" tabindex="-1"><img
src="<?cs var:toroot ?>assets/images/bg_logo.png" alt="Android Porting Development Kit" /></a>
<a href="<?cs var:toroot ?>guide/index.html" tabindex="-1"><img
src="<?cs var:toroot ?>assets/images/open_source.png" alt="Open Source Project: Platform Development Kit" /></a>
<ul class="<?cs
if:reference ?> <?cs
elif:guide ?> <?cs
@@ -15,17 +15,10 @@ def:custom_masthead() ?>
elif:community ?> <?cs
elif:publish ?> <?cs
elif:about ?> <?cs /if ?>">
<li id="home-link"><a href="<?cs var:toroot ?><?cs
if:android.whichdoc != "online-pdk" ?>offline.html<?cs
else ?>index.html<?cs /if ?>">
<span>Home</span></a></li>
<!--<li id="sdk-link"><a href="<?cs var:toroot ?>index.html"><span>SDK</span></a></li>-->
<li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
onClick="return loadLast('guide')"><span>Porting Guide</span></a></li>
<!--<li id="reference-link"><a href="<?cs var:toroot ?>reference/packages.html"
onClick="return loadLast('reference')"><span>Reference</span></a></li>
<li><a href="http://android-developers.blogspot.com"><span>Blog</span></a></li>
<li id="community-link"><a href="<?cs var:toroot ?>community/index.html"><span>Community</span></a></li>-->
<!--<li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
onClick="return loadLast('guide)'"><span>Dev Guide</span></a></li>
<li id="opensource-link"><a href="http://source.android.com/"
onClick="return loadLast('open')"><span>Open Source</span></a></li>-->
</ul>
</div>
<div id="headerRight">
@@ -35,8 +28,7 @@ def:custom_masthead() ?>
<!-- &nbsp;<a href="#">English</a> | -->
<a href="http://www.android.com">Android.com</a>
</span>
</div><?cs
call:default_search_box() ?>
</div>
</div><!-- headerRight -->
</div><!-- header --><?cs
/def ?><?cs # custom_masthead ?>
@@ -47,7 +39,7 @@ def:guide_nav() ?>
<div class="g-section g-tpl-240" id="body-content">
<div class="g-unit g-first side-nav-resizable" id="side-nav">
<div id="devdoc-nav"><?cs
include:"../../../../development/pdk/docs/html/guide/guide_toc.cs" ?>
include:"../../../../development/pdk/docs/guide/pdk_toc.cs" ?>
</div>
</div> <!-- end side-nav -->
<script>
@@ -59,11 +51,7 @@ def:guide_nav() ?>
<?cs
def:custom_left_nav() ?><?cs
if:guide ?><?cs
call:guide_nav() ?><?cs
else ?><?cs
call:default_left_nav() ?><?cs
/if ?><?cs
/def ?>
<?cs # appears at the bottom of every page ?><?cs

View File

@@ -7,7 +7,7 @@
if:sdk.version ?> (<?cs
var:sdk.version ?>)<?cs
/if ?> | <?cs
/if ?>Android Developers</title>
/if ?>Android Open Source</title>
<link href="<?cs var:toroot ?>assets/android-developer-docs-devguide.css" rel="stylesheet" type="text/css" />
<link href="<?cs var:toroot ?>assets-pdk/pdk-local.css" rel="stylesheet" type="text/css" />
<script src="<?cs var:toroot ?>assets/search_autocomplete.js" type="text/javascript"></script>

View File

@@ -1,45 +1,74 @@
<?cs # This default template file is meant to be replaced. ?>
<?cs # Use the -tempatedir arg to javadoc to set your own directory with a replacement for this file in it. ?>
<?cs # The default search box that goes in the header ?><?cs
def:default_search_box() ?>
<div id="search" >
<div id="searchForm">
<form accept-charset="utf-8" class="gsc-search-box"
onsubmit="return submit_search()">
<table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody>
<tr>
<td class="gsc-input">
<input id="search_autocomplete" class="gsc-input" type="text" size="33" autocomplete="off"
title="search developer docs" name="q"
value="search developer docs"
onFocus="search_focus_changed(this, true)"
onBlur="search_focus_changed(this, false)"
onkeydown="return search_changed(event, true, '<?cs var:toroot?>')"
onkeyup="return search_changed(event, false, '<?cs var:toroot?>')" />
<div id="search_filtered_div" class="no-display">
<table id="search_filtered" cellspacing=0>
</table>
</div>
</td>
<td class="gsc-search-button">
<input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" />
</td>
<td class="gsc-clear-button">
<div title="clear results" class="gsc-clear-button">&nbsp;</div>
</td>
</tr></tbody>
</table>
</form>
</div><!-- searchForm -->
</div><!-- search --><?cs
/def ?>
<?cs
def:custom_masthead() ?>
<div id="header">
<div id="headerLeft">
<a href="<?cs var:toroot ?>index.html" tabindex="-1"><img
src="<?cs var:toroot ?>assets/images/bg_logo.png" alt="Android Developers" /></a>
<ul class="<?cs
if:reference ?>reference<?cs
elif:guide ?>guide<?cs
elif:sdk ?>sdk<?cs
elif:home ?>home<?cs
elif:community ?>community<?cs
elif:publish ?>publish<?cs
elif:about ?>about<?cs /if ?>">
<li id="home-link"><a href="<?cs var:toroot ?><?cs
if:android.whichdoc != "online" ?>offline.html<?cs
else ?>index.html<?cs /if ?>">
<span>Home</span></a></li>
<li id="sdk-link"><a href="<?cs var:toroot ?>sdk/1.1_r1/index.html"><span>SDK</span></a></li>
<li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
onClick="return loadLast('guide')"><span>Dev Guide</span></a></li>
<li id="reference-link"><a href="<?cs var:toroot ?>reference/packages.html"
onClick="return loadLast('reference')"><span>Reference</span></a></li>
<li><a href="http://android-developers.blogspot.com"><span>Blog</span></a></li>
<li id="community-link"><a href="<?cs var:toroot ?>community/index.html"><span>Community</span></a></li>
</ul>
<?cs include:"header_tabs.cs" ?> <?cs # The links are extracted so we can better manage localization ?>
</div>
<div id="headerRight">
<div id="headerLinks">
<!-- <img src="<?cs var:toroot ?>assets/images/icon_world.jpg" alt="" /> -->
<span class="text">
<!-- &nbsp;<a href="#">English</a> | -->
<a href="http://www.android.com">Android.com</a>
</span>
<!-- <img src="<?cs var:toroot ?>assets/images/icon_world.jpg" alt="" /> -->
<span id="language">
<select name="language" onChange="changeLangPref(this.value)">
<option value="en">English</option>
<!-- <option value="ja"></option> -->
</select>
<script type="text/javascript">
<!--
loadLangPref();
//-->
</script>
</span>
<a href="http://www.android.com">Android.com</a>
</div><?cs
call:default_search_box() ?>
</div><!-- headerRight -->
<script type="text/javascript">
<!--
changeTabLang(getLangPref());
//-->
</script>
</div><!-- header --><?cs
/def ?><?cs # custom_masthead ?>
/def ?>
<?cs
def:sdk_nav() ?>
@@ -66,22 +95,80 @@ def:guide_nav() ?>
</script>
<?cs /def ?>
<?cs
def:publish_nav() ?>
<div class="g-section g-tpl-180" id="body-content">
<div class="g-unit g-first" id="side-nav">
<div id="devdoc-nav"><?cs
include:"../../../../frameworks/base/docs/html/publish/publish_toc.cs" ?>
</div>
<?cs # The default side navigation for the reference docs ?><?cs
def:default_left_nav() ?>
<div class="g-section g-tpl-240" id="body-content">
<div class="g-unit g-first side-nav-resizable" id="side-nav">
<div id="swapper">
<div id="nav-panels">
<div id="resize-packages-nav">
<div id="packages-nav">
<div id="index-links"><nobr>
<a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
<a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
</div>
<ul><?cs
each:pkg=docs.packages ?>
<li <?cs if:(class.package.name == pkg.name) || (package.name == pkg.name)?>class="selected"<?cs /if ?>><?cs call:package_link(pkg) ?></li><?cs
/each ?>
</ul><br/>
</div> <!-- end packages -->
</div> <!-- end resize-packages -->
<div id="classes-nav"><?cs
if:subcount(class.package) ?>
<ul>
<?cs call:list("Interfaces", class.package.interfaces) ?>
<?cs call:list("Classes", class.package.classes) ?>
<?cs call:list("Enums", class.package.enums) ?>
<?cs call:list("Exceptions", class.package.exceptions) ?>
<?cs call:list("Errors", class.package.errors) ?>
</ul><?cs
elif:subcount(package) ?>
<ul>
<?cs call:class_link_list("Interfaces", package.interfaces) ?>
<?cs call:class_link_list("Classes", package.classes) ?>
<?cs call:class_link_list("Enums", package.enums) ?>
<?cs call:class_link_list("Exceptions", package.exceptions) ?>
<?cs call:class_link_list("Errors", package.errors) ?>
</ul><?cs
else ?>
<script>
/*addLoadEvent(maxPackageHeight);*/
</script>
<p style="padding:10px">Select a package to view its members</p><?cs
/if ?><br/>
</div><!-- end classes -->
</div><!-- end nav-panels -->
<div id="nav-tree" style="display:none">
<div id="index-links"><nobr>
<a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
<a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
</div>
</div><!-- end nav-tree -->
</div><!-- end swapper -->
</div> <!-- end side-nav -->
<?cs /def ?>
<script>
if (!isMobile) {
$("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav");
chooseDefaultNav();
if ($("#nav-tree").is(':visible')) init_navtree("nav-tree", "<?cs var:toroot ?>", NAVTREE_DATA);
else {
addLoadEvent(function() {
scrollIntoView("packages-nav");
scrollIntoView("classes-nav");
});
}
$("#swapper").css({borderBottom:"2px solid #aaa"});
} else {
swapNav(); // tree view should be used on mobile
}
</script><?cs
/def ?>
<?cs
def:custom_left_nav() ?><?cs
if:guide ?><?cs
call:guide_nav() ?><?cs
elif:publish ?><?cs
call:publish_nav() ?><?cs
elif:sdk ?><?cs
call:sdk_nav() ?><?cs
else ?><?cs
@@ -115,7 +202,7 @@ def:custom_footerlinks() ?>
</p><?cs
/def ?>
<?cs # appears on the right side of the blue bar at the bottom of every page ?><?cs
<?cs # appears on the right side of the blue bar at the bottom off every page ?><?cs
def:custom_buildinfo() ?>
Android 1.1 r1 - <?cs var:page.now ?><?cs
/def ?>
Android <?cs var:sdk.version ?>&nbsp;r<?cs var:sdk.rel.id ?> - <?cs var:page.now ?>
<?cs /def ?>

View File

@@ -0,0 +1,35 @@
<ul id="header-tabs" class="<?cs
if:reference ?>reference<?cs
elif:guide ?>guide<?cs
elif:sdk ?>sdk<?cs
elif:home ?>home<?cs
elif:community ?>community<?cs
elif:publish ?>publish<?cs
elif:about ?>about<?cs /if ?>">
<li id="home-link"><a href="<?cs var:toroot ?><?cs if:android.whichdoc != "online" ?>offline.html<?cs else ?>index.html<?cs /if ?>">
<span class="en">Home</span>
<span class="ja"></span>
</a></li>
<li id="sdk-link"><a href="<?cs var:toroot ?>sdk/<?cs var:sdk.current ?>/index.html">
<span class="en">SDK</span>
<span class="ja"></span>
</a></li>
<li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html" onClick="return loadLast('guide')">
<span class="en">Dev Guide</span>
<span class="ja"></span>
</a></li>
<li id="reference-link"><a href="<?cs var:toroot ?>reference/packages.html" onClick="return loadLast('reference')">
<span class="en">Reference</span>
<span class="ja"></span>
</a></li>
<li><a href="http://android-developers.blogspot.com">
<span class="en">Blog</span>
<span class="ja"></span>
</a></li>
<li id="community-link"><a href="<?cs var:toroot ?>community/index.html">
<span class="en">Community</span>
<span class="ja"></span>
</a></li>
</ul>

View File

@@ -4,7 +4,7 @@
<?cs if:sdk.redirect ?>
<head>
<title>Redirecting...</title>
<meta http-equiv="refresh" content="0;url=<?cs var:toroot ?>sdk/<?cs var:sdk.redirect ?>/index.html">
<meta http-equiv="refresh" content="0;url=<?cs var:toroot ?>sdk/<?cs var:sdk.current ?>/index.html">
<link href="<?cs var:toroot ?>assets/android-developer-docs.css" rel="stylesheet" type="text/css" />
</head>
<?cs else ?>
@@ -16,32 +16,33 @@
<?cs call:sdk_nav() ?>
<div class="g-unit" id="doc-content" >
<?cs if:sdk.redirect ?>
Redirecting to
<a href="<?cs var:toroot ?>sdk/<?cs var:sdk.redirect ?>/index.html">
<?cs var:toroot ?>sdk/<?cs var:sdk.redirect ?>/index.html
</a>...
<div class="g-unit">
<div id="jd-content">
<p>Redirecting to
<a href="/sdk/<?cs var:sdk.current ?>/index.html">
/sdk/<?cs var:sdk.current ?>/index.html
</a></p>
<?cs else ?>
<div class="g-unit" id="doc-content" >
<div id="jd-header" class="guide-header" >
<span class="crumb">&nbsp;</span>
<h1><?cs if:android.whichdoc == "online" ?>Download <?cs /if ?><?cs var:page.title ?></h1>
</div>
<div id="jd-content">
<p><em>
<?cs var:sdk.date ?>
</em></p>
<div id="jd-content">
<p><em><?cs
if:ndk ?><?cs
var:ndk.date ?><?cs
else ?><?cs
var:sdk.date ?><?cs
/if ?></em>
</p>
<?cs if:sdk.not_latest_version ?>
<div class="special">
<p><strong>This is NOT the current Android SDK release.</strong></p>
<p>Use the links under <strong>Current SDK Release</strong>, on the left, to be directed to the current SDK.</p>
<p><a href="/sdk/<?cs var:sdk.current ?>/index.html">Download the current Android SDK</a></p>
</div>
<?cs /if ?>
@@ -51,8 +52,63 @@
<p>The sections below provide an overview of the SDK package. </p>
<?cs else ?>
<?cs if:ndk ?>
<p>Before downloading, please read the <a href="<?cs var:toroot ?>sdk/<?cs var:sdk.version ?>/requirements.html">
<p>The Android NDK is a companion tool to the Android SDK that lets Android
application developers build performance-critical portions of their apps in
native code. It is designed for use <em>only</em> in conjunction with the
Android SDK, so if you have not already installed the Android 1.5 SDK, please do
so before downloading the NDK. Also, please read <a href="">What is the Android
NDK?</a> to get an understanding of what the NDK offers and whether it will be
useful to you.</p>
<p>Select the download package that is appropriate for your development
computer. Note that separate download packages are provided for 32- and 64-bit
Linux platforms.</p>
<table class="download">
<tr>
<th>Platform</th>
<th>Package</th>
<th>Size</th>
<th>MD5 Checksum</th>
</tr>
<tr>
<td>Windows</td>
<td>
<a href="http://dl.google.com/android/<?cs var:ndk.win_download ?>"><?cs var:ndk.win_download ?></a>
</td>
<td><?cs var:ndk.win_bytes ?> bytes</td>
<td><?cs var:ndk.win_checksum ?></td>
</tr>
<tr class="alt-color">
<td>Mac OS X (intel)</td>
<td>
<a href="http://dl.google.com/android/<?cs var:ndk.mac_download ?>"><?cs var:ndk.mac_download ?></a>
</td>
<td><?cs var:ndk.mac_bytes ?> bytes</td>
<td><?cs var:ndk.mac_checksum ?></td>
</tr>
<tr>
<td>Linux 32-bit (i386)</td>
<td>
<a href="http://dl.google.com/android/<?cs var:ndk.linux_download ?>"><?cs var:ndk.linux_download ?></a>
</td>
<td><?cs var:ndk.linux_bytes ?> bytes</td>
<td><?cs var:ndk.linux_checksum ?></td>
</tr>
<tr class="alt-color">
<td>Linux 64-bit (x86_64)</td>
<td>
<a href="http://dl.google.com/android/<?cs var:ndk.linux_64_download ?>"><?cs var:ndk.linux_64_download ?></a>
</td>
<td><?cs var:ndk.linux_64_bytes ?> bytes</td>
<td><?cs var:ndk.linux_64_checksum ?></td>
</tr>
</table>
<?cs else ?>
<p>Before downloading, please read the <a href="requirements.html">
System Requirements</a> document. As you start the download, you will also need to review and agree to
the Terms and Conditions that govern the use of the Android SDK. </p>
@@ -89,6 +145,7 @@ the Terms and Conditions that govern the use of the Android SDK. </p>
</tr>
</table>
<?cs /if ?>
<?cs /if ?>
<?cs call:tag_list(root.descr) ?>
@@ -96,7 +153,10 @@ the Terms and Conditions that govern the use of the Android SDK. </p>
<?cs /if ?>
</div><!-- end jd-content -->
<?cs include:"footer.cs" ?>
<?cs if:!sdk.redirect ?>
<?cs include:"footer.cs" ?>
<?cs /if ?>
</div><!-- end doc-content -->
<?cs include:"trailer.cs" ?>

View File

@@ -110,11 +110,18 @@ dd {
padding:0 0 0 2em;
}
li p, dd p {
li p {
margin:.5em 0 0;
}
dd p {
margin:1em 0 0;
}
li pre, li table, li img,
li pre, li table, li img {
margin:.5em 0 0 1em;
}
dd pre, dd table, dd img {
margin:1em 0 0 1em;
}
@@ -169,7 +176,6 @@ hr.blue {
margin:0;
position:relative;
width:100%;
background: url('images/preliminary.png');
}
#header {
@@ -262,20 +268,16 @@ hr.blue {
#headerLinks {
margin:10px 10px 0 0;
height:13px;
}
#headerLinks .text {
text-decoration: none;
color: #7FA9B5;
font-size: 11px;
vertical-align: top;
}
#headerLinks a {
text-decoration: underline;
color: #7FA9B5;
font-size: 11px;
vertical-align: top;
}
#language {
margin:0 10px;
}
#search {
@@ -717,14 +719,16 @@ td.gsc-search-button {
float: left;
width: 584px;
height: 580px;
background:url(images/home/bg_home_middle.png) no-repeat 0 0;
position:relative;
}
#topAnnouncement {
background:url(images/home/bg_home_announcement.png) no-repeat 0 0;
}
#homeTitle {
margin:15px 15px 0;
height:30px;
background:url(images/hr_gray_side.jpg) no-repeat 0 29px;
padding:15px 15px 0;
height:30px;
}
#homeTitle h2 {
@@ -732,8 +736,14 @@ td.gsc-search-button {
}
#announcement-block {
margin:15px 15px 0;
height:125px;
padding:0 15px 0;
overflow:hidden;
background: url(images/hr_gray_side.jpg) no-repeat 15px 0;
zoom:1;
}
#announcement-block>* {
padding:15px 0 0;
}
#announcement-block img {
@@ -746,6 +756,29 @@ td.gsc-search-button {
margin:0;
}
#carousel {
background:url(images/home/bg_home_carousel.png) no-repeat 0 0;
position:relative;
height:400px;
}
#carouselMain {
padding: 25px 21px 0;
height:185px;
background-position:top;
overflow:hidden;
position:relative;
}
#carouselMain img {
margin:0;
}
#homeMiddle p {
margin:0;
padding-bottom: 1em;
}
.clearer { clear:both; }
#arrow-left, #arrow-right {
@@ -824,6 +857,12 @@ div#app-list {
text-decoration:none;
text-align:center;
font-size:11px;
line-height:11px;
}
#app-list a span {
position:relative;
top:-4px;
}
#app-list img {
@@ -857,18 +896,6 @@ div#app-list {
padding-bottom:.25em;
}
#carouselMain {
margin: 25px 21px 0;
height:185px;
background-position:top;
background-repeat:no-repeat;
overflow:hidden;
}
#carouselMain img {
margin:0;
}
/*carousel bulletin layouts*/
/*460px width*/
/*185px height*/
@@ -877,24 +904,24 @@ div#app-list {
width:230px;
height:165px;
overflow:hidden;
margin:8px 0 8px 8px;
padding:8px 0 8px 8px;
}
.desc-right {
float:left;
width:270px;
margin:10px;
padding:10px;
}
.img-right {
float:right;
width:220px;
height:165px;
overflow:hidden;
margin:8px 8px 8px 0;
padding:8px 8px 8px 0;
}
.desc-left {
float:right;
width:280px;
margin:10px;
padding:10px;
text-align:right;
}
.img-top {

View File

@@ -433,11 +433,11 @@ hr {
.nolist {
list-style:none;
padding:0;
margin:0 0 0 1em;
margin:0 0 1em 1em;
}
.nolist li {
padding:0;
padding:0 0 2px;
margin:0;
}
@@ -570,15 +570,7 @@ div.special ol li {
margin:0 0 .5em;
padding:0;
}
/* old p.note, p.caution, p.warning {
margin:0 0 1em;
padding: 4px 10px;
background-color: #efefef;
border-top: 1px solid;
border-bottom: 1px solid;
}
*/
p.note, p.caution, p.warning {
margin: 1em;
padding: 0 0 0 .5em;
@@ -594,21 +586,20 @@ p.special-note {
p.note {
border-color: #99aacc;
}
p.caution {
border-color: #ffcc33;
}
p.warning {
border-color: #aa0033;
}
p.warning b, p.warning em, p.warning strong {
color: #aa0033;
p.caution {
border-color: #ffcf00;
}
p.warning b, p.warning strong {
font-weight: bold;
}
li p.note, li p.warning, li p.caution {
li p.note, li p.warning {
margin: .5em 0 0 0;
padding: .2em .5em .2em .9em;
}
@@ -681,7 +672,7 @@ pre.classic {
#qv ol ol{
list-style:none;
padding: 0 0 3px 12px;
padding: 0 0 0 12px;
margin:0;
}
@@ -690,11 +681,14 @@ pre.classic {
}
#qv li {
padding: 0 10px;
margin: 2 0 0;
padding: 0 10px 3px;
line-height: 1.2em;
}
#qv li li {
padding: 3px 10px 0;
}
#qv ul li {
padding: 0 10px 0 0;
}
@@ -810,6 +804,63 @@ padding:0 0 0 0em;
/* End sidebox sidebar element styles */
/* BEGIN image and caption styles (originally for UI Guidelines docs) */
table.image-caption {
padding:0;
margin:.5em 0;
border:0;
}
td.image-caption-i {
font-size:92%;
padding:0;
margin:0;
border:0;
}
td.image-caption-i img {
padding:0 1em;
margin:0;
}
.image-list {
width:24px;
text-align:center;
}
.image-list .caption {
margin:0 2px;
}
td.image-caption-c {
font-size:92%;
padding:1em 2px 2px 2px;
margin:0;
border:0;
width:350px;
}
.grad-rule-top {
background-image:url(images/grad-rule-qv.png);
background-repeat:no-repeat;
padding-top:1em;
margin-top:0;
}
.image-caption-nested {
margin-top:0;
padding:0 0 0 1em;
}
.image-caption-nested td {
padding:0 4px 2px 0;
margin:0;
border:0;
}
/* END image and caption styles */
/* table of contents */
ol.toc {

View File

@@ -4,7 +4,7 @@ var devdocNav;
var sidenav;
var content;
var HEADER_HEIGHT = 117;
var cookie_style = 'android_developer';
var cookie_namespace = 'android_developer';
var NAV_PREF_TREE = "tree";
var NAV_PREF_PANELS = "panels";
var nav_pref;
@@ -70,8 +70,8 @@ function restoreHeight(packageHeight) {
$("#nav-tree").css({height:swapperHeight + "px"});
}
function getCookie(cookie) {
var myCookie = cookie_style+"_"+cookie+"=";
function readCookie(cookie) {
var myCookie = cookie_namespace+"_"+cookie+"=";
if (document.cookie) {
var index = document.cookie.indexOf(myCookie);
if (index != -1) {
@@ -87,16 +87,15 @@ function getCookie(cookie) {
return 0;
}
function writeCookie(cookie, val, path, expiration) {
function writeCookie(cookie, val, section, expiration) {
if (!val) return;
var date = new Date();
date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is one week
expiration = expiration ? expiration : date.toGMTString();
if (location.href.indexOf("/reference/") != -1) {
document.cookie = cookie_style+'_reference_'+cookie+'='+val+'; expires='+expiration+'; path='+'/'+path;
} else if (location.href.indexOf("/guide/") != -1) {
document.cookie = cookie_style+'_guide_'+cookie+'='+val+'; expires='+expiration+'; path='+'/'+path;
section = section == null ? "_" : "_"+section+"_";
if (expiration == null) {
var date = new Date();
date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is one week
expiration = date.toGMTString();
}
document.cookie = cookie_namespace+section+cookie+"="+val+"; expires="+expiration+"; path=/";
}
function init() {
@@ -116,8 +115,8 @@ function init() {
if (!isMobile) {
$("#resize-packages-nav").resizable({handles: "s", resize: function(e, ui) { resizeHeight(); } });
$(".side-nav-resizable").resizable({handles: "e", resize: function(e, ui) { resizeWidth(); } });
var cookieWidth = getCookie(cookiePath+'width');
var cookieHeight = getCookie(cookiePath+'height');
var cookieWidth = readCookie(cookiePath+'width');
var cookieHeight = readCookie(cookiePath+'height');
if (cookieWidth) {
restoreWidth(cookieWidth);
} else if ($(".side-nav-resizable").length) {
@@ -175,7 +174,9 @@ function resizeHeight() {
$("#packages-nav").css({height:parseInt(resizePackagesNav.css("height")) - 6 + "px"}); //move 6px for handle
devdocNav.css({height:sidenav.css("height")});
$("#nav-tree").css({height:swapperHeight + "px"});
writeCookie("height", resizePackagesNav.css("height"), "", null);
var section = location.pathname.substring(1,location.pathname.indexOf("/",1));
writeCookie("height", resizePackagesNav.css("height"), section, null);
}
function resizeWidth() {
@@ -190,7 +191,9 @@ function resizeWidth() {
resizePackagesNav.css({width:sidenavWidth});
classesNav.css({width:sidenavWidth});
$("#packages-nav").css({width:sidenavWidth});
writeCookie("width", sidenavWidth, "", null);
var section = location.pathname.substring(1,location.pathname.indexOf("/",1));
writeCookie("width", sidenavWidth, section, null);
}
function resizeAll() {
@@ -207,7 +210,7 @@ function loadLast(cookiePath) {
if (location.indexOf("/"+cookiePath+"/") != -1) {
return true;
}
var lastPage = getCookie(cookiePath + "_lastpage");
var lastPage = readCookie(cookiePath + "_lastpage");
if (lastPage) {
window.location = lastPage;
return false;
@@ -216,11 +219,11 @@ function loadLast(cookiePath) {
}
$(window).unload(function(){
var href = location.href;
if (href.indexOf("/reference/") != -1) {
writeCookie("lastpage", href, "", null);
} else if (href.indexOf("/guide/") != -1) {
writeCookie("lastpage", href, "", null);
var path = location.pathname;
if (path.indexOf("/reference/") != -1) {
writeCookie("lastpage", path, "reference", null);
} else if (path.indexOf("/guide/") != -1) {
writeCookie("lastpage", path, "guide", null);
}
});
@@ -257,7 +260,7 @@ function buildToggleLists() {
}
function getNavPref() {
var v = getCookie('reference_nav');
var v = readCookie('reference_nav');
if (v != NAV_PREF_TREE) {
v = NAV_PREF_PANELS;
}
@@ -283,7 +286,7 @@ function swapNav() {
}
var date = new Date();
date.setTime(date.getTime()+(10*365*24*60*60*1000)); // keep this for 10 years
writeCookie("nav", nav_pref, "", date.toGMTString());
writeCookie("nav", nav_pref, null, date.toGMTString());
$("#nav-panels").toggle();
$("#panel-link").toggle();
@@ -349,3 +352,57 @@ function toggleAllSummaryInherited(linkObj) {
}
return false;
}
function changeTabLang(lang) {
var nodes = $("#header-tabs").find("."+lang);
for (i=0; i < nodes.length; i++) { // for each node in this language
var node = $(nodes[i]);
node.siblings().css("display","none"); // hide all siblings
if (node.not(":empty").length != 0) { //if this languages node has a translation, show it
node.css("display","inline");
} else { //otherwise, show English instead
node.css("display","none");
node.siblings().filter(".en").css("display","inline");
}
}
}
function changeNavLang(lang) {
var nodes = $("#side-nav").find("."+lang);
for (i=0; i < nodes.length; i++) { // for each node in this language
var node = $(nodes[i]);
node.siblings().css("display","none"); // hide all siblings
if (node.not(":empty").length != 0) { // if this languages node has a translation, show it
node.css("display","inline");
} else { // otherwise, show English instead
node.css("display","none");
node.siblings().filter(".en").css("display","inline");
}
}
}
function changeDocLang(lang) {
changeTabLang(lang);
changeNavLang(lang);
}
function changeLangPref(lang) {
var date = new Date();
date.setTime(date.getTime()+(50*365*24*60*60*1000)); // keep this for 50 years
writeCookie("pref_lang", lang, null, date);
changeDocLang(lang);
}
function loadLangPref() {
var lang = readCookie("pref_lang");
if (lang != 0) {
$("#language").find("option[value='"+lang+"']").attr("selected",true);
}
}
function getLangPref() {
return $("#language").find(":selected").attr("value");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.6 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.5 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.1 KiB

View File

@@ -168,6 +168,6 @@ function search_focus_changed(obj, focused)
function submit_search() {
var query = document.getElementById('search_autocomplete').value;
document.location = '/search.html#q=' + query;
document.location = toRoot + 'search.html#q=' + query; // toRoot is initialized in android-developer-docs.js
return false;
}

View File

@@ -2,6 +2,10 @@
<?cs # Use the -templatedir arg to javadoc to set your own directory with a ?>
<?cs # replacement for this file in it. ?>
<?cs def:default_search_box() ?><?cs /def ?>
<?cs def:default_left_nav() ?><?cs /def ?>
<?cs # appears at the top of every page ?><?cs
def:custom_masthead() ?>
<div id="header">
@@ -9,7 +13,9 @@ def:custom_masthead() ?>
<a href="<?cs var:toroot ?>index.html" tabindex="-1"><?cs var:page.title ?></a>
</div>
<div id="headerRight">
<?cs call:default_search_box() ?>
<?cs if:!online-pdk ?>
<?cs call:default_search_box() ?>
<?cs /if ?>
</div><!-- headerRight -->
</div><!-- header --><?cs
/def ?>
@@ -21,4 +27,4 @@ def:custom_masthead() ?>
<?cs def:custom_buildinfo() ?>Build <?cs var:page.build ?> - <?cs var:page.now ?><?cs /def ?>
<?cs # appears on the side of the page ?>
<?cs def:custom_left_nav() ?><?cs call:default_left_nav() ?><?cs /def ?>
<?cs def:custom_left_nav() ?><?cs call:default_left_nav() ?><?cs /def ?>

View File

@@ -3,10 +3,7 @@
<link rel="shortcut icon" type="image/x-icon" href="<?cs var:toroot ?>favicon.ico" />
<title><?cs
if:page.title ?><?cs
var:page.title ?><?cs
if:sdk.version ?> (<?cs
var:sdk.version ?>)<?cs
/if ?> | <?cs
var:page.title ?> | <?cs
/if ?>Android Developers</title><?cs
if:guide||sdk ?>
<link href="<?cs var:toroot ?>assets/android-developer-docs-devguide.css" rel="stylesheet" type="text/css" /><?cs

View File

@@ -61,6 +61,9 @@ def:tag_list(tags) ?><?cs
elif:tag.name == "@sample" ?><pre class="Code prettyprint"><?cs var:tag.text ?></pre><?cs
elif:tag.name == "@include" ?><?cs var:tag.text ?><?cs
elif:tag.kind == "@docRoot" ?><?cs var:toroot ?><?cs
elif:tag.kind == "@sdkCurrent" ?><?cs var:sdk.current ?><?cs
elif:tag.kind == "@sdkCurrentVersion" ?><?cs var:sdk.version ?><?cs
elif:tag.kind == "@sdkCurrentRelId" ?><?cs var:sdk.rel.id ?><?cs
elif:tag.kind == "@inheritDoc" ?><?cs # This is the case when @inheritDoc is in something
that doesn't inherit from anything?><?cs
elif:tag.kind == "@attr" ?><?cs
@@ -230,108 +233,5 @@ def:expandable_class_list(id, classes, default) ?>
</div><?cs
/def ?>
<?cs # The default side navigation for the reference docs ?><?cs
def:default_left_nav() ?>
<div class="g-section g-tpl-240" id="body-content">
<div class="g-unit g-first side-nav-resizable" id="side-nav">
<div id="swapper">
<div id="nav-panels">
<div id="resize-packages-nav">
<div id="packages-nav">
<div id="index-links"><nobr>
<a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
<a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
</div>
<ul><?cs
each:pkg=docs.packages ?>
<li <?cs if:(class.package.name == pkg.name) || (package.name == pkg.name)?>class="selected"<?cs /if ?>><?cs call:package_link(pkg) ?></li><?cs
/each ?>
</ul><br/>
</div> <!-- end packages -->
</div> <!-- end resize-packages -->
<div id="classes-nav"><?cs
if:subcount(class.package) ?>
<ul>
<?cs call:list("Interfaces", class.package.interfaces) ?>
<?cs call:list("Classes", class.package.classes) ?>
<?cs call:list("Enums", class.package.enums) ?>
<?cs call:list("Exceptions", class.package.exceptions) ?>
<?cs call:list("Errors", class.package.errors) ?>
</ul><?cs
elif:subcount(package) ?>
<ul>
<?cs call:class_link_list("Interfaces", package.interfaces) ?>
<?cs call:class_link_list("Classes", package.classes) ?>
<?cs call:class_link_list("Enums", package.enums) ?>
<?cs call:class_link_list("Exceptions", package.exceptions) ?>
<?cs call:class_link_list("Errors", package.errors) ?>
</ul><?cs
else ?>
<script>
/*addLoadEvent(maxPackageHeight);*/
</script>
<p style="padding:10px">Select a package to view its members</p><?cs
/if ?><br/>
</div><!-- end classes -->
</div><!-- end nav-panels -->
<div id="nav-tree" style="display:none">
<div id="index-links"><nobr>
<a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
<a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
</div>
</div><!-- end nav-tree -->
</div><!-- end swapper -->
</div> <!-- end side-nav -->
<script>
if (!isMobile) {
$("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav");
chooseDefaultNav();
if ($("#nav-tree").is(':visible')) init_navtree("nav-tree", "<?cs var:toroot ?>", NAVTREE_DATA);
else {
addLoadEvent(function() {
scrollIntoView("packages-nav");
scrollIntoView("classes-nav");
});
}
$("#swapper").css({borderBottom:"2px solid #aaa"});
} else {
swapNav(); // tree view should be used on mobile
}
</script><?cs
/def ?>
<?cs # The default search box that goes in the header ?><?cs
def:default_search_box() ?>
<div id="search" >
<div id="searchForm">
<form accept-charset="utf-8" class="gsc-search-box"
onsubmit="return submit_search()">
<table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody>
<tr>
<td class="gsc-input">
<input id="search_autocomplete" class="gsc-input" type="text" size="33" autocomplete="off"
title="search developer docs" name="q"
value="search developer docs"
onFocus="search_focus_changed(this, true)"
onBlur="search_focus_changed(this, false)"
onkeydown="return search_changed(event, true, '<?cs var:toroot?>')"
onkeyup="return search_changed(event, false, '<?cs var:toroot?>')" />
<div id="search_filtered_div" class="no-display">
<table id="search_filtered" cellspacing=0>
</table>
</div>
</td>
<td class="gsc-search-button">
<input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" />
</td>
<td class="gsc-clear-button">
<div title="clear results" class="gsc-clear-button">&nbsp;</div>
</td>
</tr></tbody>
</table>
</form>
</div><!-- searchForm -->
</div><!-- search --><?cs
/def ?>
<?cs include:"customization.cs" ?>

View File

@@ -18,7 +18,7 @@
<div id="jd-content">
<p><a href="<?cs var:realFile ?>">Original <?cs var:realFile ?></a></p>
<p>The file containing the source code shown below is located in the corresponding directory in <code>&lt;sdk&gt;/platforms/android-&lt;version&gt;/samples/...</code></p>
<!-- begin file contents -->
<pre class="Code prettyprint"><?cs var:fileContents ?></pre>

View File

@@ -0,0 +1,205 @@
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import common
class AmendGenerator(object):
"""Class to generate scripts in the 'amend' recovery script language
used up through cupcake."""
def __init__(self):
self.script = ['assert compatible_with("0.2") == "true"']
self.included_files = set()
def MakeTemporary(self):
"""Make a temporary script object whose commands can latter be
appended to the parent script with AppendScript(). Used when the
caller wants to generate script commands out-of-order."""
x = AmendGenerator()
x.script = []
x.included_files = self.included_files
return x
@staticmethod
def _FileRoot(fn):
"""Convert a file path to the 'root' notation used by amend."""
if fn.startswith("/system/"):
return "SYSTEM:" + fn[8:]
elif fn == "/system":
return "SYSTEM:"
elif fn.startswith("/tmp/"):
return "CACHE:.." + fn
else:
raise ValueError("don't know root for \"%s\"" % (fn,))
@staticmethod
def _PartitionRoot(partition):
"""Convert a partition name to the 'root' notation used by amend."""
if partition == "userdata":
return "DATA:"
else:
return partition.upper() + ":"
def AppendScript(self, other):
"""Append the contents of another script (which should be created
with temporary=True) to this one."""
self.script.extend(other.script)
self.included_files.update(other.included_files)
def AssertSomeFingerprint(self, *fp):
"""Assert that the current fingerprint is one of *fp."""
x = [('file_contains("SYSTEM:build.prop", '
'"ro.build.fingerprint=%s") == "true"') % i for i in fp]
self.script.append("assert %s" % (" || ".join(x),))
def AssertOlderBuild(self, timestamp):
"""Assert that the build on the device is older (or the same as)
the given timestamp."""
self.script.append("run_program PACKAGE:check_prereq %s" % (timestamp,))
self.included_files.add("check_prereq")
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
self.script.append('assert getprop("ro.product.device") == "%s" || '
'getprop("ro.build.product") == "%s"' % (device, device))
def AssertSomeBootloader(self, *bootloaders):
"""Asert that the bootloader version is one of *bootloaders."""
self.script.append("assert " +
" || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]))
def ShowProgress(self, frac, dur):
"""Update the progress bar, advancing it over 'frac' over the next
'dur' seconds."""
self.script.append("show_progress %f %d" % (frac, int(dur)))
def PatchCheck(self, filename, *sha1):
"""Check that the given file (or MTD reference) has one of the
given *sha1 hashes."""
out = ["run_program PACKAGE:applypatch -c %s" % (filename,)]
for i in sha1:
out.append(" " + i)
self.script.append("".join(out))
self.included_files.add("applypatch")
def CacheFreeSpaceCheck(self, amount):
"""Check that there's at least 'amount' space that can be made
available on /cache."""
self.script.append("run_program PACKAGE:applypatch -s %d" % (amount,))
self.included_files.add("applypatch")
def Mount(self, kind, what, path):
# no-op; amend uses it's 'roots' system to automatically mount
# things when they're referred to
pass
def UnpackPackageDir(self, src, dst):
"""Unpack a given directory from the OTA package into the given
destination directory."""
dst = self._FileRoot(dst)
self.script.append("copy_dir PACKAGE:%s %s" % (src, dst))
def Comment(self, comment):
"""Write a comment into the update script."""
self.script.append("")
for i in comment.split("\n"):
self.script.append("# " + i)
self.script.append("")
def Print(self, message):
"""Log a message to the screen (if the logs are visible)."""
# no way to do this from amend; substitute a script comment instead
self.Comment(message)
def FormatPartition(self, partition):
"""Format the given MTD partition."""
self.script.append("format %s" % (self._PartitionRoot(partition),))
def DeleteFiles(self, file_list):
"""Delete all files in file_list."""
line = []
t = 0
for i in file_list:
i = self._FileRoot(i)
line.append(i)
t += len(i) + 1
if t > 80:
self.script.append("delete " + " ".join(line))
line = []
t = 0
if line:
self.script.append("delete " + " ".join(line))
def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
"""Apply binary patches (in *patchpairs) to the given srcfile to
produce tgtfile (which may be "-" to indicate overwriting the
source file."""
if len(patchpairs) % 2 != 0:
raise ValueError("bad patches given to ApplyPatch")
self.script.append(
("run_program PACKAGE:applypatch %s %s %s %d " %
(srcfile, tgtfile, tgtsha1, tgtsize)) +
" ".join(["%s:%s" % patchpairs[i:i+2]
for i in range(0, len(patchpairs), 2)]))
self.included_files.add("applypatch")
def WriteFirmwareImage(self, kind, fn):
"""Arrange to update the given firmware image (kind must be
"hboot" or "radio") when recovery finishes."""
self.script.append("write_%s_image PACKAGE:%s" % (kind, fn))
def WriteRawImage(self, partition, fn):
"""Write the given file into the given MTD partition."""
self.script.append("write_raw_image PACKAGE:%s %s" %
(fn, self._PartitionRoot(partition)))
def SetPermissions(self, fn, uid, gid, mode):
"""Set file ownership and permissions."""
fn = self._FileRoot(fn)
self.script.append("set_perm %d %d 0%o %s" % (uid, gid, mode, fn))
def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode):
"""Recursively set path ownership and permissions."""
fn = self._FileRoot(fn)
self.script.append("set_perm_recursive %d %d 0%o 0%o %s" %
(uid, gid, dmode, fmode, fn))
def MakeSymlinks(self, symlink_list):
"""Create symlinks, given a list of (dest, link) pairs."""
self.script.extend(["symlink %s %s" % (i[0], self._FileRoot(i[1]))
for i in sorted(symlink_list)])
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
def AddToZip(self, input_zip, output_zip, input_path=None):
"""Write the accumulated script to the output_zip file. input_zip
is used as the source for any ancillary binaries needed by the
script. If input_path is not None, it will be used as a local
path for binaries instead of input_zip."""
common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-script",
"\n".join(self.script) + "\n")
for i in self.included_files:
try:
if input_path is None:
data = input_zip.read(os.path.join("OTA/bin", i))
else:
data = open(os.path.join(input_path, i)).read()
common.ZipWriteStr(output_zip, i, data, perms=0755)
except (IOError, KeyError), e:
raise ExternalError("unable to include binary %s: %s" % (i, e))

View File

@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import getopt
import getpass
import os
@@ -20,6 +21,7 @@ import shutil
import subprocess
import sys
import tempfile
import zipfile
# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
@@ -27,7 +29,7 @@ if not hasattr(os, "SEEK_SET"):
class Options(object): pass
OPTIONS = Options()
OPTIONS.signapk_jar = "out/host/linux-x86/framework/signapk.jar"
OPTIONS.search_path = "out/host/linux-x86"
OPTIONS.max_image_size = {}
OPTIONS.verbose = False
OPTIONS.tempfiles = []
@@ -61,40 +63,62 @@ def LoadBoardConfig(fn):
def BuildAndAddBootableImage(sourcedir, targetname, output_zip):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Put the boot image
into the output zip file under the name 'targetname'."""
into the output zip file under the name 'targetname'. Returns
targetname on success or None on failure (if sourcedir does not
appear to contain files for the requested image)."""
print "creating %s..." % (targetname,)
img = BuildBootableImage(sourcedir)
if img is None:
return None
CheckSize(img, targetname)
output_zip.writestr(targetname, img)
ZipWriteStr(output_zip, targetname, img)
return targetname
def BuildBootableImage(sourcedir):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Return the image data."""
'sourcedir'), and turn them into a boot image. Return the image
data, or None if sourcedir does not appear to contains files for
building the requested image."""
if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
return None
ramdisk_img = tempfile.NamedTemporaryFile()
img = tempfile.NamedTemporaryFile()
p1 = Run(["mkbootfs", os.path.join(sourcedir, "RAMDISK")],
stdout=subprocess.PIPE)
p2 = Run(["gzip", "-n"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2 = Run(["minigzip"],
stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,)
assert p2.returncode == 0, "gzip of %s ramdisk failed" % (targetname,)
assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,)
cmdline = open(os.path.join(sourcedir, "cmdline")).read().rstrip("\n")
p = Run(["mkbootimg",
"--kernel", os.path.join(sourcedir, "kernel"),
"--cmdline", cmdline,
"--ramdisk", ramdisk_img.name,
"--output", img.name],
stdout=subprocess.PIPE)
cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")]
fn = os.path.join(sourcedir, "cmdline")
if os.access(fn, os.F_OK):
cmd.append("--cmdline")
cmd.append(open(fn).read().rstrip("\n"))
fn = os.path.join(sourcedir, "base")
if os.access(fn, os.F_OK):
cmd.append("--base")
cmd.append(open(fn).read().rstrip("\n"))
cmd.extend(["--ramdisk", ramdisk_img.name,
"--output", img.name])
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "mkbootimg of %s image failed" % (targetname,)
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -131,22 +155,30 @@ def GetKeyPasswords(keylist):
those which require them. Return a {key: password} dict. password
will be None if the key has no password."""
key_passwords = {}
no_passwords = []
need_passwords = []
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
p = subprocess.Popen(["openssl", "pkcs8", "-in", k+".pk8",
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
# An empty-string key is used to mean don't re-sign this package.
# Obviously we don't need a password for this non-key.
if not k:
no_passwords.append(k)
continue
p = Run(["openssl", "pkcs8", "-in", k+".pk8",
"-inform", "DER", "-nocrypt"],
stdin=devnull.fileno(),
stdout=devnull.fileno(),
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
print "%s.pk8 does not require a password" % (k,)
key_passwords[k] = None
no_passwords.append(k)
else:
key_passwords[k] = getpass.getpass("Enter password for %s.pk8> " % (k,))
need_passwords.append(k)
devnull.close()
print
key_passwords = PasswordManager().GetPasswords(need_passwords)
key_passwords.update(dict.fromkeys(no_passwords, None))
return key_passwords
@@ -167,12 +199,13 @@ def SignFile(input_name, output_name, key, password, align=None):
else:
sign_name = output_name
p = subprocess.Popen(["java", "-jar", OPTIONS.signapk_jar,
key + ".x509.pem",
key + ".pk8",
input_name, sign_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
p = Run(["java", "-jar",
os.path.join(OPTIONS.search_path, "framework", "signapk.jar"),
key + ".x509.pem",
key + ".pk8",
input_name, sign_name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
if password is not None:
password += "\n"
p.communicate(password)
@@ -180,7 +213,7 @@ def SignFile(input_name, output_name, key, password, align=None):
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
if align:
p = subprocess.Popen(["zipalign", "-f", str(align), sign_name, output_name])
p = Run(["zipalign", "-f", str(align), sign_name, output_name])
p.communicate()
if p.returncode != 0:
raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
@@ -209,8 +242,8 @@ def CheckSize(data, target):
COMMON_DOCSTRING = """
-p (--path) <dir>
Prepend <dir> to the list of places to search for binaries run
by this script.
Prepend <dir>/bin to the list of places to search for binaries
run by this script, and expect to find jars in <dir>/framework.
-v (--verbose)
Show command lines being executed.
@@ -252,15 +285,13 @@ def ParseOptions(argv,
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
os.environ["PATH"] = a + os.pathsep + os.environ["PATH"]
path_specified = True
OPTIONS.search_path = a
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
if not path_specified:
os.environ["PATH"] = ("out/host/linux-x86/bin" + os.pathsep +
os.environ["PATH"])
os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
os.pathsep + os.environ["PATH"])
return args
@@ -271,3 +302,111 @@ def Cleanup():
shutil.rmtree(i)
else:
os.remove(i)
class PasswordManager(object):
def __init__(self):
self.editor = os.getenv("EDITOR", None)
self.pwfile = os.getenv("ANDROID_PW_FILE", None)
def GetPasswords(self, items):
"""Get passwords corresponding to each string in 'items',
returning a dict. (The dict may have keys in addition to the
values in 'items'.)
Uses the passwords in $ANDROID_PW_FILE if available, letting the
user edit that file to add more needed passwords. If no editor is
available, or $ANDROID_PW_FILE isn't define, prompts the user
interactively in the ordinary way.
"""
current = self.ReadFile()
first = True
while True:
missing = []
for i in items:
if i not in current or not current[i]:
missing.append(i)
# Are all the passwords already in the file?
if not missing: return current
for i in missing:
current[i] = ""
if not first:
print "key file %s still missing some passwords." % (self.pwfile,)
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
first = False
current = self.UpdateAndReadFile(current)
def PromptResult(self, current):
"""Prompt the user to enter a value (password) for each key in
'current' whose value is fales. Returns a new dict with all the
values.
"""
result = {}
for k, v in sorted(current.iteritems()):
if v:
result[k] = v
else:
while True:
result[k] = getpass.getpass("Enter password for %s key> "
% (k,)).strip()
if result[k]: break
return result
def UpdateAndReadFile(self, current):
if not self.editor or not self.pwfile:
return self.PromptResult(current)
f = open(self.pwfile, "w")
os.chmod(self.pwfile, 0600)
f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
sorted = [(not v, k, v) for (k, v) in current.iteritems()]
sorted.sort()
for i, (_, k, v) in enumerate(sorted):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
# position cursor on first line with no password.
first_line = i + 4
f.close()
p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
_, _ = p.communicate()
return self.ReadFile()
def ReadFile(self):
result = {}
if self.pwfile is None: return result
try:
f = open(self.pwfile, "r")
for line in f:
line = line.strip()
if not line or line[0] == '#': continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
print "failed to parse password file: ", line
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError, e:
if e.errno != errno.ENOENT:
print "error reading password file: ", str(e)
return result
def ZipWriteStr(zip, filename, data, perms=0644):
# use a fixed timestamp so the output is repeatable.
zinfo = zipfile.ZipInfo(filename=filename,
date_time=(2009, 1, 1, 0, 0, 0))
zinfo.compress_type = zip.compression
zinfo.external_attr = perms << 16
zip.writestr(zinfo, data)

View File

@@ -0,0 +1,226 @@
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import common
class EdifyGenerator(object):
"""Class to generate scripts in the 'edify' recovery script language
used from donut onwards."""
def __init__(self, version):
self.script = []
self.mounts = set()
self.version = version
def MakeTemporary(self):
"""Make a temporary script object whose commands can latter be
appended to the parent script with AppendScript(). Used when the
caller wants to generate script commands out-of-order."""
x = EdifyGenerator(self.version)
x.mounts = self.mounts
return x
@staticmethod
def _WordWrap(cmd, linelen=80):
"""'cmd' should be a function call with null characters after each
parameter (eg, "somefun(foo,\0bar,\0baz)"). This function wraps cmd
to a given line length, replacing nulls with spaces and/or newlines
to format it nicely."""
indent = cmd.index("(")+1
out = []
first = True
x = re.compile("^(.{,%d})\0" % (linelen-indent,))
while True:
if not first:
out.append(" " * indent)
first = False
m = x.search(cmd)
if not m:
parts = cmd.split("\0", 1)
out.append(parts[0]+"\n")
if len(parts) == 1:
break
else:
cmd = parts[1]
continue
out.append(m.group(1)+"\n")
cmd = cmd[m.end():]
return "".join(out).replace("\0", " ").rstrip("\n")
def AppendScript(self, other):
"""Append the contents of another script (which should be created
with temporary=True) to this one."""
self.script.extend(other.script)
def AssertSomeFingerprint(self, *fp):
"""Assert that the current system build fingerprint is one of *fp."""
if not fp:
raise ValueError("must specify some fingerprints")
cmd = ('assert(' +
' ||\0'.join([('file_getprop("/system/build.prop", '
'"ro.build.fingerprint") == "%s"')
% i for i in fp]) +
');')
self.script.append(self._WordWrap(cmd))
def AssertOlderBuild(self, timestamp):
"""Assert that the build on the device is older (or the same as)
the given timestamp."""
self.script.append(('assert(!less_than_int(%s, '
'getprop("ro.build.date.utc")));') % (timestamp,))
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
cmd = ('assert(getprop("ro.product.device") == "%s" ||\0'
'getprop("ro.build.product") == "%s");' % (device, device))
self.script.append(self._WordWrap(cmd))
def AssertSomeBootloader(self, *bootloaders):
"""Asert that the bootloader version is one of *bootloaders."""
cmd = ("assert(" +
" ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]) +
");")
self.script.append(self._WordWrap(cmd))
def ShowProgress(self, frac, dur):
"""Update the progress bar, advancing it over 'frac' over the next
'dur' seconds."""
self.script.append("show_progress(%f, %d);" % (frac, int(dur)))
def PatchCheck(self, filename, *sha1):
"""Check that the given file (or MTD reference) has one of the
given *sha1 hashes."""
self.script.append('assert(apply_patch_check("%s"' % (filename,) +
"".join([', "%s"' % (i,) for i in sha1]) +
'));')
def CacheFreeSpaceCheck(self, amount):
"""Check that there's at least 'amount' space that can be made
available on /cache."""
self.script.append("assert(apply_patch_space(%d));" % (amount,))
def Mount(self, kind, what, path):
"""Mount the given 'what' at the given path. 'what' should be a
partition name if kind is "MTD", or a block device if kind is
"vfat". No other values of 'kind' are supported."""
self.script.append('mount("%s", "%s", "%s");' % (kind, what, path))
self.mounts.add(path)
def UnpackPackageDir(self, src, dst):
"""Unpack a given directory from the OTA package into the given
destination directory."""
self.script.append('package_extract_dir("%s", "%s");' % (src, dst))
def Comment(self, comment):
"""Write a comment into the update script."""
self.script.append("")
for i in comment.split("\n"):
self.script.append("# " + i)
self.script.append("")
def Print(self, message):
"""Log a message to the screen (if the logs are visible)."""
self.script.append('ui_print("%s");' % (message,))
def FormatPartition(self, partition):
"""Format the given MTD partition."""
self.script.append('format("MTD", "%s");' % (partition,))
def DeleteFiles(self, file_list):
"""Delete all files in file_list."""
if not file_list: return
cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
self.script.append(self._WordWrap(cmd))
def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
"""Apply binary patches (in *patchpairs) to the given srcfile to
produce tgtfile (which may be "-" to indicate overwriting the
source file."""
if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
raise ValueError("bad patches given to ApplyPatch")
cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
% (srcfile, tgtfile, tgtsha1, tgtsize)]
for i in range(0, len(patchpairs), 2):
cmd.append(',\0"%s:%s"' % patchpairs[i:i+2])
cmd.append(');')
cmd = "".join(cmd)
self.script.append(self._WordWrap(cmd))
def WriteFirmwareImage(self, kind, fn):
"""Arrange to update the given firmware image (kind must be
"hboot" or "radio") when recovery finishes."""
if self.version == 1:
self.script.append(
('assert(package_extract_file("%(fn)s", "/tmp/%(kind)s.img"),\n'
' write_firmware_image("/tmp/%(kind)s.img", "%(kind)s"));')
% {'kind': kind, 'fn': fn})
else:
self.script.append(
'write_firmware_image("PACKAGE:%s", "%s");' % (fn, kind))
def WriteRawImage(self, partition, fn):
"""Write the given package file into the given MTD partition."""
self.script.append(
('assert(package_extract_file("%(fn)s", "/tmp/%(partition)s.img"),\n'
' write_raw_image("/tmp/%(partition)s.img", "%(partition)s"),\n'
' delete("/tmp/%(partition)s.img"));')
% {'partition': partition, 'fn': fn})
def SetPermissions(self, fn, uid, gid, mode):
"""Set file ownership and permissions."""
self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode):
"""Recursively set path ownership and permissions."""
self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
% (uid, gid, dmode, fmode, fn))
def MakeSymlinks(self, symlink_list):
"""Create symlinks, given a list of (dest, link) pairs."""
by_dest = {}
for d, l in symlink_list:
by_dest.setdefault(d, []).append(l)
for dest, links in sorted(by_dest.iteritems()):
cmd = ('symlink("%s", ' % (dest,) +
",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
self.script.append(self._WordWrap(cmd))
def AppendExtra(self, extra):
"""Append text verbatim to the output script."""
self.script.append(extra)
def AddToZip(self, input_zip, output_zip, input_path=None):
"""Write the accumulated script to the output_zip file. input_zip
is used as the source for the 'updater' binary needed to run
script. If input_path is not None, it will be used as a local
path for the binary instead of input_zip."""
for p in sorted(self.mounts):
self.script.append('unmount("%s");' % (p,))
common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
"\n".join(self.script) + "\n")
if input_path is None:
data = input_zip.read("OTA/bin/updater")
else:
data = open(os.path.join(input_path, "updater")).read()
common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
data, perms=0755)

View File

@@ -96,7 +96,7 @@ def AddSystem(output_zip):
img.close()
common.CheckSize(data, "system.img")
output_zip.writestr("system.img", data)
common.ZipWriteStr(output_zip, "system.img", data)
def CopyInfo(output_zip):

View File

@@ -33,6 +33,22 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
Generate an incremental OTA using the given target-files zip as
the starting build.
-w (--wipe_user_data)
Generate an OTA package that will wipe the user data partition
when installed.
-n (--no_prereq)
Omit the timestamp prereq check normally included at the top of
the build scripts (used for developer OTA packages which
legitimately need to go back and forth).
-e (--extra_script) <file>
Insert the contents of file at the end of the update script.
-m (--script_mode) <mode>
Specify 'amend' or 'edify' scripts, or 'auto' to pick
automatically (this is the default).
"""
import sys
@@ -51,6 +67,8 @@ import time
import zipfile
import common
import amend_generator
import edify_generator
OPTIONS = common.OPTIONS
OPTIONS.package_key = "build/target/product/security/testkey"
@@ -58,6 +76,10 @@ OPTIONS.incremental_source = None
OPTIONS.require_verbatim = set()
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.omit_prereq = False
OPTIONS.extra_script = None
OPTIONS.script_mode = 'auto'
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
@@ -178,11 +200,10 @@ class Item:
return d
def SetPermissions(self, script, renamer=lambda x: x):
def SetPermissions(self, script):
"""Append set_perm/set_perm_recursive commands to 'script' to
set all permissions, users, and groups for the tree of files
rooted at 'self'. 'renamer' turns the filenames stored in the
tree of Items into the strings used in the script."""
rooted at 'self'."""
self.CountChildMetadata()
@@ -193,22 +214,19 @@ class Item:
# supposed to be something different.
if item.dir:
if current != item.best_subtree:
script.append("set_perm_recursive %d %d 0%o 0%o %s" %
(item.best_subtree + (renamer(item.name),)))
script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
current = item.best_subtree
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[2]:
script.append("set_perm %d %d 0%o %s" %
(item.uid, item.gid, item.mode, renamer(item.name)))
script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
for i in item.children:
recurse(i, current)
else:
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[3]:
script.append("set_perm %d %d 0%o %s" %
(item.uid, item.gid, item.mode, renamer(item.name)))
script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
recurse(self, (-1, -1, -1, -1))
@@ -230,7 +248,7 @@ def CopySystemFiles(input_zip, output_zip=None,
basefilename = info.filename[7:]
if IsSymlink(info):
symlinks.append((input_zip.read(info.filename),
"SYSTEM:" + basefilename))
"/system/" + basefilename))
else:
info2 = copy.copy(info)
fn = info2.filename = "system/" + basefilename
@@ -251,14 +269,6 @@ def CopySystemFiles(input_zip, output_zip=None,
return symlinks
def AddScript(script, output_zip):
now = time.localtime()
i = zipfile.ZipInfo("META-INF/com/google/android/update-script",
(now.tm_year, now.tm_mon, now.tm_mday,
now.tm_hour, now.tm_min, now.tm_sec))
output_zip.writestr(i, "\n".join(script) + "\n")
def SignOutput(temp_zip_name, output_zip_name):
key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
pw = key_passwords[OPTIONS.package_key]
@@ -266,89 +276,75 @@ def SignOutput(temp_zip_name, output_zip_name):
common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw)
def SubstituteRoot(s):
if s == "system": return "SYSTEM:"
assert s.startswith("system/")
return "SYSTEM:" + s[7:]
def FixPermissions(script):
Item.GetMetadata()
root = Item.Get("system")
root.SetPermissions(script, renamer=SubstituteRoot)
root.SetPermissions(script)
def DeleteFiles(script, to_delete):
line = []
t = 0
for i in to_delete:
line.append(i)
t += len(i) + 1
if t > 80:
script.append("delete " + " ".join(line))
line = []
t = 0
if line:
script.append("delete " + " ".join(line))
def AppendAssertions(script, input_zip):
script.append('assert compatible_with("0.2") == "true"')
device = GetBuildProp("ro.product.device", input_zip)
script.append('assert getprop("ro.product.device") == "%s" || '
'getprop("ro.build.product") == "%s"' % (device, device))
script.AssertDevice(device)
info = input_zip.read("OTA/android-info.txt")
m = re.search(r"require\s+version-bootloader\s*=\s*(\S+)", info)
if not m:
raise ExternalError("failed to find required bootloaders in "
"android-info.txt")
bootloaders = m.group(1).split("|")
script.append("assert " +
" || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]))
def IncludeBinary(name, input_zip, output_zip):
try:
data = input_zip.read(os.path.join("OTA/bin", name))
output_zip.writestr(name, data)
except IOError:
raise ExternalError('unable to include device binary "%s"' % (name,))
if m:
bootloaders = m.group(1).split("|")
script.AssertSomeBootloader(*bootloaders)
def WriteFullOTAPackage(input_zip, output_zip):
script = []
if OPTIONS.script_mode in ("amend", "auto"):
script = amend_generator.AmendGenerator()
else:
# TODO: how to determine this? We don't know what version it will
# be installed on top of. For now, we expect the API just won't
# change very often.
script = edify_generator.EdifyGenerator(1)
ts = GetBuildProp("ro.build.date.utc", input_zip)
script.append("run_program PACKAGE:check_prereq %s" % (ts,))
IncludeBinary("check_prereq", input_zip, output_zip)
if not OPTIONS.omit_prereq:
ts = GetBuildProp("ro.build.date.utc", input_zip)
script.AssertOlderBuild(ts)
AppendAssertions(script, input_zip)
script.append("format BOOT:")
script.append("show_progress 0.1 0")
script.ShowProgress(0.1, 0)
output_zip.writestr("radio.img", input_zip.read("RADIO/image"))
script.append("write_radio_image PACKAGE:radio.img")
script.append("show_progress 0.5 0")
try:
common.ZipWriteStr(output_zip, "radio.img", input_zip.read("RADIO/image"))
script.WriteFirmwareImage("radio", "radio.img")
except KeyError:
print "warning: no radio image in input target_files; not flashing radio"
script.append("format SYSTEM:")
script.append("copy_dir PACKAGE:system SYSTEM:")
script.ShowProgress(0.5, 0)
if OPTIONS.wipe_user_data:
script.FormatPartition("userdata")
script.FormatPartition("system")
script.Mount("MTD", "system", "/system")
script.UnpackPackageDir("system", "/system")
symlinks = CopySystemFiles(input_zip, output_zip)
script.extend(["symlink %s %s" % s for s in symlinks])
script.MakeSymlinks(symlinks)
common.BuildAndAddBootableImage(os.path.join(OPTIONS.input_tmp, "RECOVERY"),
"system/recovery.img", output_zip)
Item.Get("system/recovery.img", dir=False)
if common.BuildAndAddBootableImage(
os.path.join(OPTIONS.input_tmp, "RECOVERY"),
"system/recovery.img", output_zip):
Item.Get("system/recovery.img", dir=False)
FixPermissions(script)
common.AddBoot(output_zip)
script.append("show_progress 0.2 0")
script.append("write_raw_image PACKAGE:boot.img BOOT:")
script.append("show_progress 0.2 10")
script.ShowProgress(0.2, 0)
AddScript(script, output_zip)
script.WriteRawImage("boot", "boot.img")
script.ShowProgress(0.2, 10)
if OPTIONS.extra_script is not None:
script.AppendExtra(OPTIONS.extra_script)
script.AddToZip(input_zip, output_zip)
class File(object):
@@ -365,7 +361,7 @@ class File(object):
return t
def AddToZip(self, z):
z.writestr(self.name, self.data)
common.ZipWriteStr(z, self.name, self.data)
def LoadSystemFiles(z):
@@ -380,8 +376,11 @@ def LoadSystemFiles(z):
return out
def Difference(tf, sf):
"""Return the patch (as a string of data) needed to turn sf into tf."""
def Difference(tf, sf, diff_program):
"""Return the patch (as a string of data) needed to turn sf into tf.
diff_program is the name of an external program (or list, if
additional arguments are desired) to run to generate the diff.
"""
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
@@ -390,13 +389,21 @@ def Difference(tf, sf):
try:
ptemp = tempfile.NamedTemporaryFile()
p = common.Run(["bsdiff", stemp.name, ttemp.name, ptemp.name])
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = common.Run(cmd)
_, err = p.communicate()
if err:
raise ExternalError("failure running bsdiff:\n%s\n" % (err,))
if err or p.returncode != 0:
print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
return None
diff = ptemp.read()
ptemp.close()
finally:
ptemp.close()
stemp.close()
ttemp.close()
@@ -411,12 +418,42 @@ def GetBuildProp(property, z):
return bp
m = re.search(re.escape(property) + r"=(.*)\n", bp)
if not m:
raise ExternalException("couldn't find %s in build.prop" % (property,))
raise common.ExternalError("couldn't find %s in build.prop" % (property,))
return m.group(1).strip()
def GetRecoveryAPIVersion(zip):
"""Returns the version of the recovery API. Version 0 is the older
amend code (no separate binary)."""
try:
version = zip.read("META/recovery-api-version.txt")
return int(version)
except KeyError:
try:
# version one didn't have the recovery-api-version.txt file, but
# it did include an updater binary.
zip.getinfo("OTA/bin/updater")
return 1
except KeyError:
return 0
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
script = []
source_version = GetRecoveryAPIVersion(source_zip)
if OPTIONS.script_mode == 'amend':
script = amend_generator.AmendGenerator()
elif OPTIONS.script_mode == 'edify':
if source_version == 0:
print ("WARNING: generating edify script for a source that "
"can't install it.")
script = edify_generator.EdifyGenerator(source_version)
elif OPTIONS.script_mode == 'auto':
if source_version > 0:
script = edify_generator.EdifyGenerator(source_version)
else:
script = amend_generator.AmendGenerator()
else:
raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))
print "Loading target..."
target_data = LoadSystemFiles(target_zip)
@@ -433,20 +470,24 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
raise ExternalError("\"%s\" must be sent verbatim" % (fn,))
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
print "send", fn, "verbatim"
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
elif tf.sha1 != sf.sha1:
# File is different; consider sending as a patch
d = Difference(tf, sf)
print fn, tf.size, len(d), (float(len(d)) / tf.size)
if len(d) > tf.size * OPTIONS.patch_threshold:
diff_method = "bsdiff"
if tf.name.endswith(".gz"):
diff_method = "imgdiff"
d = Difference(tf, sf, diff_method)
if d is not None:
print fn, tf.size, len(d), (float(len(d)) / tf.size)
if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
# patch is almost as big as the file; don't bother patching
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
else:
output_zip.writestr("patch/" + fn + ".p", d)
common.ZipWriteStr(output_zip, "patch/" + fn + ".p", d)
patch_list.append((fn, tf, sf, tf.size))
largest_source_size = max(largest_source_size, sf.size)
else:
@@ -459,23 +500,24 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
source_fp = GetBuildProp("ro.build.fingerprint", source_zip)
target_fp = GetBuildProp("ro.build.fingerprint", target_zip)
script.append(('assert file_contains("SYSTEM:build.prop", '
'"ro.build.fingerprint=%s") == "true" || '
'file_contains("SYSTEM:build.prop", '
'"ro.build.fingerprint=%s") == "true"') %
(source_fp, target_fp))
script.Mount("MTD", "system", "/system")
script.AssertSomeFingerprint(source_fp, target_fp)
source_boot = common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "BOOT"))
target_boot = common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "BOOT"))
updating_boot = (source_boot != target_boot)
source_boot = File("/tmp/boot.img",
common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "BOOT")))
target_boot = File("/tmp/boot.img",
common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "BOOT")))
updating_boot = (source_boot.data != target_boot.data)
source_recovery = common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "RECOVERY"))
target_recovery = common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "RECOVERY"))
updating_recovery = (source_recovery != target_recovery)
source_recovery = File("system/recovery.img",
common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "RECOVERY")))
target_recovery = File("system/recovery.img",
common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "RECOVERY")))
updating_recovery = (source_recovery.data != target_recovery.data)
source_radio = source_zip.read("RADIO/image")
target_radio = target_zip.read("RADIO/image")
@@ -491,65 +533,110 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
AppendAssertions(script, target_zip)
script.Print("Verifying current system...")
pb_verify = progress_bar_total * 0.3 * \
(total_patched_size /
float(total_patched_size+total_verbatim_size))
float(total_patched_size+total_verbatim_size+1))
for i, (fn, tf, sf, size) in enumerate(patch_list):
if i % 5 == 0:
next_sizes = sum([i[3] for i in patch_list[i:i+5]])
script.append("show_progress %f 1" %
(next_sizes * pb_verify / total_patched_size,))
script.append("run_program PACKAGE:applypatch -c /%s %s %s" %
(fn, tf.sha1, sf.sha1))
script.ShowProgress(next_sizes * pb_verify / (total_patched_size+1), 1)
if patch_list:
script.append("run_program PACKAGE:applypatch -s %d" %
(largest_source_size,))
script.append("copy_dir PACKAGE:patch CACHE:../tmp/patchtmp")
IncludeBinary("applypatch", target_zip, output_zip)
script.PatchCheck("/"+fn, tf.sha1, sf.sha1)
script.append("\n# ---- start making changes here\n")
if updating_recovery:
d = Difference(target_recovery, source_recovery, "imgdiff")
print "recovery target: %d source: %d diff: %d" % (
target_recovery.size, source_recovery.size, len(d))
DeleteFiles(script, [SubstituteRoot(i[0]) for i in verbatim_targets])
common.ZipWriteStr(output_zip, "patch/recovery.img.p", d)
script.PatchCheck("MTD:recovery:%d:%s:%d:%s" %
(source_recovery.size, source_recovery.sha1,
target_recovery.size, target_recovery.sha1))
if updating_boot:
script.append("format BOOT:")
output_zip.writestr("boot.img", target_boot)
d = Difference(target_boot, source_boot, "imgdiff")
print "boot target: %d source: %d diff: %d" % (
target_boot.size, source_boot.size, len(d))
common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
script.PatchCheck("MTD:boot:%d:%s:%d:%s" %
(source_boot.size, source_boot.sha1,
target_boot.size, target_boot.sha1))
if patch_list or updating_recovery or updating_boot:
script.CacheFreeSpaceCheck(largest_source_size)
script.Print("Unpacking patches...")
script.UnpackPackageDir("patch", "/tmp/patchtmp")
script.Comment("---- start making changes here ----")
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")
script.FormatPartition("userdata")
script.Print("Removing unneeded files...")
script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
["/"+i for i in sorted(source_data)
if i not in target_data])
if updating_boot:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
script.Print("Patching boot image...")
script.ApplyPatch("MTD:boot:%d:%s:%d:%s"
% (source_boot.size, source_boot.sha1,
target_boot.size, target_boot.sha1),
"-",
target_boot.size, target_boot.sha1,
source_boot.sha1, "/tmp/patchtmp/boot.img.p")
print "boot image changed; including."
else:
print "boot image unchanged; skipping."
if updating_recovery:
output_zip.writestr("system/recovery.img", target_recovery)
# Produce /system/recovery.img by applying a patch to the current
# contents of the recovery partition.
script.Print("Patching recovery image...")
script.ApplyPatch("MTD:recovery:%d:%s:%d:%s"
% (source_recovery.size, source_recovery.sha1,
target_recovery.size, target_recovery.sha1),
"/system/recovery.img",
target_recovery.size, target_recovery.sha1,
source_recovery.sha1, "/tmp/patchtmp/recovery.img.p")
print "recovery image changed; including."
else:
print "recovery image unchanged; skipping."
if updating_radio:
script.append("show_progress 0.3 10")
script.append("write_radio_image PACKAGE:radio.img")
output_zip.writestr("radio.img", target_radio)
script.ShowProgress(0.3, 10)
script.Print("Writing radio image...")
script.WriteFirmwareImage("radio", "radio.img")
common.ZipWriteStr(output_zip, "radio.img", target_radio)
print "radio image changed; including."
else:
print "radio image unchanged; skipping."
script.Print("Patching system files...")
pb_apply = progress_bar_total * 0.7 * \
(total_patched_size /
float(total_patched_size+total_verbatim_size))
float(total_patched_size+total_verbatim_size+1))
for i, (fn, tf, sf, size) in enumerate(patch_list):
if i % 5 == 0:
next_sizes = sum([i[3] for i in patch_list[i:i+5]])
script.append("show_progress %f 1" %
(next_sizes * pb_apply / total_patched_size,))
script.append(("run_program PACKAGE:applypatch "
"/%s %s %d %s:/tmp/patchtmp/%s.p") %
(fn, tf.sha1, tf.size, sf.sha1, fn))
script.ShowProgress(next_sizes * pb_apply / (total_patched_size+1), 1)
script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1,
sf.sha1, "/tmp/patchtmp/"+fn+".p")
target_symlinks = CopySystemFiles(target_zip, None)
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
temp_script = []
temp_script = script.MakeTemporary()
FixPermissions(temp_script)
# Note that this call will mess up the tree of Items, so make sure
@@ -564,14 +651,17 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
for dest, link in source_symlinks:
if link not in target_symlinks_d:
to_delete.append(link)
DeleteFiles(script, to_delete)
script.DeleteFiles(to_delete)
if verbatim_targets:
pb_verbatim = progress_bar_total * \
(total_verbatim_size /
float(total_patched_size+total_verbatim_size))
script.append("show_progress %f 5" % (pb_verbatim,))
script.append("copy_dir PACKAGE:system SYSTEM:")
float(total_patched_size+total_verbatim_size+1))
script.ShowProgress(pb_verbatim, 5)
script.Print("Unpacking new files...")
script.UnpackPackageDir("system", "/system")
script.Print("Finishing up...")
# Create all the symlinks that don't already exist, or point to
# somewhere different than what we want. Delete each symlink before
@@ -583,18 +673,17 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
to_create.append((dest, link))
else:
to_create.append((dest, link))
DeleteFiles(script, [i[1] for i in to_create])
script.extend(["symlink %s %s" % s for s in to_create])
script.DeleteFiles([i[1] for i in to_create])
script.MakeSymlinks(to_create)
# Now that the symlinks are created, we can set all the
# permissions.
script.extend(temp_script)
script.AppendScript(temp_script)
if updating_boot:
script.append("show_progress 0.1 5")
script.append("write_raw_image PACKAGE:boot.img BOOT:")
if OPTIONS.extra_script is not None:
scirpt.AppendExtra(OPTIONS.extra_script)
AddScript(script, output_zip)
script.AddToZip(target_zip, output_zip)
def main(argv):
@@ -602,21 +691,31 @@ def main(argv):
def option_handler(o, a):
if o in ("-b", "--board_config"):
common.LoadBoardConfig(a)
return True
elif o in ("-k", "--package_key"):
OPTIONS.package_key = a
return True
elif o in ("-i", "--incremental_from"):
OPTIONS.incremental_source = a
return True
elif o in ("-w", "--wipe_user_data"):
OPTIONS.wipe_user_data = True
elif o in ("-n", "--no_prereq"):
OPTIONS.omit_prereq = True
elif o in ("-e", "--extra_script"):
OPTIONS.extra_script = a
elif o in ("-m", "--script_mode"):
OPTIONS.script_mode = a
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
extra_opts="b:k:i:d:",
extra_opts="b:k:i:d:wne:m:",
extra_long_opts=["board_config=",
"package_key=",
"incremental_from="],
"incremental_from=",
"wipe_user_data",
"no_prereq",
"extra_script=",
"script_mode="],
extra_option_handler=option_handler)
if len(args) != 2:
@@ -630,6 +729,12 @@ def main(argv):
print " images don't exceed partition sizes."
print
if OPTIONS.script_mode not in ("amend", "edify", "auto"):
raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))
if OPTIONS.extra_script is not None:
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
print "unzipping target target-files..."
OPTIONS.input_tmp = common.UnzipTemp(args[0])
OPTIONS.target_tmp = OPTIONS.input_tmp

View File

@@ -47,6 +47,20 @@ Usage: sign_target_files_apks [flags] input_target_files output_target_files
-d and -k options are added to the set of mappings in the order
in which they appear on the command line.
-o (--replace_ota_keys)
Replace the certificate (public key) used by OTA package
verification with the one specified in the input target_files
zip (in the META/otakeys.txt file). Key remapping (-k and -d)
is performed on this key.
-t (--tag_changes) <+tag>,<-tag>,...
Comma-separated list of changes to make to the set of tags (in
the last component of the build fingerprint). Prefix each with
'+' or '-' to indicate whether that tag should be added or
removed. Changes are processed in the order they appear.
Default value is "-test-keys,+ota-rel-keys,+release-keys".
"""
import sys
@@ -55,6 +69,8 @@ if sys.hexversion < 0x02040000:
print >> sys.stderr, "Python 2.4 or newer is required."
sys.exit(1)
import cStringIO
import copy
import os
import re
import subprocess
@@ -67,7 +83,8 @@ OPTIONS = common.OPTIONS
OPTIONS.extra_apks = {}
OPTIONS.key_map = {}
OPTIONS.replace_ota_keys = False
OPTIONS.tag_changes = ("-test-keys", "+ota-rel-keys", "+release-keys")
def GetApkCerts(tf_zip):
certmap = {}
@@ -84,6 +101,85 @@ def GetApkCerts(tf_zip):
return certmap
def CheckAllApksSigned(input_tf_zip, apk_key_map):
"""Check that all the APKs we want to sign have keys specified, and
error out if they don't."""
unknown_apks = []
for info in input_tf_zip.infolist():
if info.filename.endswith(".apk"):
name = os.path.basename(info.filename)
if name not in apk_key_map:
unknown_apks.append(name)
if unknown_apks:
print "ERROR: no key specified for:\n\n ",
print "\n ".join(unknown_apks)
print "\nUse '-e <apkname>=' to specify a key (which may be an"
print "empty string to not sign this apk)."
sys.exit(1)
def SharedUserForApk(data):
tmp = tempfile.NamedTemporaryFile()
tmp.write(data)
tmp.flush()
p = common.Run(["aapt", "dump", "xmltree", tmp.name, "AndroidManifest.xml"],
stdout=subprocess.PIPE)
data, _ = p.communicate()
if p.returncode != 0:
raise ExternalError("failed to run aapt dump")
lines = data.split("\n")
for i in lines:
m = re.match(r'^\s*A: android:sharedUserId\([0-9a-fx]*\)="([^"]*)" .*$', i)
if m:
return m.group(1)
return None
def CheckSharedUserIdsConsistent(input_tf_zip, apk_key_map):
"""Check that all packages that request the same shared user id are
going to be signed with the same key."""
shared_user_apks = {}
maxlen = len("(unknown key)")
for info in input_tf_zip.infolist():
if info.filename.endswith(".apk"):
data = input_tf_zip.read(info.filename)
name = os.path.basename(info.filename)
shared_user = SharedUserForApk(data)
key = apk_key_map[name]
maxlen = max(maxlen, len(key))
if shared_user is not None:
shared_user_apks.setdefault(
shared_user, {}).setdefault(key, []).append(name)
errors = []
for k, v in shared_user_apks.iteritems():
# each shared user should have exactly one key used for all the
# apks that want that user.
if len(v) > 1:
errors.append((k, v))
if not errors: return
print "ERROR: shared user inconsistency. All apks wanting to use"
print " a given shared user must be signed with the same key."
print
errors.sort()
for user, keys in errors:
print 'shared user id "%s":' % (user,)
for key, apps in keys.iteritems():
print ' %-*s %s' % (maxlen, key or "(unknown key)", apps[0])
for a in apps[1:]:
print (' ' * (maxlen+5)) + a
print
sys.exit(1)
def SignApk(data, keyname, pw):
unsigned = tempfile.NamedTemporaryFile()
unsigned.write(data)
@@ -100,44 +196,107 @@ def SignApk(data, keyname, pw):
return data
def SignApks(input_tf_zip, output_tf_zip):
apk_key_map = GetApkCerts(input_tf_zip)
key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
def SignApks(input_tf_zip, output_tf_zip, apk_key_map, key_passwords):
maxsize = max([len(os.path.basename(i.filename))
for i in input_tf_zip.infolist()
if i.filename.endswith('.apk')])
for info in input_tf_zip.infolist():
data = input_tf_zip.read(info.filename)
out_info = copy.copy(info)
if info.filename.endswith(".apk"):
name = os.path.basename(info.filename)
key = apk_key_map.get(name, None)
if key is not None:
print "signing: %-*s (%s)" % (maxsize, name, key)
key = apk_key_map[name]
if key:
print " signing: %-*s (%s)" % (maxsize, name, key)
signed_data = SignApk(data, key, key_passwords[key])
output_tf_zip.writestr(info, signed_data)
output_tf_zip.writestr(out_info, signed_data)
else:
# an APK we're not supposed to sign.
print "skipping: %s" % (name,)
output_tf_zip.writestr(info, data)
elif info.filename == "SYSTEM/build.prop":
# Change build fingerprint to reflect the fact that apps are signed.
m = re.search(r"ro\.build\.fingerprint=.*\b(test-keys)\b.*", data)
if not m:
print 'WARNING: ro.build.fingerprint does not contain "test-keys"'
else:
data = data[:m.start(1)] + "release-keys" + data[m.end(1):]
m = re.search(r"ro\.build\.description=.*\b(test-keys)\b.*", data)
if not m:
print 'WARNING: ro.build.description does not contain "test-keys"'
else:
data = data[:m.start(1)] + "release-keys" + data[m.end(1):]
output_tf_zip.writestr(info, data)
print "NOT signing: %s" % (name,)
output_tf_zip.writestr(out_info, data)
elif info.filename in ("SYSTEM/build.prop",
"RECOVERY/RAMDISK/default.prop"):
print "rewriting %s:" % (info.filename,)
new_data = RewriteProps(data)
output_tf_zip.writestr(out_info, new_data)
else:
# a non-APK file; copy it verbatim
output_tf_zip.writestr(info, data)
output_tf_zip.writestr(out_info, data)
def RewriteProps(data):
output = []
for line in data.split("\n"):
line = line.strip()
original_line = line
if line and line[0] != '#':
key, value = line.split("=", 1)
if key == "ro.build.fingerprint":
pieces = line.split("/")
tags = set(pieces[-1].split(","))
for ch in OPTIONS.tag_changes:
if ch[0] == "-":
tags.discard(ch[1:])
elif ch[0] == "+":
tags.add(ch[1:])
line = "/".join(pieces[:-1] + [",".join(sorted(tags))])
elif key == "ro.build.description":
pieces = line.split(" ")
assert len(pieces) == 5
tags = set(pieces[-1].split(","))
for ch in OPTIONS.tag_changes:
if ch[0] == "-":
tags.discard(ch[1:])
elif ch[0] == "+":
tags.add(ch[1:])
line = " ".join(pieces[:-1] + [",".join(sorted(tags))])
if line != original_line:
print " replace: ", original_line
print " with: ", line
output.append(line)
return "\n".join(output) + "\n"
def ReplaceOtaKeys(input_tf_zip, output_tf_zip):
try:
keylist = input_tf_zip.read("META/otakeys.txt").split()
except KeyError:
raise ExternalError("can't read META/otakeys.txt from input")
mapped_keys = []
for k in keylist:
m = re.match(r"^(.*)\.x509\.pem$", k)
if not m:
raise ExternalError("can't parse \"%s\" from META/otakeys.txt" % (k,))
k = m.group(1)
mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
print "using:\n ", "\n ".join(mapped_keys)
print "for OTA package verification"
# recovery uses a version of the key that has been slightly
# predigested (by DumpPublicKey.java) and put in res/keys.
p = common.Run(["java", "-jar",
os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")]
+ mapped_keys,
stdout=subprocess.PIPE)
data, _ = p.communicate()
if p.returncode != 0:
raise ExternalError("failed to run dumpkeys")
common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", data)
# SystemUpdateActivity uses the x509.pem version of the keys, but
# put into a zipfile system/etc/security/otacerts.zip.
tempfile = cStringIO.StringIO()
certs_zip = zipfile.ZipFile(tempfile, "w")
for k in mapped_keys:
certs_zip.write(k)
certs_zip.close()
common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
tempfile.getvalue())
def main(argv):
@@ -160,16 +319,28 @@ def main(argv):
elif o in ("-k", "--key_mapping"):
s, d = a.split("=")
OPTIONS.key_map[s] = d
elif o in ("-o", "--replace_ota_keys"):
OPTIONS.replace_ota_keys = True
elif o in ("-t", "--tag_changes"):
new = []
for i in a.split(","):
i = i.strip()
if not i or i[0] not in "-+":
raise ValueError("Bad tag change '%s'" % (i,))
new.append(i[0] + i[1:].strip())
OPTIONS.tag_changes = tuple(new)
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
extra_opts="s:e:d:k:",
extra_opts="s:e:d:k:ot:",
extra_long_opts=["signapk_jar=",
"extra_apks=",
"default_key_mappings=",
"key_mapping="],
"key_mapping=",
"replace_ota_keys",
"tag_changes="],
extra_option_handler=option_handler)
if len(args) != 2:
@@ -179,7 +350,15 @@ def main(argv):
input_zip = zipfile.ZipFile(args[0], "r")
output_zip = zipfile.ZipFile(args[1], "w")
SignApks(input_zip, output_zip)
apk_key_map = GetApkCerts(input_zip)
CheckAllApksSigned(input_zip, apk_key_map)
CheckSharedUserIdsConsistent(input_zip, apk_key_map)
key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
SignApks(input_zip, output_zip, apk_key_map, key_passwords)
if OPTIONS.replace_ota_keys:
ReplaceOtaKeys(input_zip, output_zip)
input_zip.close()
output_zip.close()

View File

@@ -62,6 +62,7 @@ import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.regex.Pattern;
import javax.crypto.Cipher;
import javax.crypto.EncryptedPrivateKeyInfo;
import javax.crypto.SecretKeyFactory;
@@ -75,6 +76,10 @@ class SignApk {
private static final String CERT_SF_NAME = "META-INF/CERT.SF";
private static final String CERT_RSA_NAME = "META-INF/CERT.RSA";
// Files matching this pattern are not copied to the output.
private static Pattern stripPattern =
Pattern.compile("^META-INF/(.*)[.](SF|RSA|DSA)$");
private static X509Certificate readPublicKey(File file)
throws IOException, GeneralSecurityException {
FileInputStream input = new FileInputStream(file);
@@ -193,7 +198,9 @@ class SignApk {
for (JarEntry entry: byName.values()) {
String name = entry.getName();
if (!entry.isDirectory() && !name.equals(JarFile.MANIFEST_NAME) &&
!name.equals(CERT_SF_NAME) && !name.equals(CERT_RSA_NAME)) {
!name.equals(CERT_SF_NAME) && !name.equals(CERT_RSA_NAME) &&
(stripPattern == null ||
!stripPattern.matcher(name).matches())) {
InputStream data = jar.getInputStream(entry);
while ((num = data.read(buffer)) > 0) {
md.update(buffer, 0, num);

View File

@@ -30,7 +30,8 @@ void usage(void)
{
fprintf(stderr, "Zip alignment utility\n");
fprintf(stderr,
"Usage: zipalign [-f] [-v] <align> infile.zip outfile.zip\n");
"Usage: zipalign [-f] [-v] <align> infile.zip outfile.zip\n"
" zipalign -c [-v] <align> infile.zip\n" );
}
/*
@@ -152,14 +153,14 @@ static int verify(const char* fileName, int alignment, bool verbose)
pEntry = zipFile.getEntryByIndex(i);
if (pEntry->isCompressed()) {
if (verbose) {
printf("%8ld %s (OK - compressed)\n",
printf("%8ld %s (OK - compressed)\n",
(long) pEntry->getFileOffset(), pEntry->getFileName());
}
} else {
long offset = pEntry->getFileOffset();
if ((offset % alignment) != 0) {
if (verbose) {
printf("%8ld %s (BAD - %ld)\n",
printf("%8ld %s (BAD - %ld)\n",
(long) offset, pEntry->getFileName(),
offset % alignment);
}
@@ -185,6 +186,7 @@ static int verify(const char* fileName, int alignment, bool verbose)
int main(int argc, char* const argv[])
{
bool wantUsage = false;
bool check = false;
bool force = false;
bool verbose = false;
int result = 1;
@@ -204,6 +206,9 @@ int main(int argc, char* const argv[])
while (*cp != '\0') {
switch (*cp) {
case 'c':
check = true;
break;
case 'f':
force = true;
break;
@@ -223,7 +228,7 @@ int main(int argc, char* const argv[])
argv++;
}
if (argc != 3) {
if (!((check && argc == 2) || (!check && argc == 3))) {
wantUsage = true;
goto bail;
}
@@ -235,12 +240,17 @@ int main(int argc, char* const argv[])
goto bail;
}
/* create the new archive */
result = process(argv[1], argv[2], alignment, force);
if (check) {
/* check existing archive for correct alignment */
result = verify(argv[1], alignment, verbose);
} else {
/* create the new archive */
result = process(argv[1], argv[2], alignment, force);
/* trust, but verify */
if (result == 0)
result = verify(argv[2], alignment, verbose);
/* trust, but verify */
if (result == 0)
result = verify(argv[2], alignment, verbose);
}
bail:
if (wantUsage) {
@@ -250,4 +260,3 @@ bail:
return result;
}