Merge "blockimgdiff: selectively convert 'diff' commands to 'new' to reduce stash size" am: ffc87ac0a7

am: 4046b8f741

Change-Id: If6ad6cc16e5758f48ed79c3291e77c2321bd5a86
Author:    Tianjie Xu
Date:      2019-01-17 09:22:55 -08:00
Committer: android-build-merger

blockimgdiff.py

@@ -483,7 +483,8 @@ class BlockImageDiff(object):
       # choose the transfers for conversion. The number '1024' can be further
       # tweaked here to balance the package size and build time.
       if max_stashed_blocks > stash_limit + 1024:
-        self.SelectAndConvertDiffTransfersToNew()
+        self.SelectAndConvertDiffTransfersToNew(
+            max_stashed_blocks - stash_limit)
         # Regenerate the sequence as the graph has changed.
         self.FindSequenceForTransfers()
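Note: the argument added above is the number of blocks by which the simultaneous stash overshoots the limit; that overshoot becomes the budget for how many stashed blocks the conversion pass must shed. A minimal sketch of the arithmetic with hypothetical numbers (the block size and cache size are assumptions, and any threshold factor the real script applies to the cache size is omitted):

blocksize = 4096                         # assumed block size in bytes
cache_size = 100 * 1024 * 1024           # hypothetical 100 MiB cache partition
stash_limit = cache_size // blocksize    # 25600 blocks may be stashed at once

max_stashed_blocks = stash_limit + 2000  # hypothetical peak of the transfer schedule

# Mirrors the guard above: only convert when the overshoot exceeds 1024 blocks,
# since every conversion grows the package.
if max_stashed_blocks > stash_limit + 1024:
  violated_stash_blocks = max_stashed_blocks - stash_limit  # 2000 blocks to shed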
@@ -830,7 +831,8 @@ class BlockImageDiff(object):
       num_of_bytes = new_blocks * self.tgt.blocksize
       logger.info(
           " Total %d blocks (%d bytes) are packed as new blocks due to "
-          "insufficient cache size.", new_blocks, num_of_bytes)
+          "insufficient cache size. Maximum blocks stashed simultaneously: %d",
+          new_blocks, num_of_bytes, max_stashed_blocks)
       return new_blocks, max_stashed_blocks
 
   def ComputePatches(self, prefix):
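For reference, the byte figure in this log line is just the block count times the block size; for example, with the common 4096-byte block size (an assumption here, the real value comes from the target image):

new_blocks = 2048                        # hypothetical number of blocks packed as new
blocksize = 4096                         # assumed self.tgt.blocksize
num_of_bytes = new_blocks * blocksize    # 8388608 bytes, i.e. 8 MiB of 'new' data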
@@ -1339,7 +1341,7 @@ class BlockImageDiff(object):
     return patches
 
-  def SelectAndConvertDiffTransfersToNew(self):
+  def SelectAndConvertDiffTransfersToNew(self, violated_stash_blocks):
     """Converts the diff transfers to reduce the max simultaneous stash.
 
     Since the 'new' data is compressed with deflate, we can select the 'diff'
@@ -1347,6 +1349,8 @@ class BlockImageDiff(object):
     compressed data. Ideally, we want to convert the transfers with a small
     size increase, but using a large number of stashed blocks.
     """
+    TransferSizeScore = namedtuple("TransferSizeScore",
+                                   "xf, used_stash_blocks, score")
 
     logger.info("Selecting diff commands to convert to new.")
     diff_queue = []
@@ -1360,7 +1364,7 @@ class BlockImageDiff(object):
     # for the remaining.
     result = self.ComputePatchesForInputList(diff_queue, True)
 
-    removed_stashed_blocks = 0
+    conversion_candidates = []
     for xf_index, patch_info, compressed_size in result:
       xf = self.transfers[xf_index]
       if not xf.patch_info:
@@ -1368,21 +1372,43 @@ class BlockImageDiff(object):
       size_ratio = len(xf.patch_info.content) * 100.0 / compressed_size
       diff_style = "imgdiff" if xf.patch_info.imgdiff else "bsdiff"
-      logger.info("%s, target size: %d, style: %s, patch size: %d,"
+      logger.info("%s, target size: %d blocks, style: %s, patch size: %d,"
                   " compression_size: %d, ratio %.2f%%", xf.tgt_name,
                   xf.tgt_ranges.size(), diff_style,
                   len(xf.patch_info.content), compressed_size, size_ratio)
 
+      used_stash_blocks = sum(sr.size() for _, sr in xf.use_stash)
       # Convert the transfer to new if the compressed size is smaller or equal.
       # We don't need to maintain the stash_before lists here because the
       # graph will be regenerated later.
       if len(xf.patch_info.content) >= compressed_size:
-        removed_stashed_blocks += sum(sr.size() for _, sr in xf.use_stash)
-        logger.info("Converting %s to new", xf.tgt_name)
-        xf.ConvertToNew()
+        # Add the transfer to the candidate list with negative score. And it
+        # will be converted later.
+        conversion_candidates.append(TransferSizeScore(xf, used_stash_blocks,
+                                                       -1))
+      elif used_stash_blocks > 0:
+        # This heuristic represents the size increase in the final package to
+        # remove per unit of stashed data.
+        score = ((compressed_size - len(xf.patch_info.content)) * 100.0
+                 / used_stash_blocks)
+        conversion_candidates.append(TransferSizeScore(xf, used_stash_blocks,
+                                                       score))
 
-    # TODO(xunchang) convert more transfers by sorting:
-    # (compressed size - patch_size) / used_stashed_blocks
+    # Transfers with lower score (i.e. less expensive to convert) will be
+    # converted first.
+    conversion_candidates.sort(key=lambda x: x.score)
+
+    # TODO(xunchang), improve the logic to find the transfers to convert, e.g.
+    # convert the ones that contribute to the max stash, run ReviseStashSize
+    # multiple times etc.
+    removed_stashed_blocks = 0
+    for xf, used_stash_blocks, _ in conversion_candidates:
+      logger.info("Converting %s to new", xf.tgt_name)
+      xf.ConvertToNew()
+      removed_stashed_blocks += used_stash_blocks
+      # Experiments show that we will get a smaller package size if we remove
+      # slightly more stashed blocks than the violated stash blocks.
+      if removed_stashed_blocks >= violated_stash_blocks:
+        break
 
     logger.info("Removed %d stashed blocks", removed_stashed_blocks)