#!/usr/bin/env python
#
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

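"""Wrapper for parsing Android sparse image files.

Example usage (the filenames here are illustrative):

  simg = SparseImage("system.img", file_map_fn="system.map")
  print(simg.care_map)    # RangeSet of blocks that carry data
  print(simg.TotalSha1())
"""
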
from __future__ import print_function

import argparse
import bisect
import logging
import os
import struct
import threading
from hashlib import sha1

import rangelib

logger = logging.getLogger(__name__)


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of
  the old contents (i.e. copied instead of patched). clobbered_blocks should
  be given as a string such as "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False,
               hashtree_info_generator=None):
    self.simg_f = f = open(simg_fn, mode)

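    # The sparse image header is 28 bytes: a 4-byte magic, four 16-bit fields
    # (major/minor version, file header size, chunk header size) and four
    # 32-bit fields (block size, total output blocks, total chunks, image
    # checksum; the checksum is unused here).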
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    logger.info(
        "Total of %u %u-byte output blocks in %u input chunks.", total_blks,
        blk_sz, total_chunks)

    if not build_map:
      assert not hashtree_info_generator, \
        "Cannot generate the hashtree info without building the offset map."
      return

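    # Walk the chunks to build the care map (blocks that carry data) and the
    # offset map. Each offset_map entry is (start block, number of blocks,
    # offset of the raw data in the image file or None, 4-byte fill value or
    # None).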
    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

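    # Every chunk begins with a 12-byte header: a 16-bit type, a 16-bit
    # reserved field, the chunk size in output blocks, and the total chunk
    # size in bytes including this header. The known types are raw (0xCAC1),
    # fill (0xCAC2), don't care (0xCAC3) and CRC32 (0xCAC4).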
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        # Fills the don't care data ranges with zeros.
        # TODO(xunchang) pass the care_map to hashtree info generator.
        if hashtree_info_generator:
          fill_data = '\x00' * 4
          # In order to compute the verity hashtree on device, we need to
          # write zeros explicitly to the don't care ranges, because these
          # ranges may contain non-zero data from the previous build.
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, None, fill_data))

        pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]
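    # offset_index mirrors offset_map with just the chunk start blocks; it is
    # sorted, so bisect can locate the chunk covering any given block.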

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    self.hashtree_info = None
    if hashtree_info_generator:
      self.hashtree_info = hashtree_info_generator.Generate(self)

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
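    # (a 12-byte chunk header packed together with the 4-byte fill value:
    # type 0xCAC2, reserved field, chunk size in blocks, and a total size of
    # 16 bytes).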
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

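    # total_blks and total_chunks live at byte offsets 16 and 20 of the
    # 28-byte file header.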
    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'.  The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    Use a lock to protect the generator so that we will not run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e - s
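        # offset_index holds each chunk's start block in sorted order, so
        # bisect_right - 1 finds the chunk containing block s.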
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # For the first chunk, we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
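        # Raw chunks are read straight from the image file; fill chunks are
        # synthesized by repeating the 4-byte fill pattern blocksize / 4
        # times per block.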
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # Continue with the following chunks if this range spans multiple
          # chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
    remaining = self.care_map
    self.file_map = out = {}

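    # Each line in the block map file pairs a filename with the rangeset of
    # blocks it occupies, e.g. "<path> 8450-8461 9000".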
    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others. If there
          # are any, tag the entry so that we can skip applying imgdiff on
          # this file.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            non_shared = ranges.subtract(shared_blocks)
            if not non_shared:
              continue

            # There shouldn't be anything in the extra dict yet.
            assert not ranges.extra, "Non-empty RangeSet.extra"

            # Use the non-shared RangeSet as the value in the block map,
            # stashing the original RangeSet in its extra dict.
            non_shared.extra['uses_shared_blocks'] = ranges
            ranges = non_shared

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)
    if self.hashtree_info:
      remaining = remaining.subtract(self.hashtree_info.hashtree_range)

    # For all the remaining blocks in the care_map (i.e. those that aren't
    # part of the data for any file and aren't part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually a lot
    # of them and (2) bsdiff handles files with long sequences of repeated
    # bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map,
    # so the whole system image is treated as a single file. Due to an
    # unknown bug, the updater gets killed by OOM when writing the patched
    # image back to flash (observed on lenok-userdebug MEA49). Until there is
    # a real fix, we evenly divide the non-zero blocks into smaller groups
    # (currently 1024 blocks, i.e. 4MB per group for 4096-byte blocks).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b + 1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b + 1)

          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks
    if self.hashtree_info:
      out["__HASHTREE"] = self.hashtree_info.hashtree_range

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}


def GetImagePartitionSize(img):
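  """Returns the size of the partition the image will occupy, in bytes.

  For a valid sparse image this is blocksize * total_blocks; for anything
  else we fall back to the size of the image file itself.
  """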
  try:
    simg = SparseImage(img, build_map=False)
    return simg.blocksize * simg.total_blocks
  except ValueError:
    return os.path.getsize(img)


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('image')
  parser.add_argument('--get_partition_size', action='store_true',
                      help='Return the partition size of the image')
  args = parser.parse_args()
  if args.get_partition_size:
    print(GetImagePartitionSize(args.image))