# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import os
import struct
from hashlib import sha1

import rangelib

class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and an optional file map and clobbered_blocks)
  into an image object suitable for passing to BlockImageDiff. file_map
  contains the mapping between files and their blocks. clobbered_blocks
  contains the set of blocks that should always be written to the target
  regardless of the old contents (i.e. copied rather than patched).
  clobbered_blocks should be a string of ranges such as "0" or "0 1-5 8".
  """
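  # A minimal usage sketch (hypothetical file names; the block map file is
  # optional):
  #
  #   simg = SparseImage("system.img", file_map_fn="system.map",
  #                      clobbered_blocks="0 1-5 8")
  #   print(simg.TotalSha1())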

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True):
    self.simg_f = f = open(simg_fn, mode)

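    # The 28-byte sparse image header, per libsparse's sparse_format.h:
    #   u32 magic; u16 major_version; u16 minor_version; u16 file_hdr_sz;
    #   u16 chunk_hdr_sz; u32 blk_sz; u32 total_blks; u32 total_chunks;
    #   u32 image_checksum;
    # The trailing image_checksum (header[8]) is not used here.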
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    for _ in range(total_chunks):
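      # Each chunk starts with a 12-byte header: u16 chunk_type, u16
      # reserved (header[1], unused), u32 chunk_sz (output size in blocks),
      # u32 total_sz (input size in bytes, including this header). Chunk
      # types, per libsparse: 0xCAC1 raw, 0xCAC2 fill, 0xCAC3 don't care,
      # 0xCAC4 CRC32.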
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)   # the 4-byte pattern repeated across chunk_sz blocks
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]
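    # offset_index lists each chunk's starting output block in ascending
    # order, so bisect can map any block number back to its chunk in
    # offset_map.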

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
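    # (A sketch, assuming RangeSet.extend(n) grows each range by n blocks on
    # both sides: a care_map of "0-99" on a 10000-block image produces an
    # extended set of "100-611".)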
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
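    """Appends a fill chunk of 'blocks' blocks to the image and updates the
    sparse header counters.

    'data' is the 4-byte fill pattern as an unsigned int; for example (a
    usage sketch), AppendFillChunk(0, 1024) appends 1024 zero-filled blocks.
    """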
    f = self.simg_f

    # Append a fill chunk: a 12-byte chunk header (type 0xCAC2, reserved,
    # chunk_sz in blocks, total_sz of 16 bytes) followed by the 4-byte fill
    # pattern.
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update total_blks and total_chunks in the sparse header; they sit at
    # byte offset 16 of the 28-byte header.
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

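  # The range-based methods below take rangelib.RangeSet objects; e.g. (a
  # sketch) simg.RangeSha1(rangelib.RangeSet("0-9")) hashes the image's
  # first ten blocks.
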
  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, the hash also covers the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e-s
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # For the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        # fill_data is 4 bytes, so repeat it blocksize/4 times per block.
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # Continue with the following chunks if this range spans multiple
        # chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
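    """Loads the block map file at 'fn' and builds self.file_map.

    Blocks in the care_map that belong to no file are grouped under "__ZERO"
    and "__NONZERO-%d" entries; clobbered_blocks, if non-empty, appear as
    "__COPY"."""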
    remaining = self.care_map
    self.file_map = out = {}

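    # Each line of the map file is "<path> <ranges>", e.g. (a hypothetical
    # entry):
    #   /system/build.prop 4872-4873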
    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e., those that
    # aren't part of the data for any file nor part of the clobbered_blocks),
    # divide them into blocks that are all zero and blocks that aren't.
    # (Zero blocks are handled specially because (1) there are usually
    # a lot of them and (2) bsdiff handles files with long sequences of
    # repeated bytes especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs, we don't have a system.map,
    # so the whole system image will be treated as a single file. But due to
    # an unknown bug, the updater will be killed by the OOM killer when
    # writing the patched image back to flash (observed on lenok-userdebug
    # MEA49). Until there is a real fix, we evenly divide the non-zero blocks
    # into smaller groups (currently 1024 blocks or 4MB per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b-chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

          # Each block contributes a (start, end) pair, i.e. two list
          # entries, so compare against twice the intended group size.
          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP * 2:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}