# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import os
import struct
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True):
    self.simg_f = f = open(simg_fn, mode)

    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

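    # Walk the chunk list, building care_data (a flattened list of start/end
    # block pairs for regions that carry data) and offset_map (tuples of
    # (start_block, block_count, file_offset, fill_data) used to locate any
    # block's bytes later). As an illustration, not data from a real image:
    # a single raw chunk of 10 blocks at the front of the image contributes
    # care_data [0, 10] and the offset_map entry (0, 10, 40, None), since its
    # data begins right after the 28-byte file header and the 12-byte chunk
    # header.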
    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk.
    f.seek(0, os.SEEK_END)
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header.
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    h = sha1()
    for d in self._GetRangeData(ranges):
      h.update(d)
    return h.hexdigest()
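
  # Note on the lookup in _GetRangeData below: offset_index holds the start
  # block of every raw and fill chunk in ascending order, so
  # bisect.bisect_right(offset_index, s) - 1 picks the last chunk whose start
  # block is <= s, i.e. the chunk containing block s. Don't-care chunks never
  # appear in offset_map, which is safe because callers only pass ranges that
  # lie within the care map. For a fill chunk, fill_data is a 4-byte pattern,
  # so repeating it (blocksize >> 2) times reproduces one full block.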
  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e - s
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # For the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # Continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e., those that aren't
    # part of the data for any file nor part of the clobbered_blocks), divide
    # them into blocks that are all zero and blocks that aren't. (Zero blocks
    # are handled specially because (1) there are usually a lot of them and
    # (2) bsdiff handles files with long sequences of repeated bytes
    # especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = b'\0' * self.blocksize

    # Bug: 23227672
    # Workaround for the bug. For squashfs we don't have a system.map, so the
    # whole system image is treated as a single file. But for some unknown
    # reason, the updater gets killed due to OOM when it writes the patched
    # image back to flash (observed on lenok-userdebug MEA49). Until there is
    # a real fix, we evenly divide the non-zero blocks into smaller groups
    # (currently 1024 blocks, or 4MB, per group).
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b + 1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b + 1)

          if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
            nonzero_groups.append(nonzero_blocks)
            # Clear the list.
            nonzero_blocks = []
    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}
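

# A minimal usage sketch, not part of the original module: it opens the
# sparse image named on the command line (any image produced by img2simg
# should work), prints the derived block maps, and hashes the care region.
# It only exercises the public API defined above.
if __name__ == "__main__":
  import sys

  simg = SparseImage(sys.argv[1])
  print("care map: %s" % (simg.care_map,))
  print("extended blocks: %s" % (simg.extended,))
  print("SHA-1 over care region: %s" % (simg.TotalSha1(),))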