/*
 * bmap.c --- logical to physical block mapping
 *
 * Copyright (C) 1997 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <errno.h>

#include "ext2_fs.h"
#include "ext2fsP.h"

#if defined(__GNUC__) && !defined(NO_INLINE_FUNCS)
#define _BMAP_INLINE_	__inline__
#else
#define _BMAP_INLINE_
#endif

extern errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     char *block_buf, int bmap_flags,
			     blk_t block, blk_t *phys_blk);

#define inode_bmap(inode, nr) ((inode)->i_block[(nr)])

/*
 * Look up (or, with BMAP_SET/BMAP_ALLOC, set or allocate) entry "nr"
 * within the single indirect block "ind".
 */
static _BMAP_INLINE_ errcode_t block_ind_bmap(ext2_filsys fs, int flags,
					      blk_t ind, char *block_buf,
					      int *blocks_alloc,
					      blk_t nr, blk_t *ret_blk)
{
	errcode_t	retval;
	blk_t		b;

	if (!ind) {
		if (flags & BMAP_SET)
			return EXT2_ET_SET_BMAP_NO_IND;
		*ret_blk = 0;
		return 0;
	}
	retval = io_channel_read_blk(fs->io, ind, 1, block_buf);
	if (retval)
		return retval;

	if (flags & BMAP_SET) {
		b = *ret_blk;
#ifdef WORDS_BIGENDIAN
		b = ext2fs_swab32(b);
#endif
		((blk_t *) block_buf)[nr] = b;
		return io_channel_write_blk(fs->io, ind, 1, block_buf);
	}

	b = ((blk_t *) block_buf)[nr];

#ifdef WORDS_BIGENDIAN
	b = ext2fs_swab32(b);
#endif

	if (!b && (flags & BMAP_ALLOC)) {
		b = nr ? ((blk_t *) block_buf)[nr-1] : 0;
		retval = ext2fs_alloc_block(fs, b,
					    block_buf + fs->blocksize, &b);
		if (retval)
			return retval;

#ifdef WORDS_BIGENDIAN
		((blk_t *) block_buf)[nr] = ext2fs_swab32(b);
#else
		((blk_t *) block_buf)[nr] = b;
#endif

		retval = io_channel_write_blk(fs->io, ind, 1, block_buf);
		if (retval)
			return retval;

		(*blocks_alloc)++;
	}

	*ret_blk = b;
	return 0;
}

/* Resolve entry "nr" through the doubly indirect block "dind". */
static _BMAP_INLINE_ errcode_t block_dind_bmap(ext2_filsys fs, int flags,
					       blk_t dind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_ind_bmap(fs, flags & ~BMAP_SET, dind, block_buf,
				blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

/* Resolve entry "nr" through the triply indirect block "tind". */
static _BMAP_INLINE_ errcode_t block_tind_bmap(ext2_filsys fs, int flags,
					       blk_t tind, char *block_buf,
					       int *blocks_alloc,
					       blk_t nr, blk_t *ret_blk)
{
	blk_t		b = 0;
	errcode_t	retval;
	blk_t		addr_per_block;

	addr_per_block = (blk_t) fs->blocksize >> 2;

	retval = block_dind_bmap(fs, flags & ~BMAP_SET, tind, block_buf,
				 blocks_alloc, nr / addr_per_block, &b);
	if (retval)
		return retval;
	retval = block_ind_bmap(fs, flags, b, block_buf, blocks_alloc,
				nr % addr_per_block, ret_blk);
	return retval;
}

static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk);

static errcode_t implied_cluster_alloc(ext2_filsys fs, ext2_ino_t ino,
				       struct ext2_inode *inode,
				       ext2_extent_handle_t handle,
				       blk64_t lblk, blk64_t *phys_blk)
{
	blk64_t	base_block, pblock = 0;
	int	i;

	if (!EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
					EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return 0;

	base_block = lblk & ~EXT2FS_CLUSTER_MASK(fs);
	/*
	 * Except for the logical block (lblk) that was passed in, search all
	 * blocks in this logical cluster for a mapping to a physical cluster.
	 * If any such map exists, calculate the physical block that maps to
	 * the logical block and return that.
	 *
	 * The old code wouldn't even look if (block % cluster_ratio) == 0;
	 * this is incorrect if we're allocating blocks in reverse order.
	 *
	 * For example, with a cluster ratio of 16 and lblk 35, base_block is
	 * 32; if lblk 33 turns out to be mapped to physical block P, then
	 * lblk 35 maps to P - 1 + 3, i.e. the same offset within the
	 * physical cluster.
	 */
	for (i = 0; i < EXT2FS_CLUSTER_RATIO(fs); i++) {
		if (base_block + i == lblk)
			continue;
		extent_bmap(fs, ino, inode, handle, 0, 0,
			    base_block + i, 0, 0, &pblock);
		if (pblock)
			break;
	}
	if (pblock == 0)
		return 0;
	*phys_blk = pblock - i + (lblk - base_block);
	return 0;
}

/* Try to map a logical block to an already-allocated physical cluster. */
errcode_t ext2fs_map_cluster_block(ext2_filsys fs, ext2_ino_t ino,
				   struct ext2_inode *inode, blk64_t lblk,
				   blk64_t *pblk)
{
	ext2_extent_handle_t	handle;
	errcode_t		retval;

	/* Need bigalloc and extents to be enabled */
	*pblk = 0;
	if (!EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
					EXT4_FEATURE_RO_COMPAT_BIGALLOC) ||
	    !(inode->i_flags & EXT4_EXTENTS_FL))
		return 0;

	retval = ext2fs_extent_open2(fs, ino, inode, &handle);
	if (retval)
		goto out;

	retval = implied_cluster_alloc(fs, ino, inode, handle, lblk, pblk);
	if (retval)
		goto out2;

out2:
	ext2fs_extent_free(handle);
out:
	return retval;
}

/*
 * Map (or, with BMAP_ALLOC/BMAP_SET, allocate or set) a logical block in
 * an extent-mapped inode.
 */
static errcode_t extent_bmap(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode *inode,
			     ext2_extent_handle_t handle,
			     char *block_buf, int bmap_flags, blk64_t block,
			     int *ret_flags, int *blocks_alloc,
			     blk64_t *phys_blk)
{
	struct ext2fs_extent	extent;
	unsigned int		offset;
	errcode_t		retval = 0;
	blk64_t			blk64 = 0;
	int			alloc = 0;

	if (bmap_flags & BMAP_SET) {
		retval = ext2fs_extent_set_bmap(handle, block,
						*phys_blk, 0);
		return retval;
	}
	retval = ext2fs_extent_goto(handle, block);
	if (retval) {
		/* If the extent is not found, return phys_blk = 0 */
		if (retval == EXT2_ET_EXTENT_NOT_FOUND)
			goto got_block;
		return retval;
	}
	retval = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT, &extent);
	if (retval)
		return retval;
	offset = block - extent.e_lblk;
	if (block >= extent.e_lblk && (offset <= extent.e_len)) {
		*phys_blk = extent.e_pblk + offset;
		if (ret_flags && extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			*ret_flags |= BMAP_RET_UNINIT;
	}
got_block:
	if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
		implied_cluster_alloc(fs, ino, inode, handle, block, &blk64);
		if (blk64)
			goto set_extent;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     0, block-1, 0, blocks_alloc, &blk64);
		if (retval)
			blk64 = 0;
		retval = ext2fs_alloc_block2(fs, blk64, block_buf,
					     &blk64);
		if (retval)
			return retval;
		blk64 &= ~EXT2FS_CLUSTER_MASK(fs);
		blk64 += EXT2FS_CLUSTER_MASK(fs) & block;
		alloc++;
	set_extent:
		retval = ext2fs_extent_set_bmap(handle, block,
						blk64, 0);
		if (retval)
			return retval;
		/* Update inode after setting extent */
		retval = ext2fs_read_inode(fs, ino, inode);
		if (retval)
			return retval;
		*blocks_alloc += alloc;
		*phys_blk = blk64;
	}
	return 0;
}

/* Return true if "offset" is too large for the inode's block map to address. */
int ext2fs_file_block_offset_too_big(ext2_filsys fs,
				     struct ext2_inode *inode,
				     blk64_t offset)
{
	blk64_t addr_per_block, max_map_block;

	/* Kernel seems to cut us off at 4294967294 blocks */
	if (offset >= (1ULL << 32) - 1)
		return 1;

	if (inode->i_flags & EXT4_EXTENTS_FL)
		return 0;

	addr_per_block = fs->blocksize >> 2;
	max_map_block = addr_per_block;
	max_map_block += addr_per_block * addr_per_block;
	max_map_block += addr_per_block * addr_per_block * addr_per_block;
	max_map_block += 12;

	return offset >= max_map_block;
}

/*
 * Map logical block "block" of inode "ino" to a physical block; depending
 * on bmap_flags this may also allocate the block or set the mapping.
 */
errcode_t ext2fs_bmap2(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		       char *block_buf, int bmap_flags, blk64_t block,
		       int *ret_flags, blk64_t *phys_blk)
{
	struct ext2_inode	inode_buf;
	ext2_extent_handle_t	handle = 0;
	blk_t			addr_per_block;
	blk_t			b, blk32;
	char			*buf = 0;
	errcode_t		retval = 0;
	int			blocks_alloc = 0, inode_dirty = 0;

	if (!(bmap_flags & BMAP_SET))
		*phys_blk = 0;

	if (ret_flags)
		*ret_flags = 0;

	/* Read inode structure if necessary */
	if (!inode) {
		retval = ext2fs_read_inode(fs, ino, &inode_buf);
		if (retval)
			return retval;
		inode = &inode_buf;
	}
	addr_per_block = (blk_t) fs->blocksize >> 2;

	if (ext2fs_file_block_offset_too_big(fs, inode, block))
		return EXT2_ET_FILE_TOO_BIG;

	if (!block_buf) {
		retval = ext2fs_get_array(2, fs->blocksize, &buf);
		if (retval)
			return retval;
		block_buf = buf;
	}

	if (inode->i_flags & EXT4_EXTENTS_FL) {
		retval = ext2fs_extent_open2(fs, ino, inode, &handle);
		if (retval)
			goto done;
		retval = extent_bmap(fs, ino, inode, handle, block_buf,
				     bmap_flags, block, ret_flags,
				     &blocks_alloc, phys_blk);
		goto done;
	}

	if (block < EXT2_NDIR_BLOCKS) {
		if (bmap_flags & BMAP_SET) {
			b = *phys_blk;
			inode_bmap(inode, block) = b;
			inode_dirty++;
			goto done;
		}

		*phys_blk = inode_bmap(inode, block);
		b = block ? inode_bmap(inode, block-1) : 0;

		if ((*phys_blk == 0) && (bmap_flags & BMAP_ALLOC)) {
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, block) = b;
			blocks_alloc++;
			*phys_blk = b;
		}
		goto done;
	}

	/* Indirect block */
	block -= EXT2_NDIR_BLOCKS;
	blk32 = *phys_blk;
	if (block < addr_per_block) {
		b = inode_bmap(inode, EXT2_IND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK-1);
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_IND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_ind_bmap(fs, bmap_flags, b, block_buf,
					&blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Doubly indirect block */
	block -= addr_per_block;
	if (block < addr_per_block * addr_per_block) {
		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		if (!b) {
			if (!(bmap_flags & BMAP_ALLOC)) {
				if (bmap_flags & BMAP_SET)
					retval = EXT2_ET_SET_BMAP_NO_IND;
				goto done;
			}

			b = inode_bmap(inode, EXT2_IND_BLOCK);
			retval = ext2fs_alloc_block(fs, b, block_buf, &b);
			if (retval)
				goto done;
			inode_bmap(inode, EXT2_DIND_BLOCK) = b;
			blocks_alloc++;
		}
		retval = block_dind_bmap(fs, bmap_flags, b, block_buf,
					 &blocks_alloc, block, &blk32);
		if (retval == 0)
			*phys_blk = blk32;
		goto done;
	}

	/* Triply indirect block */
	block -= addr_per_block * addr_per_block;
	b = inode_bmap(inode, EXT2_TIND_BLOCK);
	if (!b) {
		if (!(bmap_flags & BMAP_ALLOC)) {
			if (bmap_flags & BMAP_SET)
				retval = EXT2_ET_SET_BMAP_NO_IND;
			goto done;
		}

		b = inode_bmap(inode, EXT2_DIND_BLOCK);
		retval = ext2fs_alloc_block(fs, b, block_buf, &b);
		if (retval)
			goto done;
		inode_bmap(inode, EXT2_TIND_BLOCK) = b;
		blocks_alloc++;
	}
	retval = block_tind_bmap(fs, bmap_flags, b, block_buf,
				 &blocks_alloc, block, &blk32);
	if (retval == 0)
		*phys_blk = blk32;
done:
	if (buf)
		ext2fs_free_mem(&buf);
	if (handle)
		ext2fs_extent_free(handle);
	if ((retval == 0) && (blocks_alloc || inode_dirty)) {
		ext2fs_iblk_add_blocks(fs, inode, blocks_alloc);
		retval = ext2fs_write_inode(fs, ino, inode);
	}
	return retval;
}

/*
 * Legacy 32-bit interface: wraps ext2fs_bmap2() and returns EOVERFLOW if
 * the resulting physical block does not fit in a blk_t.
 */
errcode_t ext2fs_bmap(ext2_filsys fs, ext2_ino_t ino, struct ext2_inode *inode,
		      char *block_buf, int bmap_flags, blk_t block,
		      blk_t *phys_blk)
{
	errcode_t	ret;
	blk64_t		ret_blk = *phys_blk;

	ret = ext2fs_bmap2(fs, ino, inode, block_buf, bmap_flags, block,
			   0, &ret_blk);
	if (ret)
		return ret;
	if (ret_blk >= ((long long) 1 << 32))
		return EOVERFLOW;
	*phys_blk = ret_blk;
	return 0;
}
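
/*
 * Illustrative sketch (not part of the library): one way a caller might use
 * ext2fs_bmap2() to look up -- and, via BMAP_ALLOC, allocate if absent --
 * the physical block backing logical block 0 of an inode.  It assumes a
 * filesystem handle "fs" opened read/write with ext2fs_open() and an inode
 * number "ino" obtained elsewhere; error handling is abbreviated.
 *
 *	blk64_t		phys = 0;
 *	int		ret_flags = 0;
 *	errcode_t	err;
 *
 *	err = ext2fs_bmap2(fs, ino, NULL, NULL, BMAP_ALLOC, 0,
 *			   &ret_flags, &phys);
 *	if (err)
 *		com_err("bmap example", err, "while mapping logical block 0");
 *	else
 *		printf("logical 0 -> physical %llu%s\n",
 *		       (unsigned long long) phys,
 *		       (ret_flags & BMAP_RET_UNINIT) ? " (uninitialized)" : "");
 */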