/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "ext2_fs.h"
#include "ext2fs.h"

struct block_context {
	ext2_filsys	fs;
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;
	int		bsize;
	int		flags;
	errcode_t	errcode;
	char	*ind_buf;
	char	*dind_buf;
	char	*tind_buf;
	void	*priv_data;
};

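/*
 * Helper macros: if the iteration was started with BLOCK_FLAG_READ_ONLY
 * but a callback reported BLOCK_CHANGED, record the read-only violation
 * in ctx->errcode, mark the result as aborted with an error, and bail
 * out of the caller (by returning or by jumping to the supplied label).
 */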
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)

#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)

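/*
 * The three helpers below walk one level of the classic ext2
 * indirect-block scheme: block_iterate_ind() visits the entries of a
 * single indirect block, block_iterate_dind() recurses through a
 * doubly-indirect block, and block_iterate_tind() through a
 * triply-indirect block.  Each helper reports the metadata block
 * itself before (or, with BLOCK_FLAG_DEPTH_TRAVERSE, after) its
 * children, skips it entirely for BLOCK_FLAG_DATA_ONLY, and rewrites
 * the block on disk if any callback returned BLOCK_CHANGED.
 */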
static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
		ret = (*ctx->func)(ctx->fs, ind_block,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ctx->fs->super->s_blocks_count ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				goto skip_sparse;
			flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
		skip_sparse:
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, ind_block,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, dind_block,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ctx->fs->super->s_blocks_count ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, dind_block,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY)))
		ret = (*ctx->func)(ctx->fs, tind_block,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit*limit;
		return ret;
	}
	if (*tind_block >= ctx->fs->super->s_blocks_count ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, tind_block,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

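/*
 * ext2fs_block_iterate2() calls func() once for each block in use by
 * the inode and, unless BLOCK_FLAG_DATA_ONLY is given, for each
 * metadata (indirect/extent-index) block as well.  The callback may
 * modify *blocknr and return BLOCK_CHANGED to have the new location
 * written back; returning BLOCK_ABORT stops the iteration.
 *
 * Illustrative sketch only (not part of this file): a read-only caller
 * that counts an inode's in-use blocks.  The names count_block and
 * struct count_ctx are hypothetical.
 *
 *	struct count_ctx { blk_t blocks; };
 *
 *	static int count_block(ext2_filsys fs EXT2FS_ATTR((unused)),
 *			       blk_t *blocknr,
 *			       e2_blkcnt_t blockcnt EXT2FS_ATTR((unused)),
 *			       blk_t ref_blk EXT2FS_ATTR((unused)),
 *			       int ref_offset EXT2FS_ATTR((unused)),
 *			       void *priv_data)
 *	{
 *		struct count_ctx *ctx = (struct count_ctx *) priv_data;
 *
 *		if (*blocknr)
 *			ctx->blocks++;
 *		return 0;
 *	}
 *
 *	struct count_ctx cnt = { 0 };
 *	retval = ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_READ_ONLY,
 *				       NULL, count_block, &cnt);
 */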
errcode_t ext2fs_block_iterate2(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
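	/*
	 * Set up the scratch buffers: one block each for the indirect,
	 * doubly-indirect, and triply-indirect levels, carved out of a
	 * single three-block allocation (or out of the caller-supplied
	 * block_buf, which must therefore be at least three blocks long).
	 */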
	if (block_buf) {
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			ret |= (*ctx.func)(fs,
					   &inode.osd1.hurd1.h_i_translator,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

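	/*
	 * Extent-mapped inodes (EXT4_EXTENTS_FL) are walked through the
	 * extent tree instead of the indirect-block scheme: interior
	 * (index) blocks are reported with a block count of -1 unless
	 * BLOCK_FLAG_DATA_ONLY is set, and each leaf extent is expanded
	 * into one callback invocation per block, with BLOCK_CHANGED
	 * results pushed back via ext2fs_extent_set_bmap().
	 */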
	if (inode.i_flags & EXT4_EXTENTS_FL) {
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent;
		e2_blkcnt_t		blockcnt = 0;
		blk_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			ctx.errcode = ext2fs_extent_get(handle, op, &extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
			next_block_set:
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					if (blk)
						goto next_block_set;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_errout);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       (blk64_t) new_blk,
						       uninit);
					if (ctx.errcode)
						goto extent_errout;
				}
				if (ret & BLOCK_ABORT)
					break;
			}
		}

	extent_errout:
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR | BLOCK_ABORT;
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			ret |= (*ctx.func)(fs, &inode.i_block[i],
					    ctx.bcount, 0, i, priv_data);
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}

/*
 * Emulate the old ext2fs_block_iterate function!
 */

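/*
 * struct xlate and xlate_func() adapt an old-style callback (which
 * takes the block count as a plain int) to the ext2fs_block_iterate2()
 * interface.  BLOCK_FLAG_NO_LARGE is OR'd into the flags so that large
 * files, whose block counts the old int-based interface cannot
 * represent safely, are rejected with EXT2_ET_FILE_TOO_BIG instead of
 * being silently truncated.
 */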
struct xlate {
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;
};

#ifdef __TURBOC__
 #pragma argsused
#endif
static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
		      blk_t ref_block EXT2FS_ATTR((unused)),
		      int ref_offset EXT2FS_ATTR((unused)),
		      void *priv_data)
{
	struct xlate *xl = (struct xlate *) priv_data;

	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
}

errcode_t ext2fs_block_iterate(ext2_filsys fs,
			       ext2_ino_t ino,
			       int	flags,
			       char *block_buf,
			       int (*func)(ext2_filsys fs,
					   blk_t	*blocknr,
					   int	blockcnt,
					   void	*priv_data),
			       void *priv_data)
{
	struct xlate xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
				     block_buf, xlate_func, &xl);
}