/*
 * block.c --- iterate over all blocks in an inode
 *
 * Copyright (C) 1993, 1994, 1995, 1996 Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "ext2_fs.h"
#include "ext2fs.h"

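/*
 * Shared state for one iteration pass: the filesystem, the caller's
 * callback and private data, the running logical block count, and the
 * scratch buffers used while walking the indirect, doubly-indirect and
 * triply-indirect trees.
 */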
struct block_context {
	ext2_filsys	fs;
	int (*func)(ext2_filsys	fs,
		    blk64_t	*blocknr,
		    e2_blkcnt_t	bcount,
		    blk64_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	e2_blkcnt_t	bcount;
	int		bsize;
	int		flags;
	errcode_t	errcode;
	char	*ind_buf;
	char	*dind_buf;
	char	*tind_buf;
	void	*priv_data;
};

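/*
 * When BLOCK_FLAG_READ_ONLY is set, the callback must not report
 * BLOCK_CHANGED; these helpers catch such a violation, record
 * EXT2_ET_RO_BLOCK_ITERATE in the context, and abort the iteration
 * with BLOCK_ERROR.
 */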
#define check_for_ro_violation_return(ctx, ret)				\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			return ret;					\
		}							\
	} while (0)

#define check_for_ro_violation_goto(ctx, ret, label)			\
	do {								\
		if (((ctx)->flags & BLOCK_FLAG_READ_ONLY) &&		\
		    ((ret) & BLOCK_CHANGED)) {				\
			(ctx)->errcode = EXT2_ET_RO_BLOCK_ITERATE;	\
			ret |= BLOCK_ABORT | BLOCK_ERROR;		\
			goto label;					\
		}							\
	} while (0)

static int block_iterate_ind(blk_t *ind_block, blk_t ref_block,
			     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY)) {
		blk64 = *ind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_IND, ref_block,
				   ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit;
		return ret;
	}
	if (*ind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->ind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
			if (*block_nr == 0)
				goto skip_sparse;
			blk64 = *block_nr;
			flags = (*ctx->func)(ctx->fs, &blk64, ctx->bcount,
					     *ind_block, offset,
					     ctx->priv_data);
			*block_nr = blk64;
			changed	|= flags;
			if (flags & BLOCK_ABORT) {
				ret |= BLOCK_ABORT;
				break;
			}
		skip_sparse:
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *ind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_IND, ref_block,
				    ref_offset, ctx->priv_data);
		*ind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

static int block_iterate_dind(blk_t *dind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		blk64 = *dind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_DIND, ref_block,
				   ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*dind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit;
		return ret;
	}
	if (*dind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *dind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
					     ctx->dind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->dind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit;
				continue;
			}
			flags = block_iterate_ind(block_nr,
						  *dind_block, offset,
						  ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
						      ctx->dind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *dind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_DIND, ref_block,
				    ref_offset, ctx->priv_data);
		*dind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

static int block_iterate_tind(blk_t *tind_block, blk_t ref_block,
			      int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0;
	int	i, flags, limit, offset;
	blk_t	*block_nr;
	blk64_t	blk64;

	limit = ctx->fs->blocksize >> 2;
	if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
			    BLOCK_FLAG_DATA_ONLY))) {
		blk64 = *tind_block;
		ret = (*ctx->func)(ctx->fs, &blk64,
				   BLOCK_COUNT_TIND, ref_block,
				   ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	if (!*tind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit*limit*limit;
		return ret;
	}
	if (*tind_block >= ext2fs_blocks_count(ctx->fs->super) ||
	    *tind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
		ret |= BLOCK_ERROR;
		return ret;
	}
	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
					     ctx->tind_buf);
	if (ctx->errcode) {
		ret |= BLOCK_ERROR;
		return ret;
	}

	block_nr = (blk_t *) ctx->tind_buf;
	offset = 0;
	if (ctx->flags & BLOCK_FLAG_APPEND) {
		for (i = 0; i < limit; i++, block_nr++) {
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	} else {
		for (i = 0; i < limit; i++, block_nr++) {
			if (*block_nr == 0) {
				ctx->bcount += limit*limit;
				continue;
			}
			flags = block_iterate_dind(block_nr,
						   *tind_block,
						   offset, ctx);
			changed |= flags;
			if (flags & (BLOCK_ABORT | BLOCK_ERROR)) {
				ret |= flags & (BLOCK_ABORT | BLOCK_ERROR);
				break;
			}
			offset += sizeof(blk_t);
		}
	}
	check_for_ro_violation_return(ctx, changed);
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
						      ctx->tind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT)) {
		blk64 = *tind_block;
		ret |= (*ctx->func)(ctx->fs, &blk64,
				    BLOCK_COUNT_TIND, ref_block,
				    ref_offset, ctx->priv_data);
		*tind_block = blk64;
	}
	check_for_ro_violation_return(ctx, ret);
	return ret;
}

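/*
 * Main entry point: call func for every block of the inode.  Extent-mapped
 * inodes (EXT4_EXTENTS_FL) are walked through the extent API; otherwise the
 * direct blocks are visited in place and the indirect trees are walked by
 * the recursive helpers above.  Metadata blocks are reported with the
 * BLOCK_COUNT_IND/DIND/TIND pseudo-counts unless BLOCK_FLAG_DATA_ONLY is
 * given.
 */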
errcode_t ext2fs_block_iterate3(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk64_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk64_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	int	i;
	int	r, ret = 0;
	struct ext2_inode inode;
	errcode_t	retval;
	struct block_context ctx;
	int	limit;
	blk64_t	blk64;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx.errcode)
		return ctx.errcode;

	/*
	 * Check to see if we need to limit large files
	 */
	if (flags & BLOCK_FLAG_NO_LARGE) {
		if (!LINUX_S_ISDIR(inode.i_mode) &&
		    (inode.i_size_high != 0))
			return EXT2_ET_FILE_TOO_BIG;
	}

	limit = fs->blocksize >> 2;

	ctx.fs = fs;
	ctx.func = func;
	ctx.priv_data = priv_data;
	ctx.flags = flags;
	ctx.bcount = 0;
	if (block_buf) {
		ctx.ind_buf = block_buf;
	} else {
		retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
		if (retval)
			return retval;
	}
	ctx.dind_buf = ctx.ind_buf + fs->blocksize;
	ctx.tind_buf = ctx.dind_buf + fs->blocksize;

	/*
	 * Iterate over the HURD translator block (if present)
	 */
	if ((fs->super->s_creator_os == EXT2_OS_HURD) &&
	    !(flags & BLOCK_FLAG_DATA_ONLY)) {
		if (inode.osd1.hurd1.h_i_translator) {
			blk64 = inode.osd1.hurd1.h_i_translator;
			ret |= (*ctx.func)(fs, &blk64,
					   BLOCK_COUNT_TRANSLATOR,
					   0, 0, priv_data);
			inode.osd1.hurd1.h_i_translator = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
			check_for_ro_violation_goto(&ctx, ret, abort_exit);
		}
	}

	if (inode.i_flags & EXT4_EXTENTS_FL) {
		ext2_extent_handle_t	handle;
		struct ext2fs_extent	extent, next;
		e2_blkcnt_t		blockcnt = 0;
		blk64_t			blk, new_blk;
		int			op = EXT2_EXTENT_ROOT;
		int			uninit;
		unsigned int		j;

		ctx.errcode = ext2fs_extent_open2(fs, ino, &inode, &handle);
		if (ctx.errcode)
			goto abort_exit;

		while (1) {
			if (op == EXT2_EXTENT_CURRENT)
				ctx.errcode = 0;
			else
				ctx.errcode = ext2fs_extent_get(handle, op,
								&extent);
			if (ctx.errcode) {
				if (ctx.errcode != EXT2_ET_EXTENT_NO_NEXT)
					break;
				ctx.errcode = 0;
				if (!(flags & BLOCK_FLAG_APPEND))
					break;
			next_block_set:
				blk = 0;
				r = (*ctx.func)(fs, &blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt++,
						       (blk64_t) blk, 0);
					if (ctx.errcode || (ret & BLOCK_ABORT))
						break;
					if (blk)
						goto next_block_set;
				}
				break;
			}

			op = EXT2_EXTENT_NEXT;
			blk = extent.e_pblk;
			if (!(extent.e_flags & EXT2_EXTENT_FLAGS_LEAF)) {
				if (ctx.flags & BLOCK_FLAG_DATA_ONLY)
					continue;
				if ((!(extent.e_flags &
				       EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     !(ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE)) ||
				    ((extent.e_flags &
				      EXT2_EXTENT_FLAGS_SECOND_VISIT) &&
				     (ctx.flags & BLOCK_FLAG_DEPTH_TRAVERSE))) {
					ret |= (*ctx.func)(fs, &blk,
							   -1, 0, 0, priv_data);
					if (ret & BLOCK_CHANGED) {
						extent.e_pblk = blk;
						ctx.errcode =
				ext2fs_extent_replace(handle, 0, &extent);
						if (ctx.errcode)
							break;
					}
					if (ret & BLOCK_ABORT)
						break;
				}
				continue;
			}
			uninit = 0;
			if (extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT)
				uninit = EXT2_EXTENT_SET_BMAP_UNINIT;

			/*
			 * Get the next extent before we start messing
			 * with the current extent
			 */
			retval = ext2fs_extent_get(handle, op, &next);

#if 0
			printf("lblk %llu pblk %llu len %d blockcnt %llu\n",
			       extent.e_lblk, extent.e_pblk,
			       extent.e_len, blockcnt);
#endif
			if (extent.e_lblk + extent.e_len <= (blk64_t) blockcnt)
				continue;
			if (extent.e_lblk > (blk64_t) blockcnt)
				blockcnt = extent.e_lblk;
			j = blockcnt - extent.e_lblk;
			blk += j;
			for (blockcnt = extent.e_lblk, j = 0;
			     j < extent.e_len;
			     blk++, blockcnt++, j++) {
				new_blk = blk;
				r = (*ctx.func)(fs, &new_blk, blockcnt,
						0, 0, priv_data);
				ret |= r;
				check_for_ro_violation_goto(&ctx, ret,
							    extent_done);
				if (r & BLOCK_CHANGED) {
					ctx.errcode =
						ext2fs_extent_set_bmap(handle,
						       (blk64_t) blockcnt,
						       new_blk, uninit);
					if (ctx.errcode)
						goto extent_done;
				}
				if (ret & BLOCK_ABORT)
					goto extent_done;
			}
			if (retval == 0) {
				extent = next;
				op = EXT2_EXTENT_CURRENT;
			}
		}

	extent_done:
		ext2fs_extent_free(handle);
		ret |= BLOCK_ERROR; /* ctx.errcode is always valid here */
		goto errout;
	}

	/*
	 * Iterate over normal data blocks
	 */
	for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
		if (inode.i_block[i] || (flags & BLOCK_FLAG_APPEND)) {
			blk64 = inode.i_block[i];
			ret |= (*ctx.func)(fs, &blk64, ctx.bcount, 0, i,
					   priv_data);
			inode.i_block[i] = (blk_t) blk64;
			if (ret & BLOCK_ABORT)
				goto abort_exit;
		}
	}
	check_for_ro_violation_goto(&ctx, ret, abort_exit);
	if (inode.i_block[EXT2_IND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_ind(&inode.i_block[EXT2_IND_BLOCK],
					 0, EXT2_IND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit;
	if (inode.i_block[EXT2_DIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_dind(&inode.i_block[EXT2_DIND_BLOCK],
					  0, EXT2_DIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	} else
		ctx.bcount += limit * limit;
	if (inode.i_block[EXT2_TIND_BLOCK] || (flags & BLOCK_FLAG_APPEND)) {
		ret |= block_iterate_tind(&inode.i_block[EXT2_TIND_BLOCK],
					  0, EXT2_TIND_BLOCK, &ctx);
		if (ret & BLOCK_ABORT)
			goto abort_exit;
	}

abort_exit:
	if (ret & BLOCK_CHANGED) {
		retval = ext2fs_write_inode(fs, ino, &inode);
		if (retval) {
			ret |= BLOCK_ERROR;
			ctx.errcode = retval;
		}
	}
errout:
	if (!block_buf)
		ext2fs_free_mem(&ctx.ind_buf);

	return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
}

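/*
 * Illustrative sketch (not part of the library build, hence #if 0): a
 * minimal caller of ext2fs_block_iterate3() that counts every block the
 * iterator reports for an inode.  The names count_cb, count_blocks and
 * nblocks are hypothetical, chosen only for this example.
 */
#if 0
static int count_cb(ext2_filsys fs EXT2FS_ATTR((unused)),
		    blk64_t *blocknr EXT2FS_ATTR((unused)),
		    e2_blkcnt_t blockcnt EXT2FS_ATTR((unused)),
		    blk64_t ref_blk EXT2FS_ATTR((unused)),
		    int ref_offset EXT2FS_ATTR((unused)),
		    void *priv_data)
{
	/* Count every block we are shown; return 0 so nothing is changed. */
	(*(blk64_t *) priv_data)++;
	return 0;
}

static errcode_t count_blocks(ext2_filsys fs, ext2_ino_t ino,
			      blk64_t *nblocks)
{
	*nblocks = 0;
	/* BLOCK_FLAG_READ_ONLY: the callback promises not to return
	 * BLOCK_CHANGED, so the iterator never rewrites metadata. */
	return ext2fs_block_iterate3(fs, ino, BLOCK_FLAG_READ_ONLY, NULL,
				     count_cb, nblocks);
}
#endif
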
/*
 * Emulate the old ext2fs_block_iterate function!
 */

struct xlate64 {
	int (*func)(ext2_filsys fs,
		    blk_t	*blocknr,
		    e2_blkcnt_t	blockcnt,
		    blk_t	ref_blk,
		    int		ref_offset,
		    void	*priv_data);
	void *real_private;
};

static int xlate64_func(ext2_filsys fs, blk64_t	*blocknr,
			e2_blkcnt_t blockcnt, blk64_t ref_blk,
			int ref_offset, void *priv_data)
{
	struct xlate64 *xl = (struct xlate64 *) priv_data;
	int		ret;
	blk_t		block32 = *blocknr;

	ret = (*xl->func)(fs, &block32, blockcnt, (blk_t) ref_blk, ref_offset,
			     xl->real_private);
	*blocknr = block32;
	return ret;
}

errcode_t ext2fs_block_iterate2(ext2_filsys fs,
				ext2_ino_t ino,
				int	flags,
				char *block_buf,
				int (*func)(ext2_filsys fs,
					    blk_t	*blocknr,
					    e2_blkcnt_t	blockcnt,
					    blk_t	ref_blk,
					    int		ref_offset,
					    void	*priv_data),
				void *priv_data)
{
	struct xlate64 xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate3(fs, ino, flags, block_buf,
				     xlate64_func, &xl);
}


struct xlate {
	int (*func)(ext2_filsys	fs,
		    blk_t	*blocknr,
		    int		bcount,
		    void	*priv_data);
	void *real_private;
};

#ifdef __TURBOC__
 #pragma argsused
#endif
static int xlate_func(ext2_filsys fs, blk_t *blocknr, e2_blkcnt_t blockcnt,
		      blk_t ref_block EXT2FS_ATTR((unused)),
		      int ref_offset EXT2FS_ATTR((unused)),
		      void *priv_data)
{
	struct xlate *xl = (struct xlate *) priv_data;

	return (*xl->func)(fs, blocknr, (int) blockcnt, xl->real_private);
}

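/*
 * The original interface passes blockcnt as a plain int, so it cannot
 * represent the block counts of very large files; BLOCK_FLAG_NO_LARGE is
 * therefore forced on below, making ext2fs_block_iterate3() reject such
 * files with EXT2_ET_FILE_TOO_BIG.
 */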
errcode_t ext2fs_block_iterate(ext2_filsys fs,
			       ext2_ino_t ino,
			       int	flags,
			       char *block_buf,
			       int (*func)(ext2_filsys fs,
					   blk_t	*blocknr,
					   int	blockcnt,
					   void	*priv_data),
			       void *priv_data)
{
	struct xlate xl;

	xl.real_private = priv_data;
	xl.func = func;

	return ext2fs_block_iterate2(fs, ino, BLOCK_FLAG_NO_LARGE | flags,
				     block_buf, xlate_func, &xl);
}