Lines matching refs: ctx
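For orientation: every match below dereferences the iterator's shared cursor, struct block_context. The reconstruction that follows lists only the fields these matches actually touch; field order and exact types are an approximation of the definition near the top of the file, not a verbatim copy.

struct block_context {
	ext2_filsys	fs;		/* filesystem being walked */
	int (*func)(ext2_filsys fs,	/* per-block callback */
		    blk_t *blocknr,
		    e2_blkcnt_t bcount,
		    blk_t ref_blk,
		    int ref_offset,
		    void *priv_data);
	e2_blkcnt_t	bcount;		/* logical block cursor */
	int		flags;		/* BLOCK_FLAG_* from the caller */
	errcode_t	errcode;	/* sticky error reported to caller */
	char		*ind_buf;	/* scratch: single indirect block */
	char		*dind_buf;	/* scratch: double indirect block */
	char		*tind_buf;	/* scratch: triple indirect block */
	void		*priv_data;	/* opaque callback cookie */
};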
40 int ref_offset, struct block_context *ctx)
46 limit = ctx->fs->blocksize >> 2;
47 if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
48 !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
49 ret = (*ctx->func)(ctx->fs, ind_block,
51 ref_offset, ctx->priv_data);
53 ctx->bcount += limit;
56 if (*ind_block >= ctx->fs->super->s_blocks_count ||
57 *ind_block < ctx->fs->super->s_first_data_block) {
58 ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
62 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
63 ctx->ind_buf);
64 if (ctx->errcode) {
69 block_nr = (blk_t *) ctx->ind_buf;
71 if (ctx->flags & BLOCK_FLAG_APPEND) {
72 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
73 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
75 ctx->priv_data);
84 for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
87 flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
89 ctx->priv_data);
99 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
100 ctx->ind_buf);
101 if (ctx->errcode)
104 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
105 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
107 ret |= (*ctx->func)(ctx->fs, ind_block,
109 ref_offset, ctx->priv_data);
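Pulling the matches for lines 40-109 together, the single-indirect walker has the shape sketched below. This is a condensed reconstruction, not the shipped source: the separate append and sparse loops (lines 71-89) are merged into one loop with a skip test, error paths are simplified, and the callback arguments elided by the matcher are filled in as I recall them (BLOCK_COUNT_IND and ref_block for the metadata visits).

static int block_ind(blk_t *ind_block, blk_t ref_block,
		     int ref_offset, struct block_context *ctx)
{
	int	ret = 0, changed = 0, flags;
	int	i, limit;
	blk_t	*block_nr;

	limit = ctx->fs->blocksize >> 2;	/* 32-bit entries per block */

	/* Pre-order visit of the indirect block itself. */
	if (!(ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY))
		ret = (*ctx->func)(ctx->fs, ind_block, BLOCK_COUNT_IND,
				   ref_block, ref_offset, ctx->priv_data);

	/* A hole still accounts for a full indirect block's worth of
	 * logical blocks (line 53). */
	if (!*ind_block || (ret & BLOCK_ABORT)) {
		ctx->bcount += limit;
		return ret;
	}

	/* Range-check the metadata block before trusting it (line 56). */
	if (*ind_block >= ctx->fs->super->s_blocks_count ||
	    *ind_block < ctx->fs->super->s_first_data_block) {
		ctx->errcode = EXT2_ET_BAD_IND_BLOCK;
		return ret | BLOCK_ERROR;
	}

	ctx->errcode = ext2fs_read_ind_block(ctx->fs, *ind_block,
					     ctx->ind_buf);
	if (ctx->errcode)
		return ret | BLOCK_ERROR;

	/* Visit each entry; holes are skipped unless BLOCK_FLAG_APPEND
	 * asks for them to be presented to the callback too. */
	block_nr = (blk_t *) ctx->ind_buf;
	for (i = 0; i < limit; i++, ctx->bcount++, block_nr++) {
		if (!*block_nr && !(ctx->flags & BLOCK_FLAG_APPEND))
			continue;
		flags = (*ctx->func)(ctx->fs, block_nr, ctx->bcount,
				     *ind_block, (int) (i * sizeof(blk_t)),
				     ctx->priv_data);
		changed |= flags;
		if (flags & BLOCK_ABORT) {
			ret |= BLOCK_ABORT;
			break;
		}
	}

	/* Write the block back if any callback changed an entry. */
	if (changed & BLOCK_CHANGED) {
		ctx->errcode = ext2fs_write_ind_block(ctx->fs, *ind_block,
						      ctx->ind_buf);
		if (ctx->errcode)
			ret |= BLOCK_ERROR | BLOCK_ABORT;
	}

	/* Post-order visit when depth-first traversal was requested. */
	if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
	    !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
	    !(ret & BLOCK_ABORT))
		ret |= (*ctx->func)(ctx->fs, ind_block, BLOCK_COUNT_IND,
				    ref_block, ref_offset, ctx->priv_data);
	return ret;
}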
114 int ref_offset, struct block_context *ctx)
120 limit = ctx->fs->blocksize >> 2;
121 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
123 ret = (*ctx->func)(ctx->fs, dind_block,
125 ref_offset, ctx->priv_data);
127 ctx->bcount += limit*limit;
130 if (*dind_block >= ctx->fs->super->s_blocks_count ||
131 *dind_block < ctx->fs->super->s_first_data_block) {
132 ctx->errcode = EXT2_ET_BAD_DIND_BLOCK;
136 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *dind_block,
137 ctx->dind_buf);
138 if (ctx->errcode) {
143 block_nr = (blk_t *) ctx->dind_buf;
145 if (ctx->flags & BLOCK_FLAG_APPEND) {
149 ctx);
160 ctx->bcount += limit;
165 ctx);
175 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *dind_block,
176 ctx->dind_buf);
177 if (ctx->errcode)
180 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
181 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
183 ret |= (*ctx->func)(ctx->fs, dind_block,
185 ref_offset, ctx->priv_data);
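The double-indirect walker (lines 114-185) repeats the same pattern one level up, recursing into the single-indirect routine for each entry. The structural difference is the accounting: an absent double-indirect block advances bcount by limit*limit (line 127), since each of its limit entries would itself have covered limit data blocks, and a skipped entry inside the loop advances it by limit (line 160). A standalone calculation of those spans per block size:

#include <stdio.h>

/* Blocks addressed per entry of each indirect level, as a function
 * of filesystem block size; limit = blocksize >> 2, the number of
 * 32-bit block pointers that fit in one block. */
int main(void)
{
	unsigned int blocksizes[] = { 1024, 2048, 4096 };

	for (size_t i = 0;
	     i < sizeof(blocksizes) / sizeof(blocksizes[0]); i++) {
		unsigned long limit = blocksizes[i] >> 2;
		printf("bs=%u: ind=%lu dind=%lu tind=%lu\n",
		       blocksizes[i], limit, limit * limit,
		       limit * limit * limit);
	}
	return 0;
}

With 4096-byte blocks this prints ind=1024, dind=1048576, tind=1073741824, which is exactly why line 127 adds limit*limit and line 203 adds limit*limit*limit.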
190 int ref_offset, struct block_context *ctx)
196 limit = ctx->fs->blocksize >> 2;
197 if (!(ctx->flags & (BLOCK_FLAG_DEPTH_TRAVERSE |
199 ret = (*ctx->func)(ctx->fs, tind_block,
201 ref_offset, ctx->priv_data);
203 ctx->bcount += limit*limit*limit;
206 if (*tind_block >= ctx->fs->super->s_blocks_count ||
207 *tind_block < ctx->fs->super->s_first_data_block) {
208 ctx->errcode = EXT2_ET_BAD_TIND_BLOCK;
212 ctx->errcode = ext2fs_read_ind_block(ctx->fs, *tind_block,
213 ctx->tind_buf);
214 if (ctx->errcode) {
219 block_nr = (blk_t *) ctx->tind_buf;
221 if (ctx->flags & BLOCK_FLAG_APPEND) {
225 offset, ctx);
236 ctx->bcount += limit*limit;
241 offset, ctx);
251 ctx->errcode = ext2fs_write_ind_block(ctx->fs, *tind_block,
252 ctx->tind_buf);
253 if (ctx->errcode)
256 if ((ctx->flags & BLOCK_FLAG_DEPTH_TRAVERSE) &&
257 !(ctx->flags & BLOCK_FLAG_DATA_ONLY) &&
259 ret |= (*ctx->func)(ctx->fs, tind_block,
261 ref_offset, ctx->priv_data);
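All three levels guard their metadata block with the same range test before reading it, differing only in the error code they latch: EXT2_ET_BAD_IND_BLOCK, EXT2_ET_BAD_DIND_BLOCK, and EXT2_ET_BAD_TIND_BLOCK at lines 58, 132, and 208. Factored out, the test is simply the sketch below (block_in_range is a hypothetical name; the shipped code inlines the comparison at each level):

/* A metadata block must fall inside
 * [s_first_data_block, s_blocks_count). */
static int block_in_range(ext2_filsys fs, blk_t blk)
{
	return blk < fs->super->s_blocks_count &&
	       blk >= fs->super->s_first_data_block;
}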
284 struct block_context ctx;
293 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
294 if (ctx.errcode)
295 return ctx.errcode;
308 ctx.fs = fs;
309 ctx.func = func;
310 ctx.priv_data = priv_data;
311 ctx.flags = flags;
312 ctx.bcount = 0;
314 ctx.ind_buf = block_buf;
316 retval = ext2fs_get_array(3, fs->blocksize, &ctx.ind_buf);
320 ctx.dind_buf = ctx.ind_buf + fs->blocksize;
321 ctx.tind_buf = ctx.dind_buf + fs->blocksize;
328 ctx.errcode = ext2fs_read_inode(fs, ino, &inode);
329 if (ctx.errcode)
333 ret |= (*ctx.func)(fs,
345 for (i = 0; i < EXT2_NDIR_BLOCKS ; i++, ctx.bcount++) {
347 ret |= (*ctx.func)(fs, &blocks[i],
348 ctx.bcount, 0, i, priv_data);
355 0, EXT2_IND_BLOCK, &ctx);
359 ctx.bcount += limit;
362 0, EXT2_DIND_BLOCK, &ctx);
366 ctx.bcount += limit * limit;
369 0, EXT2_TIND_BLOCK, &ctx);
389 ext2fs_free_mem(&ctx.ind_buf);
391 return (ret & BLOCK_ERROR) ? ctx.errcode : 0;
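Finally, the caller-facing pattern. Assuming the ext2fs_block_iterate2() entry point this file implements (prototype as declared in ext2fs.h, to the best of my recollection), a minimal consumer that counts an inode's in-use blocks looks like the sketch below; count_block() and count_inode_blocks() are hypothetical names used for illustration only.

#include <ext2fs/ext2fs.h>

struct count { blk_t blocks; };

/* Callback invoked once per visited block.  With flags == 0 the
 * indirect metadata blocks are visited too (their blockcnt is
 * negative, e.g. BLOCK_COUNT_IND); pass BLOCK_FLAG_DATA_ONLY to
 * see only data blocks. */
static int count_block(ext2_filsys fs, blk_t *blocknr,
		       e2_blkcnt_t blockcnt, blk_t ref_blk,
		       int ref_offset, void *priv_data)
{
	struct count *c = priv_data;

	c->blocks++;
	return 0;	/* keep iterating; no BLOCK_CHANGED */
}

static errcode_t count_inode_blocks(ext2_filsys fs, ext2_ino_t ino,
				    blk_t *out)
{
	struct count c = { 0 };
	errcode_t retval;

	/* Passing NULL for block_buf makes the iterator allocate its
	 * three scratch buffers itself: the ext2fs_get_array() path
	 * at line 316 above. */
	retval = ext2fs_block_iterate2(fs, ino, 0, NULL,
				       count_block, &c);
	if (retval)
		return retval;
	*out = c.blocks;
	return 0;
}

Note the error contract visible at line 391: the iterator returns ctx.errcode only when some callback raised BLOCK_ERROR; otherwise it returns 0 even if the walk was cut short by BLOCK_ABORT.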