    Searched refs:sector_num (Results 1 - 23 of 23)

  /external/qemu/block/
parallels.c 105 static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
110 index = sector_num / s->tracks;
111 offset = sector_num % s->tracks;
119 static int parallels_read(BlockDriverState *bs, int64_t sector_num,
123 int64_t position = seek_to_sector(bs, sector_num);
131 sector_num++;
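
The parallels.c hits above show the image's sector-to-offset mapping: the guest sector number is split into a track index (sector_num / s->tracks) and an offset within that track (sector_num % s->tracks), and the index is then used to look up where that track's data lives. A minimal, self-contained sketch of the same arithmetic; the struct, catalog and "toy_" names are invented here for illustration, not the driver's real layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver state: only what the split needs. */
    struct toy_parallels_state {
        uint32_t tracks;      /* sectors per track */
        uint32_t catalog[4];  /* start sector of each track's data (0 = unallocated) */
    };

    /* Map a guest sector number to a byte offset inside the image file,
     * mirroring the index/offset split seen in the hits. */
    static int64_t toy_seek_to_sector(const struct toy_parallels_state *s,
                                      int64_t sector_num)
    {
        uint32_t index  = sector_num / s->tracks;   /* which track */
        uint32_t offset = sector_num % s->tracks;   /* sector inside it */

        if (index >= 4 || s->catalog[index] == 0)
            return -1;                              /* unallocated */
        return ((int64_t)s->catalog[index] + offset) * 512;
    }

    int main(void)
    {
        struct toy_parallels_state s = { .tracks = 63,
                                         .catalog = { 100, 0, 400, 700 } };
        printf("%lld\n", (long long)toy_seek_to_sector(&s, 70)); /* track 1 unallocated: -1 */
        printf("%lld\n", (long long)toy_seek_to_sector(&s, 10)); /* (100 + 10) * 512 */
        return 0;
    }
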
cow.c 133 static int cow_is_allocated(BlockDriverState *bs, int64_t sector_num,
143 changed = is_bit_set(bs, sector_num);
149 if (is_bit_set(bs, sector_num + *num_same) != changed)
156 static int cow_update_bitmap(BlockDriverState *bs, int64_t sector_num,
163 error = cow_set_bit(bs, sector_num + i);
172 static int cow_read(BlockDriverState *bs, int64_t sector_num,
179 if (cow_is_allocated(bs, sector_num, nb_sectors, &n)) {
181 s->cow_sectors_offset + sector_num * 512,
188 ret = bdrv_read(bs->backing_hd, sector_num, buf, n);
196 sector_num += n
    [all...]
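
The cow.c hits sketch a run-length scan: cow_is_allocated reads the allocation bit for sector_num, then counts how many following sectors share the same state and returns that count through *num_same. A stand-alone model of that loop over an in-memory bitmap; the bitmap and helper names are invented, not the driver's on-disk format:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TOY_SECTORS 64

    static uint8_t toy_bitmap[TOY_SECTORS / 8];

    static bool toy_is_bit_set(int64_t sector_num)
    {
        return toy_bitmap[sector_num / 8] & (1 << (sector_num % 8));
    }

    /* Return whether sector_num is allocated and, via *num_same, how many
     * consecutive sectors starting there share that state -- the same
     * contract the cow_is_allocated() hits suggest. */
    static bool toy_is_allocated(int64_t sector_num, int nb_sectors, int *num_same)
    {
        bool changed = toy_is_bit_set(sector_num);
        int i;

        for (i = 1; i < nb_sectors; i++) {
            if (toy_is_bit_set(sector_num + i) != changed)
                break;
        }
        *num_same = i;
        return changed;
    }

    int main(void)
    {
        int n;
        memset(toy_bitmap, 0, sizeof(toy_bitmap));
        toy_bitmap[0] = 0x0f;                       /* sectors 0..3 allocated */
        bool alloc = toy_is_allocated(0, 16, &n);
        printf("allocated=%d run=%d\n", alloc, n);  /* allocated=1 run=4 */
        return 0;
    }
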
raw.c 35 static int check_write_unsafe(BlockDriverState *bs, int64_t sector_num,
44 if (sector_num == 0 && nb_sectors > 0) {
51 static int raw_read(BlockDriverState *bs, int64_t sector_num,
54 return bdrv_read(bs->file, sector_num, buf, nb_sectors);
69 static int raw_write(BlockDriverState *bs, int64_t sector_num,
72 if (check_write_unsafe(bs, sector_num, buf, nb_sectors)) {
88 return bdrv_write(bs->file, sector_num, buf, nb_sectors);
92 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
95 return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
120 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors
    [all...]
raw-posix-aio.h 31 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
40 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
dmg.c 186 uint32_t chunk_num,int sector_num)
188 if(chunk_num>=s->n_chunks || s->sectors[chunk_num]>sector_num ||
189 s->sectors[chunk_num]+s->sectorcounts[chunk_num]<=sector_num)
195 static inline uint32_t search_chunk(BDRVDMGState* s,int sector_num)
201 if(s->sectors[chunk3]>sector_num)
203 else if(s->sectors[chunk3]+s->sectorcounts[chunk3]>sector_num)
211 static inline int dmg_read_chunk(BlockDriverState *bs, int sector_num)
215 if(!is_sector_in_chunk(s,s->current_chunk,sector_num)) {
217 uint32_t chunk = search_chunk(s,sector_num);
267 static int dmg_read(BlockDriverState *bs, int64_t sector_num,
    [all...]
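
The dmg.c hits show chunk lookup: is_sector_in_chunk tests whether sector_num falls in a chunk's [start, start + count) range, and search_chunk binary-searches the chunks, which are sorted by start sector. A compact, self-contained version of that search over hypothetical arrays (the sizes and values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_CHUNKS 4

    /* Chunks sorted by start sector, as the dmg binary search assumes. */
    static const uint64_t starts[TOY_CHUNKS] = {   0, 100, 250, 400 };
    static const uint64_t counts[TOY_CHUNKS] = { 100, 150, 150, 100 };

    static int in_chunk(uint32_t chunk, uint64_t sector_num)
    {
        return chunk < TOY_CHUNKS &&
               starts[chunk] <= sector_num &&
               sector_num < starts[chunk] + counts[chunk];
    }

    /* Binary search for the chunk containing sector_num; returns
     * TOY_CHUNKS when no chunk covers it. */
    static uint32_t search_chunk(uint64_t sector_num)
    {
        uint32_t lo = 0, hi = TOY_CHUNKS;
        while (lo < hi) {
            uint32_t mid = lo + (hi - lo) / 2;
            if (starts[mid] > sector_num)
                hi = mid;
            else if (starts[mid] + counts[mid] <= sector_num)
                lo = mid + 1;
            else
                return mid;
        }
        return TOY_CHUNKS;  /* not found */
    }

    int main(void)
    {
        printf("%u %d\n", search_chunk(260), in_chunk(search_chunk(260), 260)); /* 2 1 */
        printf("%u\n", search_chunk(99999));  /* TOY_CHUNKS: no such chunk */
        return 0;
    }
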
qcow.c 217 static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
229 ivec.ll[0] = cpu_to_le64(sector_num);
233 sector_num++;
379 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
386 cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
387 index_in_cluster = sector_num & (s->cluster_sectors - 1);
446 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
454 cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
455 index_in_cluster = sector_num & (s->cluster_sectors - 1);
462 ret = bdrv_read(bs->backing_hd, sector_num, buf, n)
491 int64_t sector_num; member in struct:QCowAIOCB
    [all...]
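
Two idioms recur in the qcow.c hits: the guest sector is turned into a byte offset with sector_num << 9 (512-byte sectors), and the position inside a cluster is taken with sector_num & (s->cluster_sectors - 1), which is only equivalent to a modulo because cluster_sectors is a power of two; the encryption path also derives its IV from sector_num and advances it one sector at a time. A small sketch of the offset arithmetic only, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t sector_num      = 12345;
        int     cluster_sectors = 128;   /* power of two, e.g. 64 KiB clusters */

        /* Byte offset of the sector: 512-byte sectors, so shift by 9. */
        int64_t byte_offset = sector_num << 9;

        /* Position within the cluster: the mask form equals the modulo
         * form precisely because cluster_sectors is a power of two. */
        int64_t index_in_cluster = sector_num & (cluster_sectors - 1);
        int64_t same_by_modulo   = sector_num % cluster_sectors;

        printf("offset=%lld index=%lld (%lld)\n",
               (long long)byte_offset,
               (long long)index_in_cluster,
               (long long)same_by_modulo);
        return 0;
    }
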
nbd.c 96 static int nbd_read(BlockDriverState *bs, int64_t sector_num,
105 request.from = sector_num * 512;;
126 static int nbd_write(BlockDriverState *bs, int64_t sector_num,
135 request.from = sector_num * 512;;
vpc.c 238 int64_t sector_num, int write)
241 uint64_t offset = sector_num * 512;
268 // sector_num, pagetable_index, pageentry_index,
328 static int64_t alloc_block(BlockDriverState* bs, int64_t sector_num)
336 // Check if sector_num is valid
337 if ((sector_num < 0) || (sector_num > bs->total_sectors))
341 index = (sector_num * 512) / s->block_size;
365 return get_sector_offset(bs, sector_num, 0);
372 static int vpc_read(BlockDriverState *bs, int64_t sector_num,
    [all...]
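
The vpc.c hits convert the sector to a byte offset (sector_num * 512), validate it against the disk size, and derive the index of the containing block with (sector_num * 512) / s->block_size before consulting the block allocation table. A stand-alone sketch of that bounds check and index computation; the geometry constants are invented, and the upper bound deliberately mirrors the > comparison visible in the hit:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical geometry: 2 MiB blocks, 8192-sector (4 MiB) disk. */
    #define TOY_BLOCK_SIZE    (2 * 1024 * 1024)
    #define TOY_TOTAL_SECTORS 8192

    /* Return the block index for a sector, or -1 if the sector is out of
     * range -- the same shape as the alloc_block() hit above. */
    static int64_t toy_block_index(int64_t sector_num)
    {
        if (sector_num < 0 || sector_num > TOY_TOTAL_SECTORS)
            return -1;                                  /* invalid sector */
        return (sector_num * 512) / TOY_BLOCK_SIZE;     /* which block */
    }

    int main(void)
    {
        printf("%lld\n", (long long)toy_block_index(0));     /* 0 */
        printf("%lld\n", (long long)toy_block_index(5000));  /* 5000*512 / 2 MiB = 1 */
        printf("%lld\n", (long long)toy_block_index(-3));    /* -1 */
        return 0;
    }
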
qcow2.c 296 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
305 ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
315 int64_t sector_num, uint8_t *buf, int nb_sectors)
318 if ((sector_num + nb_sectors) <= bs->total_sectors)
320 if (sector_num >= bs->total_sectors)
323 n1 = bs->total_sectors - sector_num;
330 int64_t sector_num; member in struct:QCowAIOCB
400 qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
407 acb->sector_num += acb->cur_nr_sectors;
418 ret = qcow2_get_cluster_offset(bs, acb->sector_num << 9
    [all...]
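
The qcow2.c backing_read1 hits clamp a read against the backing file's size: if the whole request fits it is passed through, if it starts beyond the end nothing is read, and otherwise it is shortened to total_sectors - sector_num. A minimal model of that clamping logic (function and variable names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Return how many of nb_sectors starting at sector_num actually lie
     * inside a backing image of total_sectors sectors -- mirroring the
     * three cases visible in the qcow2_backing_read1() hits. */
    static int clamp_to_backing(int64_t total_sectors,
                                int64_t sector_num, int nb_sectors)
    {
        if (sector_num + nb_sectors <= total_sectors)
            return nb_sectors;                    /* fully inside */
        if (sector_num >= total_sectors)
            return 0;                             /* entirely past the end */
        return (int)(total_sectors - sector_num); /* partially inside */
    }

    int main(void)
    {
        printf("%d\n", clamp_to_backing(1000, 0, 16));    /* 16 */
        printf("%d\n", clamp_to_backing(1000, 1500, 16)); /* 0 */
        printf("%d\n", clamp_to_backing(1000, 995, 16));  /* 5 */
        return 0;
    }
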
bochs.c 158 static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
161 int64_t offset = sector_num * 512;
189 static int bochs_read(BlockDriverState *bs, int64_t sector_num,
195 int64_t block_offset = seek_to_sector(bs, sector_num);
204 sector_num++;
cloop.c 131 static int cloop_read(BlockDriverState *bs, int64_t sector_num,
138 uint32_t sector_offset_in_block=((sector_num+i)%s->sectors_per_block),
139 block_num=(sector_num+i)/s->sectors_per_block;
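
The cloop.c hits map each requested sector onto a compressed block: block_num = (sector_num + i) / sectors_per_block selects the block, and (sector_num + i) % sectors_per_block is the sector's position inside the decompressed block. A short sketch of that per-sector loop, with block contents and decompression omitted and the geometry invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t sectors_per_block = 4;   /* e.g. 2 KiB blocks of 512-byte sectors */
        int64_t sector_num = 6;
        int     nb_sectors = 3;

        /* For each requested sector, compute which compressed block holds
         * it and where it sits inside that block once decompressed. */
        for (int i = 0; i < nb_sectors; i++) {
            uint32_t block_num              = (sector_num + i) / sectors_per_block;
            uint32_t sector_offset_in_block = (sector_num + i) % sectors_per_block;
            printf("sector %lld -> block %u, offset %u\n",
                   (long long)(sector_num + i), block_num, sector_offset_in_block);
        }
        return 0;
    }
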
vmdk.c 570 static int vmdk_is_allocated(BlockDriverState *bs, int64_t sector_num,
577 cluster_offset = get_cluster_offset(bs, NULL, sector_num << 9, 0);
578 index_in_cluster = sector_num % s->cluster_sectors;
586 static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
594 cluster_offset = get_cluster_offset(bs, NULL, sector_num << 9, 0);
595 index_in_cluster = sector_num % s->cluster_sectors;
604 ret = bdrv_read(bs->backing_hd, sector_num, buf, n);
615 sector_num += n;
621 static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
630 if (sector_num > bs->total_sectors)
    [all...]
raw-win32.c 108 static int raw_read(BlockDriverState *bs, int64_t sector_num,
115 int64_t offset = sector_num * 512;
129 static int raw_write(BlockDriverState *bs, int64_t sector_num,
136 int64_t offset = sector_num * 512;
raw-posix.c 385 static int raw_read(BlockDriverState *bs, int64_t sector_num,
390 ret = raw_pread(bs, sector_num * BDRV_SECTOR_SIZE, buf,
475 static int raw_write(BlockDriverState *bs, int64_t sector_num,
479 ret = raw_pwrite(bs, sector_num * BDRV_SECTOR_SIZE, buf,
503 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
521 return laio_submit(bs, s->aio_ctx, s->fd, sector_num, qiov,
527 return paio_submit(bs, s->fd, sector_num, qiov, nb_sectors,
532 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
535 return raw_aio_submit(bs, sector_num, qiov, nb_sectors,
540 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors
    [all...]
qcow2-cluster.c 322 void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
334 ivec.ll[0] = cpu_to_le64(sector_num);
338 sector_num++;
345 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
355 ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
361 index_in_cluster = sector_num & (s->cluster_sectors - 1);
365 n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n);
368 ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
385 qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
390 sector_num += n
    [all...]
vvfat.c 817 static inline uint32_t sector2cluster(BDRVVVFATState* s,off_t sector_num)
819 return (sector_num-s->faked_sectors)/s->sectors_per_cluster;
827 static inline uint32_t sector_offset_in_cluster(BDRVVVFATState* s,off_t sector_num)
829 return (sector_num-s->first_sectors_number-2*s->sectors_per_fat)%s->sectors_per_cluster;
    [all...]
qcow2.h 170 int64_t sector_num, uint8_t *buf, int nb_sectors);
194 void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
  /external/qemu/
dma-helpers.c 44 uint64_t sector_num; member in struct:__anon12291
89 dbs->sector_num += dbs->iov.size / 512;
120 dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
123 dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
148 BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
157 dbs->sector_num = sector_num;
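
The dma-helpers.c hits show the scatter/gather continuation state: the helper remembers the current sector_num, submits an AIO read or write for the vector it has built, and when that completes advances sector_num by iov.size / 512 before issuing the next request. A small model of just that bookkeeping, with the actual I/O replaced by a stub and the struct invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for the state kept across AIO callbacks: where we
     * are on disk and how many bytes the last submitted vector covered. */
    struct toy_dma_state {
        uint64_t sector_num;  /* next sector to transfer */
        uint64_t iov_size;    /* bytes in the vector just completed */
    };

    /* Called when one AIO request completes: advance the sector cursor by
     * the number of 512-byte sectors that request covered. */
    static void toy_dma_advance(struct toy_dma_state *dbs)
    {
        dbs->sector_num += dbs->iov_size / 512;
    }

    int main(void)
    {
        struct toy_dma_state dbs = { .sector_num = 2048, .iov_size = 0 };

        /* Pretend two requests of 8 KiB and 4 KiB completed in turn. */
        dbs.iov_size = 8192; toy_dma_advance(&dbs);
        dbs.iov_size = 4096; toy_dma_advance(&dbs);
        printf("next sector: %llu\n", (unsigned long long)dbs.sector_num); /* 2072 */
        return 0;
    }
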
block.c 46 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
49 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
55 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
57 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
896 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
899 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
904 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
911 if (bdrv_check_request(bs, sector_num, nb_sectors))
914 return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
917 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
978 int64_t sector_num; local
1023 int64_t sector_num; local
    [all...]
block.h 77 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
79 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
87 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,
112 int sector_num);
113 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
116 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
150 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
197 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
block_int.h 56 int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
58 int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
63 int (*bdrv_is_allocated)(BlockDriverState *bs, int64_t sector_num,
69 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
72 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
86 int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
posix-aio-compat.c 569 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
587 acb->aio_offset = sector_num * 512;
592 //trace_paio_submit(acb, opaque, sector_num, nb_sectors, type);
  /external/grub/stage2/
disk_io.c 264 int sector_num = sector; local
268 (*disk_read_func) (sector_num++, byte_offset, length);
274 (*disk_read_func) (sector_num++, 0, buf_geom.sector_size);
277 (*disk_read_func) (sector_num, 0, length);
    [all...]
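
The GRUB disk_io.c hits show the debug-hook pattern: when a read spans several sectors, the optional disk_read_func callback is invoked once per sector, with sector_num incremented after each call; a partial first sector gets the in-sector byte offset and a shortened length, and a partial last sector gets whatever bytes remain. A tiny sketch of driving such a per-sector callback; the hook and the request values are invented for illustration:

    #include <stdio.h>

    /* Hypothetical per-sector hook, mirroring disk_read_func's
     * (sector, byte_offset, length) shape. */
    static void toy_read_hook(int sector_num, int byte_offset, int length)
    {
        printf("sector %d: offset %d, length %d\n", sector_num, byte_offset, length);
    }

    int main(void)
    {
        const int sector_size = 512;
        int sector_num  = 100;   /* first sector touched by the request */
        int byte_offset = 200;   /* request starts mid-sector */
        int remaining   = 1000;  /* total bytes requested */

        /* Partial first sector. */
        int first = sector_size - byte_offset;
        if (first > remaining)
            first = remaining;
        toy_read_hook(sector_num++, byte_offset, first);
        remaining -= first;

        /* Whole sectors in the middle. */
        while (remaining >= sector_size) {
            toy_read_hook(sector_num++, 0, sector_size);
            remaining -= sector_size;
        }

        /* Partial last sector, if any bytes are left. */
        if (remaining > 0)
            toy_read_hook(sector_num, 0, remaining);
        return 0;
    }
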
