    Searched refs:io_u (Results 26 - 48 of 48)


  /external/fio/
ioengines.c 191 int td_io_prep(struct thread_data *td, struct io_u *io_u)
193 dprint_io_u(io_u, "prep");
194 fio_ro_check(td, io_u);
196 lock_file(td, io_u->file, io_u->ddir);
199 int ret = td->io_ops->prep(td, io_u);
201 dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
203 unlock_file(td, io_u->file);
253 int td_io_queue(struct thread_data *td, struct io_u *io_u
    [all...]
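Note: td_io_prep() above is a thin wrapper: debug-print the io_u, run the read-only check, lock the file, then hand the io_u to the loaded engine through td->io_ops->prep(). A minimal self-contained sketch of that delegation pattern, using simplified stand-in types rather than fio's real ones:

    #include <stdio.h>

    struct io_u { int ddir; long long offset; };              /* stand-in, not fio's */
    struct ioengine_ops { int (*prep)(void *td, struct io_u *); };
    struct thread_data { struct ioengine_ops *io_ops; };      /* stand-in, not fio's */

    /* engine-specific hook, in the role of a real engine's ->prep() */
    static int demo_prep(void *td, struct io_u *io_u)
    {
        (void) td;
        printf("prep io_u at offset %lld\n", io_u->offset);
        return 0;
    }

    /* wrapper in the spirit of td_io_prep(): check, then delegate to the engine */
    static int td_io_prep_sketch(struct thread_data *td, struct io_u *io_u)
    {
        if (td->io_ops->prep)           /* engines may leave the hook unset */
            return td->io_ops->prep(td, io_u);
        return 0;
    }

    int main(void)
    {
        struct ioengine_ops ops = { .prep = demo_prep };
        struct thread_data td = { .io_ops = &ops };
        struct io_u u = { .ddir = 0, .offset = 4096 };

        return td_io_prep_sketch(&td, &u);
    }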
backend.c 259 struct io_u *io_u; local
262 io_u_qiter(&td->io_u_all, io_u, i) {
263 if (io_u->flags & IO_U_F_FLIGHT) {
264 r = td->io_ops->cancel(td, io_u);
266 put_io_u(td, io_u);
281 struct io_u *io_u = __get_io_u(td); local
284 if (!io_u)
287 io_u->ddir = DDIR_SYNC
596 struct io_u *io_u; local
878 struct io_u *io_u; local
1126 struct io_u *io_u; local
1147 struct io_u *io_u; local
1411 struct io_u *io_u; local
    [all...]
profile.h 7 * Functions for overriding internal fio io_u functions
13 int (*fill_io_u_off)(struct thread_data *, struct io_u *, unsigned int *);
14 int (*fill_io_u_size)(struct thread_data *, struct io_u *, unsigned int);
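Note: profile.h declares hooks that let a profile override fio's internal offset/size generation for an io_u. A small sketch of the "optional function pointer, fall back to the default" pattern it implies; the names and types below are simplified stand-ins, not fio's actual profile_ops layout:

    #include <stdio.h>

    struct sketch_io_u { unsigned long long offset; unsigned int buflen; };

    struct sketch_profile_ops {
        /* when non-NULL, replaces the default offset generator */
        int (*fill_io_u_off)(struct sketch_io_u *, unsigned long long *);
    };

    static int default_fill_off(struct sketch_io_u *io_u, unsigned long long *off)
    {
        *off = io_u->offset + io_u->buflen;     /* trivial sequential default */
        return 0;
    }

    static int fill_off(struct sketch_profile_ops *ops, struct sketch_io_u *io_u,
                        unsigned long long *off)
    {
        if (ops && ops->fill_io_u_off)          /* profile override wins */
            return ops->fill_io_u_off(io_u, off);
        return default_fill_off(io_u, off);
    }

    int main(void)
    {
        struct sketch_io_u u = { .offset = 0, .buflen = 4096 };
        unsigned long long next;

        fill_off(NULL, &u, &next);              /* no profile: default path */
        printf("next offset: %llu\n", next);
        return 0;
    }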
io_u_queue.c 6 q->io_us = calloc(nr, sizeof(struct io_u *));
33 ring->ring = calloc(ring->max, sizeof(struct io_u *));
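Note: io_u_queue.c sizes plain arrays of io_u pointers with calloc(). A self-contained sketch of that kind of fixed-capacity pointer queue (simplified; fio's io_u_queue and io_u_ring carry more state):

    #include <stdlib.h>
    #include <stdio.h>

    struct ptr_queue {
        void **items;           /* array of pointers, as in q->io_us */
        unsigned int max;
        unsigned int nr;        /* current count */
    };

    static int ptr_queue_init(struct ptr_queue *q, unsigned int max)
    {
        q->items = calloc(max, sizeof(void *));
        if (!q->items)
            return -1;
        q->max = max;
        q->nr = 0;
        return 0;
    }

    static int ptr_queue_push(struct ptr_queue *q, void *p)
    {
        if (q->nr == q->max)
            return -1;          /* full */
        q->items[q->nr++] = p;
        return 0;
    }

    static void *ptr_queue_pop(struct ptr_queue *q)
    {
        return q->nr ? q->items[--q->nr] : NULL;
    }

    int main(void)
    {
        struct ptr_queue q;
        int x = 42;

        if (ptr_queue_init(&q, 8))
            return 1;
        ptr_queue_push(&q, &x);
        printf("popped %d\n", *(int *) ptr_queue_pop(&q));
        free(q.items);
        return 0;
    }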
rate-submit.c 15 struct io_u *io_u = container_of(work, struct io_u, work); local
16 const enum fio_ddir ddir = io_u->ddir;
20 dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
22 io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
27 ret = td_io_queue(td, io_u);
33 io_u_clear(td, io_u, IO_U_F_FLIGHT);
36 dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid())
    [all...]
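Note: rate-submit.c recovers the enclosing io_u from an embedded work item with container_of(). A stand-alone sketch of that idiom; the struct and member names below are made up for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    struct workitem { int pending; };

    struct request {                    /* plays the role of struct io_u */
        long long offset;
        struct workitem work;           /* embedded member handed to workers */
    };

    static void handle_work(struct workitem *work)
    {
        /* step back from the embedded member to the containing request */
        struct request *req = container_of(work, struct request, work);

        printf("request offset %lld\n", req->offset);
    }

    int main(void)
    {
        struct request req = { .offset = 8192, .work = { .pending = 1 } };

        handle_work(&req.work);
        return 0;
    }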
iolog.h 193 * When logging io actions, this matches a single sent io_u
226 struct io_u;
227 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
228 extern void log_io_u(const struct thread_data *, const struct io_u *);
231 extern void log_io_piece(struct thread_data *, struct io_u *);
232 extern void unlog_io_piece(struct thread_data *, struct io_u *);
233 extern void trim_io_piece(struct thread_data *, const struct io_u *);
iolog.c 33 void log_io_u(const struct thread_data *td, const struct io_u *io_u)
38 fprintf(td->iolog_f, "%s %s %llu %lu\n", io_u->file->file_name,
39 io_ddir_name(io_u->ddir),
40 io_u->offset, io_u->buflen);
136 int read_iolog_get(struct thread_data *td, struct io_u *io_u)
157 io_u->ddir = ipo->ddir;
159 io_u->offset = ipo->offset
    [all...]
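Note: log_io_u() above writes one line per io_u in the form "filename direction offset length". A minimal sketch producing the same shape of record; the field types are chosen to match the format string, not copied from fio:

    #include <stdio.h>

    /* one record per logged io_u: file, direction, offset, length */
    static void log_record(FILE *f, const char *file_name, const char *ddir,
                           unsigned long long offset, unsigned long buflen)
    {
        fprintf(f, "%s %s %llu %lu\n", file_name, ddir, offset, buflen);
    }

    int main(void)
    {
        log_record(stdout, "/dev/sdb", "write", 1048576ULL, 4096UL);
        return 0;
    }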
verify-state.h 58 struct io_u;
64 extern int verify_state_should_stop(struct thread_data *, struct io_u *);
stat.h 305 extern void add_iops_sample(struct thread_data *, struct io_u *,
307 extern void add_bw_sample(struct thread_data *, struct io_u *,
333 uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u);
fio.h 43 #include "io_u.h"
237 * IO engine hooks, contains everything needed to submit an io_u
250 * Queue depth of io_u's that fio MIGHT do
255 * io_u's about to be committed
260 * io_u's submitted but not completed yet
265 * List of free and busy io_u's
502 static inline void fio_ro_check(const struct thread_data *td, struct io_u *io_u)
504 assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
651 extern int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret
    [all...]
stat.c     [all...]
Makefile 42 eta.c verify.c memory.c io_u.c parse.c mutex.c options.c \
options.c 28 .len = FIELD_SIZE(struct io_u *, offset),
    [all...]
  /external/fio/engines/
null.c 21 struct io_u **io_us;
26 static struct io_u *fio_null_event(struct thread_data *td, int event)
63 static int fio_null_queue(struct thread_data *td, struct io_u *io_u)
67 fio_ro_check(td, io_u);
74 nd->io_us[nd->queued++] = io_u;
101 nd->io_us = (struct io_u **) malloc(td->o.iodepth * sizeof(struct io_u *));
102 memset(nd->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
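Note: the null engine parks each queued io_u in a preallocated pointer array and hands it back later as a completion event. A simplified stand-alone version of that queue/event bookkeeping, using stand-in types rather than fio's ioengine API:

    #include <stdlib.h>
    #include <stdio.h>

    struct sketch_io_u { long long offset; };

    struct null_data {
        struct sketch_io_u **io_us;     /* parked, "in flight" units */
        unsigned int queued;
        unsigned int depth;
    };

    static int null_queue(struct null_data *nd, struct sketch_io_u *io_u)
    {
        if (nd->queued == nd->depth)
            return -1;                          /* queue full */
        nd->io_us[nd->queued++] = io_u;         /* completed later as an event */
        return 0;
    }

    static struct sketch_io_u *null_event(struct null_data *nd, unsigned int event)
    {
        return event < nd->queued ? nd->io_us[event] : NULL;
    }

    int main(void)
    {
        struct null_data nd = { .depth = 4 };
        struct sketch_io_u u = { .offset = 512 };

        nd.io_us = calloc(nd.depth, sizeof(*nd.io_us));
        if (!nd.io_us)
            return 1;
        null_queue(&nd, &u);
        printf("event 0 offset %lld\n", null_event(&nd, 0)->offset);
        free(nd.io_us);
        return 0;
    }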
binject.c 24 struct io_u **events;
35 static void binject_buc_init(struct binject_data *bd, struct io_u *io_u)
37 struct b_user_cmd *buc = &io_u->buc;
42 buc->buf = (unsigned long) io_u->xfer_buf;
43 buc->len = io_u->xfer_buflen;
44 buc->offset = io_u->offset;
45 buc->usr_ptr = (unsigned long) io_u;
150 bd->events[ev_index] = (struct io_u *) (unsigned long) buc->usr_ptr;
173 static int fio_binject_doio(struct thread_data *td, struct io_u *io_u
    [all...]
libhdfs.c 123 static int fio_hdfsio_prep(struct thread_data *td, struct io_u *io_u)
132 f_id = floor(io_u->offset / options-> chunck_size);
147 if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_SYNC) {
149 } else if (io_u->ddir == DDIR_WRITE) {
156 get_chunck_name(fname, io_u->file->file_name, f_id);
168 static int fio_hdfsio_queue(struct thread_data *td, struct io_u *io_u)
175 offset = io_u->offset % options->chunck_size
    [all...]
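Note: the HDFS engine maps a global io_u offset to a chunk file id and an offset within that chunk. The arithmetic is plain integer division and remainder; a tiny sketch with an arbitrary example chunk size:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long long chunk_size = 64ULL << 20;    /* 64 MiB, example */
        unsigned long long off = 200ULL << 20;                /* global offset */

        unsigned long long f_id = off / chunk_size;           /* which chunk file */
        unsigned long long in_chunk = off % chunk_size;       /* offset inside it */

        printf("chunk %llu, offset %llu\n", f_id, in_chunk);
        return 0;
    }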
net.c 332 static int fio_netio_prep(struct thread_data *td, struct io_u *io_u)
342 if ((o->listen && io_u->ddir == DDIR_WRITE) ||
343 (!o->listen && io_u->ddir == DDIR_READ)) {
377 static int splice_in(struct thread_data *td, struct io_u *io_u)
381 return splice_io_u(io_u->file->fd, nd->pipes[1], io_u->xfer_buflen);
387 static int splice_out(struct thread_data *td, struct io_u *io_u,
    [all...]
fusion-aw.c 37 static int queue(struct thread_data *td, struct io_u *io_u)
39 struct fas_data *d = FILE_ENG_DATA(io_u->file);
42 if (io_u->ddir != DDIR_WRITE) {
43 td_vmsg(td, EINVAL, "only writes supported", "io_u->ddir");
48 if ((size_t) io_u->xfer_buf % d->xfer_buf_align) {
49 td_vmsg(td, EINVAL, "unaligned data buffer", "io_u->xfer_buf");
54 if (io_u->xfer_buflen % d->xfer_buflen_align) {
55 td_vmsg(td, EINVAL, "unaligned data size", "io_u->xfer_buflen");
60 if (io_u->xfer_buflen > d->xfer_buflen_max)
    [all...]
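Note: fusion-aw.c rejects an io_u whose buffer address or length is not a multiple of the device's required alignment, or whose length exceeds a maximum. A small sketch of those modulo checks; the limits below are made-up example values:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* made-up example limits; a real engine queries these from the device */
    #define BUF_ALIGN       512u
    #define LEN_ALIGN       512u
    #define LEN_MAX         (1u << 20)

    static int check_xfer(const void *buf, size_t len)
    {
        if ((uintptr_t) buf % BUF_ALIGN)
            return -1;          /* unaligned data buffer */
        if (len % LEN_ALIGN)
            return -2;          /* unaligned data size */
        if (len > LEN_MAX)
            return -3;          /* transfer too large */
        return 0;
    }

    int main(void)
    {
        void *buf = aligned_alloc(BUF_ALIGN, 4096);

        if (!buf)
            return 1;
        printf("aligned, even length: %d\n", check_xfer(buf, 4096));
        printf("odd length:           %d\n", check_xfer(buf, 100));
        free(buf);
        return 0;
    }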
sg.c 30 struct io_u **events;
39 struct io_u *io_u, int fs)
41 struct sgio_cmd *sc = &sd->cmds[io_u->index];
51 hdr->pack_id = io_u->index;
52 hdr->usr_ptr = io_u;
55 hdr->dxferp = io_u->xfer_buf;
56 hdr->dxfer_len = io_u->xfer_buflen;
185 struct io_u *io_u; local
    [all...]
pmemblk.c 345 static int fio_pmemblk_queue(struct thread_data *td, struct io_u *io_u)
347 struct fio_file *f = io_u->file;
354 fio_ro_check(td, io_u);
356 switch (io_u->ddir) {
359 off = io_u->offset;
360 len = io_u->xfer_buflen;
362 io_u->error = EINVAL;
370 io_u->error = 0;
371 buf = io_u->xfer_buf
    [all...]
rdma.c 182 struct io_u **io_us_queued;
184 struct io_u **io_us_flight;
186 struct io_u **io_us_completed;
562 static int fio_rdmaio_prep(struct thread_data *td, struct io_u *io_u)
567 r_io_u_d = io_u->engine_data;
572 r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
573 r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey;
580 r_io_u_d->rdma_sgl.addr = (uint64_t) (unsigned long)io_u->buf;
581 r_io_u_d->rdma_sgl.lkey = io_u->mr->lkey
608 struct io_u *io_u; local
814 struct io_u *io_u = io_us[i]; local
1287 struct io_u *io_u = td->io_u_freelist.io_us[i]; local
    [all...]
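Note: the RDMA engine tracks io_u pointers through separate queued, in-flight, and completed arrays as each operation progresses. A compact sketch of that staging idea with fixed-size arrays and stand-in types:

    #include <stdio.h>

    #define DEPTH 4

    struct stage {
        void *items[DEPTH];
        int nr;
    };

    /* remove the first element of 'from' and append it to 'to' */
    static void *move_first(struct stage *from, struct stage *to)
    {
        void *p;
        int i;

        if (!from->nr || to->nr == DEPTH)
            return NULL;
        p = from->items[0];
        for (i = 1; i < from->nr; i++)          /* shift remaining down */
            from->items[i - 1] = from->items[i];
        from->nr--;
        to->items[to->nr++] = p;
        return p;
    }

    int main(void)
    {
        struct stage queued = { .nr = 0 }, flight = { .nr = 0 }, done = { .nr = 0 };
        int a = 1;

        queued.items[queued.nr++] = &a;         /* submitted by the job */
        move_first(&queued, &flight);           /* posted to the wire */
        move_first(&flight, &done);             /* completion reaped */
        printf("completed: %d\n", *(int *) done.items[0]);
        return 0;
    }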
gfapi.h 13 struct io_u **aio_events;
cpu.c 56 static int fio_cpuio_queue(struct thread_data *td, struct io_u fio_unused *io_u)

Completed in 1780 milliseconds
