/*
 * glusterfs engine
 *
 * IO engine using GlusterFS's gfapi async interface
 *
 */
#include "gfapi.h"
#define NOT_YET 1
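/*
 * Illustrative job-file sketch for this engine (not part of this file);
 * the "volume" and "brick" option names are assumed to come from the
 * shared gfapi option table declared in gfapi.h:
 *
 *   [global]
 *   ioengine=gfapi_async
 *   volume=testvol
 *   brick=gluster-server
 *   rw=randwrite
 *   bs=4k
 *   size=128m
 *   iodepth=32
 *
 *   [job1]
 *   filename=fio.gfapi.test
 */

/* Per-io_u private state: completion flag set by the gfapi callback
 * and polled by fio_gf_getevents(). */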
struct fio_gf_iou {
	struct io_u *io_u;
	int io_complete;
};

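/* Return the nth completed io_u collected by fio_gf_getevents(). */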
static struct io_u *fio_gf_event(struct thread_data *td, int event)
{
	struct gf_data *gf_data = td->io_ops->data;

	dprint(FD_IO, "%s\n", __FUNCTION__);
	return gf_data->aio_events[event];
}

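/*
 * Reap completions: gf_async_cb() only flips io_complete, so walk all
 * in-flight io_us, collect the completed ones, and sleep briefly until
 * at least 'min' of them are available.
 */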
static int fio_gf_getevents(struct thread_data *td, unsigned int min,
			    unsigned int max, const struct timespec *t)
{
	struct gf_data *g = td->io_ops->data;
	unsigned int events = 0;
	struct io_u *io_u;
	int i;

	dprint(FD_IO, "%s\n", __FUNCTION__);
	do {
		io_u_qiter(&td->io_u_all, io_u, i) {
			struct fio_gf_iou *io;

			if (!(io_u->flags & IO_U_F_FLIGHT))
				continue;

			io = io_u->engine_data;
			if (io->io_complete) {
				io->io_complete = 0;
				g->aio_events[events] = io_u;
				events++;

				if (events >= max)
					break;
			}
		}
		if (events < min)
			usleep(100);
		else
			break;

	} while (1);

	return events;
}

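/*
 * Per-io_u setup/teardown: each io_u carries a fio_gf_iou so the
 * completion callback has somewhere to record completion.
 */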
static void fio_gf_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct fio_gf_iou *io = io_u->engine_data;

	if (io) {
		if (io->io_complete)
			log_err("incomplete IO found.\n");
		io_u->engine_data = NULL;
		free(io);
	}
}

static int fio_gf_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	dprint(FD_FILE, "%s\n", __FUNCTION__);

	if (!io_u->engine_data) {
		struct fio_gf_iou *io;

		io = malloc(sizeof(struct fio_gf_iou));
		if (!io) {
			td_verror(td, errno, "malloc");
			return 1;
		}
		io->io_complete = 0;
		io->io_u = io_u;
		io_u->engine_data = io;
	}
	return 0;
}

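/*
 * Completion callback invoked by gfapi when an async request finishes;
 * it only marks the io_u complete.  Note that a negative 'ret' (an I/O
 * error) is not propagated to io_u->error here.
 */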
static void gf_async_cb(glfs_fd_t * fd, ssize_t ret, void *data)
{
	struct io_u *io_u = data;
	struct fio_gf_iou *iou = io_u->engine_data;

	dprint(FD_IO, "%s ret %zd\n", __FUNCTION__, ret);
	iou->io_complete = 1;
}

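/*
 * Submit one io_u: map the fio data direction onto the matching
 * glfs_*_async() call.  On success the request stays in flight and is
 * reaped later by getevents; on submission failure it is completed
 * immediately with an error.
 */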
static int fio_gf_async_queue(struct thread_data *td, struct io_u *io_u)
{
	struct gf_data *g = td->io_ops->data;
	int r;

	dprint(FD_IO, "%s op %s\n", __FUNCTION__, io_ddir_name(io_u->ddir));

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		r = glfs_pread_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
				     io_u->offset, 0, gf_async_cb, io_u);
	else if (io_u->ddir == DDIR_WRITE)
		r = glfs_pwrite_async(g->fd, io_u->xfer_buf, io_u->xfer_buflen,
				      io_u->offset, 0, gf_async_cb, io_u);
#if defined(CONFIG_GF_TRIM)
	else if (io_u->ddir == DDIR_TRIM)
		r = glfs_discard_async(g->fd, io_u->offset, io_u->xfer_buflen,
				       gf_async_cb, io_u);
#endif
	else if (io_u->ddir == DDIR_DATASYNC)
		r = glfs_fdatasync_async(g->fd, gf_async_cb, io_u);
	else if (io_u->ddir == DDIR_SYNC)
		r = glfs_fsync_async(g->fd, gf_async_cb, io_u);
	else
		r = EINVAL;

	if (r) {
		log_err("glfs queue failed.\n");
		goto failed;
	}
	return FIO_Q_QUEUED;

failed:
	io_u->error = r;
	td_verror(td, io_u->error, "xfer");
	return FIO_Q_COMPLETED;
}

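/*
 * Engine init: run the gfapi setup shared with the synchronous engine,
 * force the thread-based job model, and allocate the array used to hand
 * completed io_us back to fio.
 */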
static int fio_gf_async_setup(struct thread_data *td)
{
	struct gf_data *g;
	int r;

#if defined(NOT_YET)
	log_err("the async interface is still very experimental...\n");
#endif
	r = fio_gf_setup(td);
	if (r)
		return r;

	td->o.use_thread = 1;
	g = td->io_ops->data;
	g->aio_events = calloc(td->o.iodepth, sizeof(struct io_u *));
	if (!g->aio_events) {
		r = -ENOMEM;
		fio_gf_cleanup(td);
		return r;
	}

	return 0;
}

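/*
 * Engine registration table: async queue/getevents/event hooks plus the
 * file and option handling shared with the synchronous gfapi engine.
 * FIO_DISKLESSIO tells fio there is no local disk behind the I/O.
 */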
static struct ioengine_ops ioengine = {
	.name = "gfapi_async",
	.version = FIO_IOOPS_VERSION,
	.init = fio_gf_async_setup,
	.cleanup = fio_gf_cleanup,
	.queue = fio_gf_async_queue,
	.open_file = fio_gf_open_file,
	.close_file = fio_gf_close_file,
	.unlink_file = fio_gf_unlink_file,
	.get_file_size = fio_gf_get_file_size,
	.getevents = fio_gf_getevents,
	.event = fio_gf_event,
	.io_u_init = fio_gf_io_u_init,
	.io_u_free = fio_gf_io_u_free,
	.options = gfapi_options,
	.option_struct_size = sizeof(struct gf_options),
	.flags = FIO_DISKLESSIO,
};

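/* Register/unregister the engine at load time via fio's constructor hooks. */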
static void fio_init fio_gf_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_gf_unregister(void)
{
	unregister_ioengine(&ioengine);
}