
Lines Matching refs:rd

121 struct rdmaio_data *rd = td->io_ops->data;
123 if (wc->byte_len != sizeof(rd->recv_buf)) {
129 if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
130 (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
134 rd->rmt_nr = ntohl(rd->recv_buf.nr);
136 for (i = 0; i < rd->rmt_nr; i++) {
137 rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf);
138 rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
139 rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);
143 " len %d from peer\n", rd->rmt_us[i].rkey,
144 rd->rmt_us[i].buf, rd->rmt_us[i].size);
153 struct rdmaio_data *rd = td->io_ops->data;
156 rd->rdma_protocol = ntohl(rd->recv_buf.mode);
159 if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
160 rd->rdma_protocol = FIO_RDMA_CHA_RECV;
168 struct rdmaio_data *rd = td->io_ops->data;
175 while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
188 if (rd->is_client == 1)
196 for (i = 0; i < rd->io_u_flight_nr; i++) {
197 r_io_u_d = rd->io_us_flight[i]->engine_data;
200 rd->io_us_flight[i]->resid =
201 rd->io_us_flight[i]->buflen
204 rd->io_us_flight[i]->error = 0;
206 rd->io_us_completed[rd->
208 = rd->io_us_flight[i];
209 rd->io_u_completed_nr++;
213 if (i == rd->io_u_flight_nr)
218 rd->io_us_flight[i] =
219 rd->io_us_flight[rd->io_u_flight_nr - 1];
220 rd->io_u_flight_nr--;
231 for (i = 0; i < rd->io_u_flight_nr; i++) {
232 r_io_u_d = rd->io_us_flight[i]->engine_data;
235 rd->io_us_completed[rd->
237 = rd->io_us_flight[i];
238 rd->io_u_completed_nr++;
242 if (i == rd->io_u_flight_nr)
247 rd->io_us_flight[i] =
248 rd->io_us_flight[rd->io_u_flight_nr - 1];
249 rd->io_u_flight_nr--;
259 rd->cq_event_num++;
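For context: the group above (the engine's completion handler, source lines 168-259) drains the completion queue with ibv_poll_cq and moves io_us from the flight array to the completed array. A minimal sketch of the underlying drain loop, independent of fio's bookkeeping; drain_cq and its success count are illustrative only:

#include <infiniband/verbs.h>

/* Drain every completion currently queued on cq; return how many
 * completed successfully, or -1 if polling itself failed. */
static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int ret, good = 0;

	while ((ret = ibv_poll_cq(cq, 1, &wc)) == 1) {
		if (wc.status != IBV_WC_SUCCESS)
			continue;		/* a real engine would fail the io_u here */
		switch (wc.opcode) {
		case IBV_WC_RECV:		/* an inbound SEND landed in a posted recv */
		case IBV_WC_SEND:		/* our SEND completed */
		case IBV_WC_RDMA_WRITE:		/* one-sided write completed locally */
		case IBV_WC_RDMA_READ:		/* one-sided read completed locally */
			good++;
			break;
		default:
			break;
		}
	}
	return ret < 0 ? -1 : good;
}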
275 struct rdmaio_data *rd = td->io_ops->data;
280 if (rd->cq_event_num > 0) { /* previous left */
281 rd->cq_event_num--;
286 if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
290 if (ev_cq != rd->cq) {
294 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
303 ibv_ack_cq_events(rd->cq, ret);
305 rd->cq_event_num--;
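Source lines 275-305 implement the blocking wait: take one event from the completion channel, verify it belongs to our CQ, acknowledge it, and rearm notification before polling again. A sketch of that standard libibverbs sequence (the function name is hypothetical):

#include <infiniband/verbs.h>

/* Block until the completion channel signals cq, then rearm it.
 * Returns 0 on success, -1 on any failure. */
static int wait_for_cq_event(struct ibv_comp_channel *channel, struct ibv_cq *cq)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;

	if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx) != 0)
		return -1;			/* blocks until an event arrives */
	if (ev_cq != cq)
		return -1;			/* event for a CQ we do not own */
	ibv_ack_cq_events(cq, 1);		/* every event must be acked eventually */
	if (ibv_req_notify_cq(cq, 0) != 0)
		return -1;			/* rearm so the next completion signals */
	return 0;				/* caller now drains with ibv_poll_cq() */
}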
312 struct rdmaio_data *rd = td->io_ops->data;
316 if (rd->is_client == 0)
317 rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
319 rd->pd = ibv_alloc_pd(rd->cm_id->verbs);
321 if (rd->pd == NULL) {
326 if (rd->is_client == 0)
327 rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
329 rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
330 if (rd->channel == NULL) {
338 if (rd->is_client == 0)
339 rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
340 qp_depth, rd, rd->channel, 0);
342 rd->cq = ibv_create_cq(rd->cm_id->verbs,
343 qp_depth, rd, rd->channel, 0);
344 if (rd->cq == NULL) {
349 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
361 init_attr.send_cq = rd->cq;
362 init_attr.recv_cq = rd->cq;
364 if (rd->is_client == 0) {
365 if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
369 rd->qp = rd->child_cm_id->qp;
371 if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
375 rd->qp = rd->cm_id->qp;
381 ibv_destroy_cq(rd->cq);
383 ibv_destroy_comp_channel(rd->channel);
385 ibv_dealloc_pd(rd->pd);
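Source lines 312-385 create the verbs objects in the usual order: protection domain, completion channel, completion queue, then a reliable-connected QP via rdma_create_qp. A hedged sketch of that order against an already-resolved rdma_cm_id; setup_qp_sketch and qp_depth are placeholders and error unwinding is omitted:

#include <string.h>
#include <rdma/rdma_cma.h>

/* Create PD, completion channel, CQ and an RC QP on cm_id->verbs. */
static int setup_qp_sketch(struct rdma_cm_id *cm_id, int qp_depth,
			   struct ibv_pd **pd, struct ibv_comp_channel **ch,
			   struct ibv_cq **cq)
{
	struct ibv_qp_init_attr init_attr;

	*pd = ibv_alloc_pd(cm_id->verbs);
	*ch = ibv_create_comp_channel(cm_id->verbs);
	if (!*pd || !*ch)
		return -1;

	/* One CQ shared by the send and receive queues. */
	*cq = ibv_create_cq(cm_id->verbs, qp_depth, NULL, *ch, 0);
	if (!*cq || ibv_req_notify_cq(*cq, 0) != 0)
		return -1;

	memset(&init_attr, 0, sizeof(init_attr));
	init_attr.cap.max_send_wr = qp_depth;
	init_attr.cap.max_recv_wr = qp_depth;
	init_attr.cap.max_send_sge = 1;
	init_attr.cap.max_recv_sge = 1;
	init_attr.qp_type = IBV_QPT_RC;
	init_attr.send_cq = *cq;
	init_attr.recv_cq = *cq;

	/* rdma_create_qp() attaches the new QP to cm_id (cm_id->qp). */
	return rdma_create_qp(cm_id, *pd, &init_attr);
}

Sharing one CQ between the send and receive queues is what lets the single completion handler above service both directions.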
392 struct rdmaio_data *rd = td->io_ops->data;
394 rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
396 if (rd->recv_mr == NULL) {
401 rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
403 if (rd->send_mr == NULL) {
405 ibv_dereg_mr(rd->recv_mr);
411 rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
412 rd->recv_sgl.length = sizeof(rd->recv_buf);
413 rd->recv_sgl.lkey = rd->recv_mr->lkey;
414 rd->rq_wr.sg_list = &rd->recv_sgl;
415 rd->rq_wr.num_sge = 1;
416 rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;
419 rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
420 rd->send_sgl.length = sizeof(rd->send_buf);
421 rd->send_sgl.lkey = rd->send_mr->lkey;
423 rd->sq_wr.opcode = IBV_WR_SEND;
424 rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
425 rd->sq_wr.sg_list = &rd->send_sgl;
426 rd->sq_wr.num_sge = 1;
427 rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;
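Source lines 392-427 register the two small control-message buffers and wire each into a single-SGE work request (one receive WR, one signaled SEND WR). A sketch of the same pattern for a generic buffer pair; struct ctl_msg and setup_ctl_buffers are hypothetical stand-ins for the engine's own types:

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

struct ctl_msg { uint32_t mode; uint32_t nr; };	/* hypothetical control message */

struct ctl_buffers {
	struct ctl_msg recv_buf, send_buf;
	struct ibv_mr *recv_mr, *send_mr;
	struct ibv_sge recv_sgl, send_sgl;
	struct ibv_recv_wr rq_wr;
	struct ibv_send_wr sq_wr;
};

static int setup_ctl_buffers(struct ibv_pd *pd, struct ctl_buffers *c)
{
	memset(c, 0, sizeof(*c));

	/* The recv buffer is written by inbound SENDs, so it needs LOCAL_WRITE. */
	c->recv_mr = ibv_reg_mr(pd, &c->recv_buf, sizeof(c->recv_buf),
				IBV_ACCESS_LOCAL_WRITE);
	c->send_mr = ibv_reg_mr(pd, &c->send_buf, sizeof(c->send_buf), 0);
	if (!c->recv_mr || !c->send_mr)
		return -1;

	c->recv_sgl.addr = (uint64_t)(uintptr_t)&c->recv_buf;
	c->recv_sgl.length = sizeof(c->recv_buf);
	c->recv_sgl.lkey = c->recv_mr->lkey;
	c->rq_wr.sg_list = &c->recv_sgl;
	c->rq_wr.num_sge = 1;

	c->send_sgl.addr = (uint64_t)(uintptr_t)&c->send_buf;
	c->send_sgl.length = sizeof(c->send_buf);
	c->send_sgl.lkey = c->send_mr->lkey;
	c->sq_wr.opcode = IBV_WR_SEND;
	c->sq_wr.send_flags = IBV_SEND_SIGNALED;
	c->sq_wr.sg_list = &c->send_sgl;
	c->sq_wr.num_sge = 1;
	return 0;
}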
436 struct rdmaio_data *rd = td->io_ops->data;
455 rd->child_cm_id = event->id;
468 struct rdmaio_data *rd = td->io_ops->data;
473 switch (rd->rdma_protocol) {
502 log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
511 struct rdmaio_data *rd = td->io_ops->data;
515 io_u = rd->io_us_completed[0];
516 for (i = 0; i < rd->io_u_completed_nr - 1; i++)
517 rd->io_us_completed[i] = rd->io_us_completed[i + 1];
519 rd->io_u_completed_nr--;
529 struct rdmaio_data *rd = td->io_ops->data;
536 switch (rd->rdma_protocol) {
550 log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
554 if (rd->cq_event_num > 0) { /* previous left */
555 rd->cq_event_num--;
560 if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
564 if (ev_cq != rd->cq) {
568 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
577 ibv_ack_cq_events(rd->cq, ret);
583 rd->cq_event_num -= r;
591 struct rdmaio_data *rd = td->io_ops->data;
605 switch (rd->rdma_protocol) {
609 index = __rand(&rd->rand_state) % rd->rmt_nr;
611 r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
613 rd->rmt_us[index].buf;
619 index = __rand(&rd->rand_state) % rd->rmt_nr;
621 r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
623 rd->rmt_us[index].buf;
633 rd->rdma_protocol);
637 if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
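Source lines 591-637 pick a remote buffer at random and post a one-sided RDMA WRITE or READ against its advertised rkey. A sketch of building such a work request; post_rdma_one_sided is a made-up helper and the local sge is assumed to be registered already:

#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Post one signaled RDMA WRITE (or READ if 'read' is set) of the local
 * sge against the peer buffer described by remote_addr/rkey. */
static int post_rdma_one_sided(struct ibv_qp *qp, struct ibv_sge *sge,
			       uint64_t remote_addr, uint32_t rkey, int read)
{
	struct ibv_send_wr wr, *bad_wr = NULL;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = 1;					/* engine-chosen cookie */
	wr.opcode = read ? IBV_WR_RDMA_READ : IBV_WR_RDMA_WRITE;
	wr.send_flags = IBV_SEND_SIGNALED;		/* ask for a completion */
	wr.sg_list = sge;
	wr.num_sge = 1;
	wr.wr.rdma.remote_addr = remote_addr;		/* peer virtual address */
	wr.wr.rdma.rkey = rkey;				/* peer's remote key */

	return ibv_post_send(qp, &wr, &bad_wr);		/* 0 on success */
}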
654 struct rdmaio_data *rd = td->io_ops->data;
660 if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
664 if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) !=
670 } else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
671 || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
673 if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
690 struct rdmaio_data *rd = td->io_ops->data;
694 if (rd->io_u_queued_nr == (int)td->o.iodepth)
697 rd->io_us_queued[rd->io_u_queued_nr] = io_u;
698 rd->io_u_queued_nr++;
708 struct rdmaio_data *rd = td->io_ops->data;
721 rd->io_us_flight[rd->io_u_flight_nr] = io_u;
722 rd->io_u_flight_nr++;
731 struct rdmaio_data *rd = td->io_ops->data;
735 if (!rd->io_us_queued)
738 io_us = rd->io_us_queued;
741 if (rd->is_client)
742 ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
743 else if (!rd->is_client)
744 ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);
751 rd->io_u_queued_nr -= ret;
756 } while (rd->io_u_queued_nr);
763 struct rdmaio_data *rd = td->io_ops->data;
772 if (rdma_connect(rd->cm_id, &conn_param) != 0) {
778 (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
784 rd->send_buf.mode = htonl(rd->rdma_protocol);
785 rd->send_buf.nr = htonl(td->o.iodepth);
787 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
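Source lines 763-787 are the client handshake: rdma_connect, wait for RDMA_CM_EVENT_ESTABLISHED on the CM event channel, then SEND the requested mode and iodepth to the server. A sketch of the connect-and-wait half using plain librdmacm calls; connect_and_wait is illustrative, and the engine wraps the event wait in its own get_next_channel_event helper:

#include <string.h>
#include <rdma/rdma_cma.h>

/* Connect cm_id (address and route already resolved) and wait for ESTABLISHED. */
static int connect_and_wait(struct rdma_cm_id *cm_id, struct rdma_event_channel *ec)
{
	struct rdma_conn_param conn_param;
	struct rdma_cm_event *event;
	int ok;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = 1;	/* inbound RDMA READs we accept */
	conn_param.initiator_depth = 1;		/* RDMA READs we keep in flight */
	conn_param.retry_count = 10;

	if (rdma_connect(cm_id, &conn_param) != 0)
		return -1;

	if (rdma_get_cm_event(ec, &event) != 0)
		return -1;
	ok = (event->event == RDMA_CM_EVENT_ESTABLISHED);
	rdma_ack_cm_event(event);
	return ok ? 0 : -1;
}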
812 struct rdmaio_data *rd = td->io_ops->data;
821 if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
827 (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
835 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
855 struct rdmaio_data *rd = td->io_ops->data;
864 if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
865 || (rd->rdma_protocol ==
867 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
876 if (rd->is_client == 1)
877 rdma_disconnect(rd->cm_id);
879 rdma_disconnect(rd->child_cm_id);
881 rdma_disconnect(rd->cm_id);
886 if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
892 ibv_destroy_cq(rd->cq);
893 ibv_destroy_qp(rd->qp);
895 if (rd->is_client == 1)
896 rdma_destroy_id(rd->cm_id);
898 rdma_destroy_id(rd->child_cm_id);
899 rdma_destroy_id(rd->cm_id);
902 ibv_destroy_comp_channel(rd->channel);
903 ibv_dealloc_pd(rd->pd);
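Source lines 855-903 tear the connection down: disconnect, wait for RDMA_CM_EVENT_DISCONNECTED, then destroy the verbs objects and the cm_id(s). A short sketch of one connection's teardown; it destroys the QP before its CQ, which is the order the verbs API expects:

#include <rdma/rdma_cma.h>

/* Orderly teardown of one established connection.  The engine also waits
 * for RDMA_CM_EVENT_DISCONNECTED on its event channel before destroying. */
static void teardown_connection(struct rdma_cm_id *cm_id, struct ibv_qp *qp,
				struct ibv_cq *cq, struct ibv_comp_channel *ch,
				struct ibv_pd *pd)
{
	rdma_disconnect(cm_id);		/* peer sees a DISCONNECTED event */
	ibv_destroy_qp(qp);		/* QP first, while its CQ still exists */
	ibv_destroy_cq(cq);		/* all CQ events must be acked by now */
	ibv_destroy_comp_channel(ch);
	ibv_dealloc_pd(pd);		/* valid only after every MR is deregistered */
	rdma_destroy_id(cm_id);
}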
911 struct rdmaio_data *rd = td->io_ops->data;
915 rd->addr.sin_family = AF_INET;
916 rd->addr.sin_port = htons(port);
918 if (inet_aton(host, &rd->addr.sin_addr) != 1) {
927 memcpy(&rd->addr.sin_addr, hent->h_addr, 4);
931 err = rdma_resolve_addr(rd->cm_id, NULL, (struct sockaddr *)&rd->addr, 2000);
937 err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED);
944 err = rdma_resolve_route(rd->cm_id, 2000);
950 err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED);
964 err = ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr);
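Source lines 911-964 resolve the server address and then the route, waiting on the CM event channel after each step, before posting the first control receive. A sketch of that resolution sequence with a small event-wait helper standing in for get_next_channel_event; names and the 2000 ms timeouts mirror the lines above:

#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <rdma/rdma_cma.h>

/* Wait for exactly one CM event of the expected type and acknowledge it. */
static int wait_cm_event(struct rdma_event_channel *ec, enum rdma_cm_event_type expected)
{
	struct rdma_cm_event *event;
	int ok;

	if (rdma_get_cm_event(ec, &event) != 0)
		return -1;
	ok = (event->event == expected);
	rdma_ack_cm_event(event);
	return ok ? 0 : -1;
}

/* Resolve host:port to an RDMA path on cm_id created on event channel ec. */
static int resolve_peer(struct rdma_cm_id *cm_id, struct rdma_event_channel *ec,
			const char *host, unsigned short port)
{
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	if (inet_aton(host, &addr.sin_addr) == 0)
		return -1;	/* the engine falls back to gethostbyname() here */

	if (rdma_resolve_addr(cm_id, NULL, (struct sockaddr *)&addr, 2000) != 0)
		return -1;
	if (wait_cm_event(ec, RDMA_CM_EVENT_ADDR_RESOLVED) != 0)
		return -1;

	if (rdma_resolve_route(cm_id, 2000) != 0)
		return -1;
	return wait_cm_event(ec, RDMA_CM_EVENT_ROUTE_RESOLVED);
}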
975 struct rdmaio_data *rd = td->io_ops->data;
978 rd->addr.sin_family = AF_INET;
979 rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
980 rd->addr.sin_port = htons(port);
983 if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
988 if (rdma_listen(rd->cm_id, 3) != 0) {
995 (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
1007 if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
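Source lines 975-1007 are the passive side: bind to INADDR_ANY on the chosen port, listen, and wait for a CONNECT_REQUEST whose id becomes child_cm_id. A sketch of the bind-and-listen step (bind_and_listen is illustrative; backlog 3 matches the line above):

#include <string.h>
#include <netinet/in.h>
#include <rdma/rdma_cma.h>

/* Bind cm_id to 0.0.0.0:port and start listening. */
static int bind_and_listen(struct rdma_cm_id *cm_id, unsigned short port)
{
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (rdma_bind_addr(cm_id, (struct sockaddr *)&addr) != 0)
		return -1;
	/* Incoming connects arrive as RDMA_CM_EVENT_CONNECT_REQUEST events;
	 * event->id is the per-connection child cm_id passed to rdma_accept(). */
	return rdma_listen(cm_id, 3);
}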
1051 struct rdmaio_data *rd = td->io_ops->data;
1097 rd->rdma_protocol = FIO_RDMA_MEM_WRITE;
1100 rd->rdma_protocol = FIO_RDMA_MEM_READ;
1103 rd->rdma_protocol = FIO_RDMA_CHA_SEND;
1107 rd->rdma_protocol = FIO_RDMA_MEM_WRITE;
1109 rd->cq_event_num = 0;
1111 rd->cm_channel = rdma_create_event_channel();
1112 if (!rd->cm_channel) {
1117 ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
1123 if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
1124 (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
1125 rd->rmt_us =
1127 memset(rd->rmt_us, 0,
1129 rd->rmt_nr = 0;
1132 rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
1133 memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
1134 rd->io_u_queued_nr = 0;
1136 rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
1137 memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
1138 rd->io_u_flight_nr = 0;
1140 rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
1141 memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
1142 rd->io_u_completed_nr = 0;
1145 rd->is_client = 0;
1146 /* server rd->rdma_buf_len will be set up after the request is received */
1149 rd->is_client = 1;
1162 io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
1171 rd->send_buf.rmt_us[i].buf =
1173 rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
1174 rd->send_buf.rmt_us[i].size = htonl(max_bs);
1181 rd->send_buf.nr = htonl(i);
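Source lines 1051-1181 initialise the engine: pick the protocol, create the CM event channel and cm_id, allocate the queued/flight/completed arrays, and, for the one-sided modes, register every io_u buffer and advertise its address, rkey and size in send_buf. A sketch of registering one data buffer and filling a wire descriptor; struct remote_u_wire is hypothetical, and htobe64/htobe32 (glibc <endian.h>) stand in for the engine's htonll/htonl:

#include <stdint.h>
#include <endian.h>		/* htobe64/htobe32 on glibc/Linux */
#include <infiniband/verbs.h>

struct remote_u_wire { uint64_t buf; uint32_t rkey; uint32_t size; };

/* Register one data buffer for remote read+write and describe it for the
 * peer in network byte order. */
static struct ibv_mr *register_and_advertise(struct ibv_pd *pd, void *buf,
					     size_t len, struct remote_u_wire *out)
{
	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_REMOTE_READ |
				       IBV_ACCESS_REMOTE_WRITE);
	if (!mr)
		return NULL;

	out->buf  = htobe64((uint64_t)(uintptr_t)buf);	/* address the peer targets */
	out->rkey = htobe32(mr->rkey);			/* key the peer puts in its WR */
	out->size = htobe32((uint32_t)len);		/* usable length */
	return mr;
}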
1191 struct rdmaio_data *rd = td->io_ops->data;
1193 if (rd)
1194 free(rd);
1199 struct rdmaio_data *rd;
1202 rd = malloc(sizeof(*rd));
1204 memset(rd, 0, sizeof(*rd));
1205 init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME);
1206 td->io_ops->data = rd;