Lines matching refs:rd — every reference to the per-thread engine state, struct rdmaio_data *rd, in fio's RDMA ioengine (engines/rdma.c); each hit is prefixed with its source line number.
194 struct rdmaio_data *rd = td->io_ops_data;
197 if (wc->byte_len != sizeof(rd->recv_buf)) {
203 if (max_bs > ntohl(rd->recv_buf.max_bs)) {
206 ntohl(rd->recv_buf.max_bs), max_bs);
211 if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
212 (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
216 rd->rmt_nr = ntohl(rd->recv_buf.nr);
218 for (i = 0; i < rd->rmt_nr; i++) {
219 rd->rmt_us[i].buf = ntohll(rd->recv_buf.rmt_us[i].buf);
220 rd->rmt_us[i].rkey = ntohl(rd->recv_buf.rmt_us[i].rkey);
221 rd->rmt_us[i].size = ntohl(rd->recv_buf.rmt_us[i].size);
225 " len %d from peer\n", rd->rmt_us[i].rkey,
226 rd->rmt_us[i].buf, rd->rmt_us[i].size);
235 struct rdmaio_data *rd = td->io_ops_data;
239 rd->rdma_protocol = ntohl(rd->recv_buf.mode);
242 if (rd->rdma_protocol == FIO_RDMA_CHA_SEND)
243 rd->rdma_protocol = FIO_RDMA_CHA_RECV;
246 if (max_bs < ntohl(rd->recv_buf.max_bs)) {
249 ntohl(rd->recv_buf.max_bs), max_bs);
260 struct rdmaio_data *rd = td->io_ops_data;
267 while ((ret = ibv_poll_cq(rd->cq, 1, &wc)) == 1) {
280 if (rd->is_client == 1)
291 for (i = 0; i < rd->io_u_flight_nr; i++) {
292 r_io_u_d = rd->io_us_flight[i]->engine_data;
295 rd->io_us_flight[i]->resid =
296 rd->io_us_flight[i]->buflen
299 rd->io_us_flight[i]->error = 0;
301 rd->io_us_completed[rd->
303 = rd->io_us_flight[i];
304 rd->io_u_completed_nr++;
308 if (i == rd->io_u_flight_nr)
313 rd->io_us_flight[i] =
314 rd->io_us_flight[rd->io_u_flight_nr - 1];
315 rd->io_u_flight_nr--;
326 for (i = 0; i < rd->io_u_flight_nr; i++) {
327 r_io_u_d = rd->io_us_flight[i]->engine_data;
330 rd->io_us_completed[rd->
332 = rd->io_us_flight[i];
333 rd->io_u_completed_nr++;
337 if (i == rd->io_u_flight_nr)
342 rd->io_us_flight[i] =
343 rd->io_us_flight[rd->io_u_flight_nr - 1];
344 rd->io_u_flight_nr--;
354 rd->cq_event_num++;
371 struct rdmaio_data *rd = td->io_ops_data;
376 if (rd->cq_event_num > 0) { /* previous left */
377 rd->cq_event_num--;
382 if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
386 if (ev_cq != rd->cq) {
390 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
399 ibv_ack_cq_events(rd->cq, ret);
401 rd->cq_event_num--;
408 struct rdmaio_data *rd = td->io_ops_data;
412 if (rd->is_client == 0)
413 rd->pd = ibv_alloc_pd(rd->child_cm_id->verbs);
415 rd->pd = ibv_alloc_pd(rd->cm_id->verbs);
417 if (rd->pd == NULL) {
422 if (rd->is_client == 0)
423 rd->channel = ibv_create_comp_channel(rd->child_cm_id->verbs);
425 rd->channel = ibv_create_comp_channel(rd->cm_id->verbs);
426 if (rd->channel == NULL) {
434 if (rd->is_client == 0)
435 rd->cq = ibv_create_cq(rd->child_cm_id->verbs,
436 qp_depth, rd, rd->channel, 0);
438 rd->cq = ibv_create_cq(rd->cm_id->verbs,
439 qp_depth, rd, rd->channel, 0);
440 if (rd->cq == NULL) {
445 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
457 init_attr.send_cq = rd->cq;
458 init_attr.recv_cq = rd->cq;
460 if (rd->is_client == 0) {
461 if (rdma_create_qp(rd->child_cm_id, rd->pd, &init_attr) != 0) {
465 rd->qp = rd->child_cm_id->qp;
467 if (rdma_create_qp(rd->cm_id, rd->pd, &init_attr) != 0) {
471 rd->qp = rd->cm_id->qp;
477 ibv_destroy_cq(rd->cq);
479 ibv_destroy_comp_channel(rd->channel);
481 ibv_dealloc_pd(rd->pd);
488 struct rdmaio_data *rd = td->io_ops_data;
490 rd->recv_mr = ibv_reg_mr(rd->pd, &rd->recv_buf, sizeof(rd->recv_buf),
492 if (rd->recv_mr == NULL) {
497 rd->send_mr = ibv_reg_mr(rd->pd, &rd->send_buf, sizeof(rd->send_buf),
499 if (rd->send_mr == NULL) {
501 ibv_dereg_mr(rd->recv_mr);
507 rd->recv_sgl.addr = (uint64_t) (unsigned long)&rd->recv_buf;
508 rd->recv_sgl.length = sizeof(rd->recv_buf);
509 rd->recv_sgl.lkey = rd->recv_mr->lkey;
510 rd->rq_wr.sg_list = &rd->recv_sgl;
511 rd->rq_wr.num_sge = 1;
512 rd->rq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;
515 rd->send_sgl.addr = (uint64_t) (unsigned long)&rd->send_buf;
516 rd->send_sgl.length = sizeof(rd->send_buf);
517 rd->send_sgl.lkey = rd->send_mr->lkey;
519 rd->sq_wr.opcode = IBV_WR_SEND;
520 rd->sq_wr.send_flags = IBV_SEND_SIGNALED;
521 rd->sq_wr.sg_list = &rd->send_sgl;
522 rd->sq_wr.num_sge = 1;
523 rd->sq_wr.wr_id = FIO_RDMA_MAX_IO_DEPTH;
532 struct rdmaio_data *rd = td->io_ops_data;
551 rd->child_cm_id = event->id;
564 struct rdmaio_data *rd = td->io_ops_data;
569 switch (rd->rdma_protocol) {
598 log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
607 struct rdmaio_data *rd = td->io_ops_data;
611 io_u = rd->io_us_completed[0];
612 for (i = 0; i < rd->io_u_completed_nr - 1; i++)
613 rd->io_us_completed[i] = rd->io_us_completed[i + 1];
615 rd->io_u_completed_nr--;
625 struct rdmaio_data *rd = td->io_ops_data;
632 switch (rd->rdma_protocol) {
646 log_err("fio: unknown rdma protocol - %d\n", rd->rdma_protocol);
650 if (rd->cq_event_num > 0) { /* previous left */
651 rd->cq_event_num--;
656 if (ibv_get_cq_event(rd->channel, &ev_cq, &ev_ctx) != 0) {
660 if (ev_cq != rd->cq) {
664 if (ibv_req_notify_cq(rd->cq, 0) != 0) {
673 ibv_ack_cq_events(rd->cq, ret);
679 rd->cq_event_num -= r;
687 struct rdmaio_data *rd = td->io_ops_data;
701 switch (rd->rdma_protocol) {
705 index = __rand(&rd->rand_state) % rd->rmt_nr;
707 r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
709 rd->rmt_us[index].buf;
715 index = __rand(&rd->rand_state) % rd->rmt_nr;
717 r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
719 rd->rmt_us[index].buf;
729 rd->rdma_protocol);
733 if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
750 struct rdmaio_data *rd = td->io_ops_data;
756 if (rd->rdma_protocol == FIO_RDMA_CHA_RECV) {
760 if (ibv_post_recv(rd->qp, &r_io_u_d->rq_wr, &bad_wr) !=
766 } else if ((rd->rdma_protocol == FIO_RDMA_MEM_READ)
767 || (rd->rdma_protocol == FIO_RDMA_MEM_WRITE)) {
769 if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
786 struct rdmaio_data *rd = td->io_ops_data;
790 if (rd->io_u_queued_nr == (int)td->o.iodepth)
793 rd->io_us_queued[rd->io_u_queued_nr] = io_u;
794 rd->io_u_queued_nr++;
804 struct rdmaio_data *rd = td->io_ops_data;
817 rd->io_us_flight[rd->io_u_flight_nr] = io_u;
818 rd->io_u_flight_nr++;
827 struct rdmaio_data *rd = td->io_ops_data;
831 if (!rd->io_us_queued)
834 io_us = rd->io_us_queued;
837 if (rd->is_client)
838 ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
839 else if (!rd->is_client)
840 ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);
847 rd->io_u_queued_nr -= ret;
852 } while (rd->io_u_queued_nr);
859 struct rdmaio_data *rd = td->io_ops_data;
868 if (rdma_connect(rd->cm_id, &conn_param) != 0) {
874 (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
880 rd->send_buf.mode = htonl(rd->rdma_protocol);
881 rd->send_buf.nr = htonl(td->o.iodepth);
883 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
910 struct rdmaio_data *rd = td->io_ops_data;
920 if (rdma_accept(rd->child_cm_id, &conn_param) != 0) {
926 (td, rd->cm_channel, RDMA_CM_EVENT_ESTABLISHED) != 0) {
934 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
955 struct rdmaio_data *rd = td->io_ops_data;
964 if ((rd->is_client == 1) && ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE)
965 || (rd->rdma_protocol ==
967 if (ibv_post_send(rd->qp, &rd->sq_wr, &bad_wr) != 0) {
976 if (rd->is_client == 1)
977 rdma_disconnect(rd->cm_id);
979 rdma_disconnect(rd->child_cm_id);
981 rdma_disconnect(rd->cm_id);
986 if (get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_DISCONNECTED) != 0) {
992 ibv_destroy_cq(rd->cq);
993 ibv_destroy_qp(rd->qp);
995 if (rd->is_client == 1)
996 rdma_destroy_id(rd->cm_id);
998 rdma_destroy_id(rd->child_cm_id);
999 rdma_destroy_id(rd->cm_id);
1002 ibv_destroy_comp_channel(rd->channel);
1003 ibv_dealloc_pd(rd->pd);
1011 struct rdmaio_data *rd = td->io_ops_data;
1015 rd->addr.sin_family = AF_INET;
1016 rd->addr.sin_port = htons(port);
1018 if (inet_aton(host, &rd->addr.sin_addr) != 1) {
1027 memcpy(&rd->addr.sin_addr, hent->h_addr, 4);
1031 err = rdma_resolve_addr(rd->cm_id, NULL, (struct sockaddr *)&rd->addr, 2000);
1037 err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED);
1044 err = rdma_resolve_route(rd->cm_id, 2000);
1050 err = get_next_channel_event(td, rd->cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED);
1064 err = ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr);
1075 struct rdmaio_data *rd = td->io_ops_data;
1081 rd->addr.sin_family = AF_INET;
1082 rd->addr.sin_addr.s_addr = htonl(INADDR_ANY);
1083 rd->addr.sin_port = htons(port);
1086 if (rdma_bind_addr(rd->cm_id, (struct sockaddr *)&rd->addr) != 0) {
1091 if (rdma_listen(rd->cm_id, 3) != 0) {
1100 (td, rd->cm_channel, RDMA_CM_EVENT_CONNECT_REQUEST) != 0) {
1112 if (ibv_post_recv(rd->qp, &rd->rq_wr, &bad_wr) != 0) {
1210 struct rdmaio_data *rd = td->io_ops_data;
1236 rd->rdma_protocol = o->verb;
1237 rd->cq_event_num = 0;
1239 rd->cm_channel = rdma_create_event_channel();
1240 if (!rd->cm_channel) {
1245 ret = rdma_create_id(rd->cm_channel, &rd->cm_id, rd, RDMA_PS_TCP);
1251 if ((rd->rdma_protocol == FIO_RDMA_MEM_WRITE) ||
1252 (rd->rdma_protocol == FIO_RDMA_MEM_READ)) {
1253 rd->rmt_us =
1255 memset(rd->rmt_us, 0,
1257 rd->rmt_nr = 0;
1260 rd->io_us_queued = malloc(td->o.iodepth * sizeof(struct io_u *));
1261 memset(rd->io_us_queued, 0, td->o.iodepth * sizeof(struct io_u *));
1262 rd->io_u_queued_nr = 0;
1264 rd->io_us_flight = malloc(td->o.iodepth * sizeof(struct io_u *));
1265 memset(rd->io_us_flight, 0, td->o.iodepth * sizeof(struct io_u *));
1266 rd->io_u_flight_nr = 0;
1268 rd->io_us_completed = malloc(td->o.iodepth * sizeof(struct io_u *));
1269 memset(rd->io_us_completed, 0, td->o.iodepth * sizeof(struct io_u *));
1270 rd->io_u_completed_nr = 0;
1273 rd->is_client = 0;
1275 /* server: rd->rdma_buf_len will be set up after the request is received */
1278 rd->is_client = 1;
1283 rd->send_buf.max_bs = htonl(max_bs);
1293 io_u->mr = ibv_reg_mr(rd->pd, io_u->buf, max_bs,
1302 rd->send_buf.rmt_us[i].buf =
1304 rd->send_buf.rmt_us[i].rkey = htonl(io_u->mr->rkey);
1305 rd->send_buf.rmt_us[i].size = htonl(max_bs);
1312 rd->send_buf.nr = htonl(i);
1319 struct rdmaio_data *rd = td->io_ops_data;
1321 if (rd)
1322 free(rd);
1327 struct rdmaio_data *rd;
1336 rd = malloc(sizeof(*rd));
1338 memset(rd, 0, sizeof(*rd));
1339 init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME, 0);
1340 td->io_ops_data = rd;
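
The matches at lines 1319-1340 show the whole lifecycle of rd: it is allocated once, zeroed, seeded, and stored in td->io_ops_data, from which every other callback above re-derives it; on cleanup it is simply freed. Below is a minimal sketch reconstructed from those matches, not the verbatim source: the callback names and the "../fio.h" include follow fio's engines/rdma.c, the other fio internals (struct thread_data, init_rand_seed, GOLDEN_RATIO_PRIME) are assumed from context, and the malloc NULL check is an addition that does not appear in the matched lines.

    #include "../fio.h"	/* fio internals: struct thread_data, io_ops_data, init_rand_seed, GOLDEN_RATIO_PRIME */

    /* struct rdmaio_data is the engine's private per-thread state, defined earlier in the same file. */

    /* Setup (matched lines 1327-1340): allocate the per-thread engine state once
     * and hang it off td->io_ops_data. */
    static int fio_rdmaio_setup(struct thread_data *td)
    {
            struct rdmaio_data *rd;

            if (!td->io_ops_data) {
                    rd = malloc(sizeof(*rd));
                    if (!rd)	/* NULL check added for the sketch; not in the matched lines */
                            return 1;
                    memset(rd, 0, sizeof(*rd));
                    /* seed the PRNG later used to pick a remote buffer index (matched lines 705, 715) */
                    init_rand_seed(&rd->rand_state, (unsigned int) GOLDEN_RATIO_PRIME, 0);
                    td->io_ops_data = rd;	/* every later callback starts with: rd = td->io_ops_data */
            }
            return 0;
    }

    /* Cleanup (matched lines 1319-1322): the state is simply freed. */
    static void fio_rdmaio_cleanup(struct thread_data *td)
    {
            struct rdmaio_data *rd = td->io_ops_data;

            if (rd)
                    free(rd);
    }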