nvmf/rdma: Use RDMA provider API to send WRs

Change-Id: I9bd6956d27716ae95abea0fd78adebaa9b288edb
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1660
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Alexey Marchuk 2020-04-02 10:24:55 +03:00 committed by Tomasz Zawadzki
parent a12530d6d2
commit bbb493ce0c
3 changed files with 13 additions and 55 deletions
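
For context on the pattern this commit adopts (the provider internals are not part of this diff, so the sketch below only illustrates the semantics the transport code relies on): instead of the transport keeping a per-qpair sends_to_post list, each send WR chain is handed to spdk_rdma_qp_queue_send_wrs(), and a return value of true is taken to mean "this qpair had nothing queued yet, add it to the poller's qpairs_pending_send list"; the poller later posts everything with spdk_rdma_qp_flush_send_wrs(). A minimal hedged sketch, with a hypothetical example_rdma_qp standing in for struct spdk_rdma_qp:

#include <stdbool.h>
#include <stddef.h>
#include <infiniband/verbs.h>

/* Hypothetical stand-in for struct spdk_rdma_qp; the real layout is an
 * assumption, mirroring the first/last tracking removed from nvmf/rdma below. */
struct example_rdma_qp {
	struct ibv_qp *qp;
	struct {
		struct ibv_send_wr *first;
		struct ibv_send_wr *last;
	} send_wrs;
};

/* Chain 'first' (a single WR or the head of a linked list) onto the queued
 * sends. Returns true when the qpair had nothing queued, i.e. the caller
 * should add it to the poller's qpairs_pending_send list exactly once. */
static bool
example_queue_send_wrs(struct example_rdma_qp *rqp, struct ibv_send_wr *first)
{
	struct ibv_send_wr *last = first;
	bool was_empty = (rqp->send_wrs.first == NULL);

	while (last->next != NULL) {
		last = last->next;
	}

	if (was_empty) {
		rqp->send_wrs.first = first;
	} else {
		rqp->send_wrs.last->next = first;
	}
	rqp->send_wrs.last = last;

	return was_empty;
}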

View File

@@ -373,9 +373,6 @@ struct spdk_nvmf_rdma_qpair {
/* The maximum number of SGEs per WR on the recv queue */
uint32_t max_recv_sge;
/* The list of pending send requests for a transfer */
struct spdk_nvmf_send_wr_list sends_to_post;
struct spdk_nvmf_rdma_resources *resources;
STAILQ_HEAD(, spdk_nvmf_rdma_request) pending_rdma_read_queue;
@@ -997,9 +994,6 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);
rqpair->sends_to_post.first = NULL;
rqpair->sends_to_post.last = NULL;
if (rqpair->poller->srq == NULL) {
rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
transport = &rtransport->transport;
@@ -1058,28 +1052,6 @@ nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_r
}
}
/* Append the given send wr structure to the qpair's outstanding sends list. */
/* This function accepts either a single wr or the first wr in a linked list. */
static void
nvmf_rdma_qpair_queue_send_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *first)
{
struct ibv_send_wr *last;
last = first;
while (last->next != NULL) {
last = last->next;
}
if (rqpair->sends_to_post.first == NULL) {
rqpair->sends_to_post.first = first;
rqpair->sends_to_post.last = last;
STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
} else {
rqpair->sends_to_post.last->next = first;
rqpair->sends_to_post.last = last;
}
}
static int
request_transfer_in(struct spdk_nvmf_request *req)
{
@@ -1094,7 +1066,10 @@ request_transfer_in(struct spdk_nvmf_request *req)
assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
assert(rdma_req != NULL);
nvmf_rdma_qpair_queue_send_wrs(rqpair, &rdma_req->data.wr);
if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, &rdma_req->data.wr)) {
STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
}
rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
rqpair->current_send_depth += rdma_req->num_outstanding_data_wr;
return 0;
@@ -1145,7 +1120,10 @@ request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
*data_posted = 1;
num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
}
nvmf_rdma_qpair_queue_send_wrs(rqpair, first);
if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, first)) {
STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
}
/* +1 for the rsp wr */
rqpair->current_send_depth += num_outstanding_data_wr + 1;
@@ -3743,15 +3721,12 @@ _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
assert(rqpair->sends_to_post.first != NULL);
rc = ibv_post_send(rqpair->rdma_qp->qp, rqpair->sends_to_post.first, &bad_wr);
rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_wr);
/* bad wr always points to the first wr that failed. */
if (rc) {
_qp_reset_failed_sends(rtransport, rqpair, bad_wr, rc);
}
rqpair->sends_to_post.first = NULL;
rqpair->sends_to_post.last = NULL;
STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_send, send_link);
}
}
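
Correspondingly, the spdk_rdma_qp_flush_send_wrs() call used above is assumed to post the accumulated chain in a single ibv_post_send() and then reset its internal list, which is why the transport no longer clears sends_to_post here. A hedged sketch continuing the hypothetical example_rdma_qp above, not the shipped provider code:

/* Post the queued chain and reset it; on failure *bad_wr points at the first
 * WR that could not be posted, matching the _qp_reset_failed_sends() handling
 * in the hunk above. */
static int
example_flush_send_wrs(struct example_rdma_qp *rqp, struct ibv_send_wr **bad_wr)
{
	int rc;

	if (rqp->send_wrs.first == NULL) {
		return 0;
	}

	rc = ibv_post_send(rqp->qp, rqp->send_wrs.first, bad_wr);

	rqp->send_wrs.first = NULL;
	rqp->send_wrs.last = NULL;

	return rc;
}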

View File

@@ -41,3 +41,7 @@ DEFINE_STUB(spdk_rdma_qp_create, struct spdk_rdma_qp *, (struct rdma_cm_id *cm_i
DEFINE_STUB(spdk_rdma_qp_complete_connect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);
DEFINE_STUB_V(spdk_rdma_qp_destroy, (struct spdk_rdma_qp *spdk_rdma_qp));
DEFINE_STUB(spdk_rdma_qp_disconnect, int, (struct spdk_rdma_qp *spdk_rdma_qp), 0);
DEFINE_STUB(spdk_rdma_qp_queue_send_wrs, bool, (struct spdk_rdma_qp *spdk_rdma_qp,
struct ibv_send_wr *first), true);
DEFINE_STUB(spdk_rdma_qp_flush_send_wrs, int, (struct spdk_rdma_qp *spdk_rdma_qp,
struct ibv_send_wr **bad_wr), 0);
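
These stubs default to true and 0, matching the happy path exercised by the unit tests below. If a test needed the opposite queueing behaviour (the qpair already has WRs queued, so the caller must not re-insert it into qpairs_pending_send), it could presumably override the value with the MOCK_SET helper from spdk_internal/mock.h, the same mechanism DEFINE_STUB is built on. Hypothetical usage:

MOCK_SET(spdk_rdma_qp_queue_send_wrs, false);   /* simulate "already queued" */
/* ... exercise nvmf_rdma_request_process() ... */
MOCK_SET(spdk_rdma_qp_queue_send_wrs, true);    /* back to the stub default */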

View File

@@ -647,8 +647,6 @@ test_spdk_nvmf_rdma_request_process(void)
CU_ASSERT(progress == true);
CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
CU_ASSERT(rdma_req->recv == NULL);
CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
/* COMPLETED -> FREE */
@@ -671,9 +669,6 @@ test_spdk_nvmf_rdma_request_process(void)
CU_ASSERT(progress == true);
CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->data.wr);
rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
STAILQ_INIT(&poller.qpairs_pending_send);
/* READY_TO_EXECUTE -> EXECUTING */
rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
@@ -686,8 +681,6 @@ test_spdk_nvmf_rdma_request_process(void)
CU_ASSERT(progress == true);
CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
CU_ASSERT(rdma_req->recv == NULL);
CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->rsp.wr);
CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
/* COMPLETED -> FREE */
@@ -714,20 +707,12 @@ test_spdk_nvmf_rdma_request_process(void)
rqpair.current_recv_depth = 1;
nvmf_rdma_request_process(&rtransport, req1);
CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
/* WRITE 1 is the first in batching list */
CU_ASSERT(rqpair.sends_to_post.first == &req1->data.wr);
CU_ASSERT(rqpair.sends_to_post.last == &req1->data.wr);
/* WRITE 2: NEW -> TRANSFERRING_H2C */
rqpair.current_recv_depth = 2;
nvmf_rdma_request_process(&rtransport, req2);
CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
/* WRITE 2 is now also in the batching list */
CU_ASSERT(rqpair.sends_to_post.first->next == &req2->data.wr);
CU_ASSERT(rqpair.sends_to_post.last == &req2->data.wr);
/* Send everything */
rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
STAILQ_INIT(&poller.qpairs_pending_send);
/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
@@ -739,9 +724,6 @@ test_spdk_nvmf_rdma_request_process(void)
req1->state = RDMA_REQUEST_STATE_EXECUTED;
nvmf_rdma_request_process(&rtransport, req1);
CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
CU_ASSERT(rqpair.sends_to_post.first == &req1->rsp.wr);
CU_ASSERT(rqpair.sends_to_post.last == &req1->rsp.wr);
rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
STAILQ_INIT(&poller.qpairs_pending_send);
/* WRITE 1: COMPLETED -> FREE */
req1->state = RDMA_REQUEST_STATE_COMPLETED;
@@ -758,9 +740,6 @@ test_spdk_nvmf_rdma_request_process(void)
req2->state = RDMA_REQUEST_STATE_EXECUTED;
nvmf_rdma_request_process(&rtransport, req2);
CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
CU_ASSERT(rqpair.sends_to_post.first == &req2->rsp.wr);
CU_ASSERT(rqpair.sends_to_post.last == &req2->rsp.wr);
rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
STAILQ_INIT(&poller.qpairs_pending_send);
/* WRITE 2: COMPLETED -> FREE */
req2->state = RDMA_REQUEST_STATE_COMPLETED;