nvmf/rdma: Use RDMA provider API to post recv WRs

Replace the open-coded recvs_to_post list and the direct ibv_post_recv()
calls with the RDMA provider's spdk_rdma_qp_queue_recv_wrs() and
spdk_rdma_qp_flush_recv_wrs(), mirroring the API already used for
shared receive queues (SRQ).

Change-Id: I782698bb12f8bbe9dc3bf06db8d83c9caf42a8db
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6292
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Alexey Marchuk, 2020-12-03 16:19:44 +03:00; committed by Tomasz Zawadzki
parent 696e858094
commit d9ff7d09ed
3 changed files with 22 additions and 46 deletions


@@ -277,11 +277,6 @@ struct spdk_nvmf_rdma_resource_opts {
 	bool		shared;
 };
 
-struct spdk_nvmf_recv_wr_list {
-	struct ibv_recv_wr	*first;
-	struct ibv_recv_wr	*last;
-};
-
 struct spdk_nvmf_rdma_resources {
 	/* Array of size "max_queue_depth" containing RDMA requests. */
 	struct spdk_nvmf_rdma_request		*reqs;
@@ -307,9 +302,6 @@ struct spdk_nvmf_rdma_resources {
 	void				*bufs;
 	struct ibv_mr			*bufs_mr;
 
-	/* The list of pending recvs to transfer */
-	struct spdk_nvmf_recv_wr_list	recvs_to_post;
-
 	/* Receives that are waiting for a request object */
 	STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
@@ -687,7 +679,7 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 	struct spdk_nvmf_rdma_resources	*resources;
 	struct spdk_nvmf_rdma_request	*rdma_req;
 	struct spdk_nvmf_rdma_recv	*rdma_recv;
-	struct ibv_qp			*qp = NULL;
+	struct spdk_rdma_qp		*qp = NULL;
 	struct spdk_rdma_srq		*srq = NULL;
 	struct ibv_recv_wr		*bad_wr = NULL;
 	uint32_t			i;
@@ -756,7 +748,7 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 	if (opts->shared) {
 		srq = (struct spdk_rdma_srq *)opts->qp;
 	} else {
-		qp = (struct ibv_qp *)opts->qp;
+		qp = (struct spdk_rdma_qp *)opts->qp;
 	}
 
 	for (i = 0; i < opts->max_queue_depth; i++) {
@@ -785,13 +777,10 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 		rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
 		rdma_recv->wr.sg_list = rdma_recv->sgl;
-		if (opts->shared) {
+		if (srq) {
 			spdk_rdma_srq_queue_recv_wrs(srq, &rdma_recv->wr);
 		} else {
-			rc = ibv_post_recv(qp, &rdma_recv->wr, &bad_wr);
-			if (rc) {
-				goto cleanup;
-			}
+			spdk_rdma_qp_queue_recv_wrs(qp, &rdma_recv->wr);
 		}
 	}
@@ -833,12 +822,15 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 		STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
 	}
 
-	if (opts->shared) {
+	if (srq) {
 		rc = spdk_rdma_srq_flush_recv_wrs(srq, &bad_wr);
-		if (rc) {
-			goto cleanup;
-		}
+	} else {
+		rc = spdk_rdma_qp_flush_recv_wrs(qp, &bad_wr);
+	}
+
+	if (rc) {
+		goto cleanup;
 	}
 
 	return resources;
@@ -1030,7 +1022,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
 	transport = &rtransport->transport;
 
-	opts.qp = rqpair->rdma_qp->qp;
+	opts.qp = rqpair->rdma_qp;
 	opts.pd = rqpair->cm_id->pd;
 	opts.qpair = rqpair;
 	opts.shared = false;
@@ -1065,27 +1057,15 @@ error:
 static void
 nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *first)
 {
-	struct ibv_recv_wr *last;
 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
 			struct spdk_nvmf_rdma_transport, transport);
 
 	if (rqpair->srq != NULL) {
 		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, first);
-		return;
-	}
-
-	last = first;
-	while (last->next != NULL) {
-		last = last->next;
-	}
-
-	if (rqpair->resources->recvs_to_post.first == NULL) {
-		rqpair->resources->recvs_to_post.first = first;
-		rqpair->resources->recvs_to_post.last = last;
-		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
 	} else {
-		rqpair->resources->recvs_to_post.last->next = first;
-		rqpair->resources->recvs_to_post.last = last;
+		if (spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, first)) {
+			STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
+		}
 	}
 
 	if (rtransport->rdma_opts.no_wr_batching) {
@@ -3651,13 +3631,10 @@ _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
 	} else {
 		while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
 			rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
-			assert(rqpair->resources->recvs_to_post.first != NULL);
-			rc = ibv_post_recv(rqpair->rdma_qp->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
+			rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
			if (rc) {
 				_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
 			}
-			rqpair->resources->recvs_to_post.first = NULL;
-			rqpair->resources->recvs_to_post.last = NULL;
 			STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_recv, recv_link);
 		}
 	}
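Note on the pattern being adopted: spdk_rdma_qp_queue_recv_wrs() chains recv WRs and reports, via its bool return, whether this is the first batch since the last flush, so the caller inserts the qpair into qpairs_pending_recv exactly once; spdk_rdma_qp_flush_recv_wrs() then posts the whole chain in one call. This is the same queue-and-flush batching the deleted recvs_to_post code did by hand. Below is a minimal sketch of that contract against a plain verbs QP; "struct wr_queue" and the helper names are hypothetical, not SPDK API, and the real state lives inside struct spdk_rdma_qp in SPDK's RDMA provider (lib/rdma).

/*
 * Minimal sketch of the queue/flush contract, for illustration only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <infiniband/verbs.h>

struct wr_queue {				/* hypothetical stand-in for spdk_rdma_qp state */
	struct ibv_qp		*qp;
	struct ibv_recv_wr	*first;		/* head of the chained recv WRs */
	struct ibv_recv_wr	*last;		/* tail, for O(1) append */
};

/* Append a WR chain; returns true if the queue was empty, i.e. the caller
 * should schedule a flush (e.g. insert the qpair into a pending list). */
static bool
queue_recv_wrs(struct wr_queue *q, struct ibv_recv_wr *first)
{
	struct ibv_recv_wr *last = first;

	while (last->next != NULL) {
		last = last->next;
	}

	if (q->first == NULL) {
		q->first = first;
		q->last = last;
		return true;
	}

	q->last->next = first;
	q->last = last;
	return false;
}

/* Post every queued WR with a single ibv_post_recv() and reset the queue. */
static int
flush_recv_wrs(struct wr_queue *q, struct ibv_recv_wr **bad_wr)
{
	int rc = 0;

	if (q->first != NULL) {
		rc = ibv_post_recv(q->qp, q->first, bad_wr);
		q->first = NULL;
		q->last = NULL;
	}

	return rc;
}

Batching this way trades one ibv_post_recv() doorbell per WR for one per poll cycle; the no_wr_batching transport option seen above opts out by flushing immediately after queueing.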


@@ -2,7 +2,7 @@
  *   BSD LICENSE
  *
  *   Copyright (c) Intel Corporation. All rights reserved.
- *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -57,6 +57,10 @@ DEFINE_STUB(spdk_rdma_srq_queue_recv_wrs, bool, (struct spdk_rdma_srq *rdma_srq,
 		struct ibv_recv_wr *first), true);
 DEFINE_STUB(spdk_rdma_srq_flush_recv_wrs, int, (struct spdk_rdma_srq *rdma_srq,
 		struct ibv_recv_wr **bad_wr), 0);
+DEFINE_STUB(spdk_rdma_qp_queue_recv_wrs, bool, (struct spdk_rdma_qp *spdk_rdma_qp,
+		struct ibv_recv_wr *first), true);
+DEFINE_STUB(spdk_rdma_qp_flush_recv_wrs, int, (struct spdk_rdma_qp *spdk_rdma_qp,
+		struct ibv_recv_wr **bad_wr), 0);
 DEFINE_STUB(spdk_rdma_create_mem_map, struct spdk_rdma_mem_map *, (struct ibv_pd *pd,
 		struct spdk_nvme_rdma_hooks *hooks), NULL);
 DEFINE_STUB_V(spdk_rdma_free_mem_map, (struct spdk_rdma_mem_map **map));
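The defaults on the new stubs keep the request-processing tests on their existing paths: queue_recv_wrs returning true means the code under test always treats queued WRs as a first batch, and flush_recv_wrs returning 0 is an always-successful post. DEFINE_STUB(fn, ret, dargs, val) generates a definition of fn whose return value a test can change at runtime. A sketch of how a test could instead exercise the failure paths, assuming the MOCK_SET helper from spdk_internal/mock.h (this commit does not do this):

/* Hypothetical test fragment, not part of this commit. */

/* Pretend WRs were already queued since the last flush: the bool return is
 * false, so nvmf_rdma_qpair_queue_recv_wrs() must not re-insert the qpair
 * into qpairs_pending_recv. */
MOCK_SET(spdk_rdma_qp_queue_recv_wrs, false);

/* Pretend the flush fails: a non-zero rc makes _poller_submit_recvs() call
 * _qp_reset_failed_recvs(). */
MOCK_SET(spdk_rdma_qp_flush_recv_wrs, -1);

/* ... exercise the code under test, assert on the resulting state ... */

/* Restore the DEFINE_STUB defaults for subsequent tests. */
MOCK_SET(spdk_rdma_qp_queue_recv_wrs, true);
MOCK_SET(spdk_rdma_qp_flush_recv_wrs, 0);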


@@ -2,7 +2,7 @@
  *   BSD LICENSE
  *
  *   Copyright (c) Intel Corporation. All rights reserved.
- *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -543,7 +543,6 @@ qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
 	rqpair->max_send_depth = 16;
 	rqpair->max_read_depth = 16;
 	rqpair->qpair.transport = transport;
-	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
 }
 
 static void
@@ -601,8 +600,6 @@ test_spdk_nvmf_rdma_request_process(void)
 	CU_ASSERT(progress == true);
 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
 	CU_ASSERT(rdma_req->recv == NULL);
-	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
-	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
 	/* COMPLETED -> FREE */
 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
@@ -635,8 +632,6 @@ test_spdk_nvmf_rdma_request_process(void)
 	CU_ASSERT(progress == true);
 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
 	CU_ASSERT(rdma_req->recv == NULL);
-	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
-	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
 	/* COMPLETED -> FREE */
 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
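The dropped assertions reflect lost visibility rather than lost behavior: the pending recv chain now lives inside struct spdk_rdma_qp, where the unit test can no longer inspect it through resources. If that coverage were wanted back, one hypothetical option is to replace the DEFINE_STUB with a hand-written mock that records what was queued; a sketch, not part of this commit:

/* Hypothetical replacement for the spdk_rdma_qp_queue_recv_wrs DEFINE_STUB:
 * record the queued WR chain so the test can assert on it again. */
static struct ibv_recv_wr *g_queued_recv_wrs;

bool
spdk_rdma_qp_queue_recv_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_recv_wr *first)
{
	g_queued_recv_wrs = first;	/* remember what the transport handed us */
	return true;			/* report "first batch since last flush" */
}

/* In test_spdk_nvmf_rdma_request_process(), the removed checks could then
 * become: CU_ASSERT(g_queued_recv_wrs == &rdma_recv->wr); */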