nvmf/rdma: Use RDMA provider API to post recv WRs
Change-Id: I782698bb12f8bbe9dc3bf06db8d83c9caf42a8db
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6292
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:

parent 696e858094
commit d9ff7d09ed
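Before this change, the nvmf target posted receive WRs directly with ibv_post_recv(), batching them by hand in a per-resource spdk_nvmf_recv_wr_list. The diff below replaces that with the spdk_rdma provider pair spdk_rdma_qp_queue_recv_wrs()/spdk_rdma_qp_flush_recv_wrs(), mirroring the SRQ variants already in use: WRs are chained as they become ready and posted with a single flush. Judging by the nvmf_rdma_qpair_queue_recv_wrs() hunk, the queue call returns true when it accepts the first batch, which is the caller's cue to put the qpair on the poller's pending-recv list exactly once. A minimal self-contained sketch of that contract follows; all mock_* types and names are invented stand-ins for ibv_recv_wr/spdk_rdma_qp, not SPDK or verbs API:

```c
/* Sketch of the queue-then-flush contract, assuming the semantics
 * visible in the hunks below. Mock types only; not SPDK code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_recv_wr {
	int			id;
	struct mock_recv_wr	*next;	/* chaining field, as in ibv_recv_wr */
};

struct mock_rdma_qp {
	struct mock_recv_wr	*first;	/* pending chain, owned by the provider */
	struct mock_recv_wr	*last;
};

/* Append a WR chain; return true if the queue was empty before, i.e.
 * the caller should remember this qpair for a later flush. */
static bool
mock_qp_queue_recv_wrs(struct mock_rdma_qp *qp, struct mock_recv_wr *first)
{
	struct mock_recv_wr *last = first;
	bool was_empty = (qp->first == NULL);

	while (last->next != NULL) {
		last = last->next;
	}
	if (was_empty) {
		qp->first = first;
	} else {
		qp->last->next = first;
	}
	qp->last = last;
	return was_empty;
}

/* Post the whole chain at once. A real provider would hand qp->first
 * to ibv_post_recv() and report the first failed WR via *bad_wr. */
static int
mock_qp_flush_recv_wrs(struct mock_rdma_qp *qp, struct mock_recv_wr **bad_wr)
{
	struct mock_recv_wr *wr;

	for (wr = qp->first; wr != NULL; wr = wr->next) {
		printf("posting recv wr %d\n", wr->id);
	}
	qp->first = qp->last = NULL;
	*bad_wr = NULL;
	return 0;
}

int
main(void)
{
	struct mock_rdma_qp qp = {0};
	struct mock_recv_wr wrs[3] = {{.id = 0}, {.id = 1}, {.id = 2}};
	struct mock_recv_wr *bad_wr;
	int i;

	/* Queue one WR at a time, as the target does per completed request. */
	for (i = 0; i < 3; i++) {
		if (mock_qp_queue_recv_wrs(&qp, &wrs[i])) {
			printf("first batch queued; qpair joins the pending list\n");
		}
	}
	/* One flush instead of three ibv_post_recv() calls. */
	return mock_qp_flush_recv_wrs(&qp, &bad_wr);
}
```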
@@ -277,11 +277,6 @@ struct spdk_nvmf_rdma_resource_opts {
 	bool		shared;
 };
 
-struct spdk_nvmf_recv_wr_list {
-	struct ibv_recv_wr	*first;
-	struct ibv_recv_wr	*last;
-};
-
 struct spdk_nvmf_rdma_resources {
 	/* Array of size "max_queue_depth" containing RDMA requests. */
 	struct spdk_nvmf_rdma_request	*reqs;
@@ -307,9 +302,6 @@ struct spdk_nvmf_rdma_resources {
 	void				*bufs;
 	struct ibv_mr			*bufs_mr;
 
-	/* The list of pending recvs to transfer */
-	struct spdk_nvmf_recv_wr_list	recvs_to_post;
-
 	/* Receives that are waiting for a request object */
 	STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
 
@@ -687,7 +679,7 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 	struct spdk_nvmf_rdma_resources	*resources;
 	struct spdk_nvmf_rdma_request	*rdma_req;
 	struct spdk_nvmf_rdma_recv	*rdma_recv;
-	struct ibv_qp			*qp = NULL;
+	struct spdk_rdma_qp		*qp = NULL;
 	struct spdk_rdma_srq		*srq = NULL;
 	struct ibv_recv_wr		*bad_wr = NULL;
 	uint32_t			i;
@@ -756,7 +748,7 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 	if (opts->shared) {
 		srq = (struct spdk_rdma_srq *)opts->qp;
 	} else {
-		qp = (struct ibv_qp *)opts->qp;
+		qp = (struct spdk_rdma_qp *)opts->qp;
 	}
 
 	for (i = 0; i < opts->max_queue_depth; i++) {
@@ -785,13 +777,10 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 
 		rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
 		rdma_recv->wr.sg_list = rdma_recv->sgl;
-		if (opts->shared) {
+		if (srq) {
 			spdk_rdma_srq_queue_recv_wrs(srq, &rdma_recv->wr);
 		} else {
-			rc = ibv_post_recv(qp, &rdma_recv->wr, &bad_wr);
-		}
-		if (rc) {
-			goto cleanup;
+			spdk_rdma_qp_queue_recv_wrs(qp, &rdma_recv->wr);
 		}
 	}
 
@@ -833,11 +822,14 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
 		STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
 	}
 
-	if (opts->shared) {
+	if (srq) {
 		rc = spdk_rdma_srq_flush_recv_wrs(srq, &bad_wr);
-		if (rc) {
-			goto cleanup;
-		}
+	} else {
+		rc = spdk_rdma_qp_flush_recv_wrs(qp, &bad_wr);
+	}
+
+	if (rc) {
+		goto cleanup;
 	}
 
 	return resources;
@@ -1030,7 +1022,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
 	transport = &rtransport->transport;
 
-	opts.qp = rqpair->rdma_qp->qp;
+	opts.qp = rqpair->rdma_qp;
 	opts.pd = rqpair->cm_id->pd;
 	opts.qpair = rqpair;
 	opts.shared = false;
@@ -1065,27 +1057,15 @@ error:
 static void
 nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *first)
 {
-	struct ibv_recv_wr *last;
 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
 			struct spdk_nvmf_rdma_transport, transport);
 
 	if (rqpair->srq != NULL) {
 		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, first);
-		return;
-	}
-
-	last = first;
-	while (last->next != NULL) {
-		last = last->next;
-	}
-
-	if (rqpair->resources->recvs_to_post.first == NULL) {
-		rqpair->resources->recvs_to_post.first = first;
-		rqpair->resources->recvs_to_post.last = last;
-		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
 	} else {
-		rqpair->resources->recvs_to_post.last->next = first;
-		rqpair->resources->recvs_to_post.last = last;
+		if (spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, first)) {
+			STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
+		}
 	}
 
 	if (rtransport->rdma_opts.no_wr_batching) {
@@ -3651,13 +3631,10 @@ _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
 	} else {
 		while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
 			rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
-			assert(rqpair->resources->recvs_to_post.first != NULL);
-			rc = ibv_post_recv(rqpair->rdma_qp->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
+			rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
 			if (rc) {
 				_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
 			}
-			rqpair->resources->recvs_to_post.first = NULL;
-			rqpair->resources->recvs_to_post.last = NULL;
 			STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_recv, recv_link);
 		}
 	}
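Taken together with the nvmf_rdma_qpair_queue_recv_wrs() hunk above, the poller-side change restores the original batching behavior through the provider: recvs are chained as requests complete, and each poll issues one flush per qpair on the pending list. The error path is unchanged in spirit; on a failed flush the provider reports the first unposted WR through bad_recv_wr, and _qp_reset_failed_recvs() reclaims the rest of the chain, just as it did after a failed ibv_post_recv().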
@@ -2,7 +2,7 @@
  * BSD LICENSE
  *
  * Copyright (c) Intel Corporation. All rights reserved.
- * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -57,6 +57,10 @@ DEFINE_STUB(spdk_rdma_srq_queue_recv_wrs, bool, (struct spdk_rdma_srq *rdma_srq,
 		struct ibv_recv_wr *first), true);
 DEFINE_STUB(spdk_rdma_srq_flush_recv_wrs, int, (struct spdk_rdma_srq *rdma_srq,
 		struct ibv_recv_wr **bad_wr), 0);
+DEFINE_STUB(spdk_rdma_qp_queue_recv_wrs, bool, (struct spdk_rdma_qp *spdk_rdma_qp,
+		struct ibv_recv_wr *first), true);
+DEFINE_STUB(spdk_rdma_qp_flush_recv_wrs, int, (struct spdk_rdma_qp *spdk_rdma_qp,
+		struct ibv_recv_wr **bad_wr), 0);
 DEFINE_STUB(spdk_rdma_create_mem_map, struct spdk_rdma_mem_map *, (struct ibv_pd *pd,
 		struct spdk_nvme_rdma_hooks *hooks), NULL);
 DEFINE_STUB_V(spdk_rdma_free_mem_map, (struct spdk_rdma_mem_map **map));
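The stubs above let the unit test link against code that now calls the qp variants of the provider API. For readers unfamiliar with the pattern: SPDK's DEFINE_STUB mock macro generates a function body that returns a canned value. A simplified, hypothetical reimplementation of the idea; MY_DEFINE_STUB and the fake_* names are invented for this sketch:

```c
/* Simplified stand-in for SPDK's DEFINE_STUB; the real macro is
 * richer (e.g. per-test return-value overrides). */
#include <stdbool.h>
#include <stdio.h>

#define MY_DEFINE_STUB(fn, ret, dargs, val) \
	ret fn dargs { return (val); }

struct fake_qp;				/* opaque, never dereferenced */
struct fake_wr { struct fake_wr *next; };

/* Same shapes as the stubs added in the hunk above. */
MY_DEFINE_STUB(fake_qp_queue_recv_wrs, bool,
	       (struct fake_qp *qp, struct fake_wr *first), true)
MY_DEFINE_STUB(fake_qp_flush_recv_wrs, int,
	       (struct fake_qp *qp, struct fake_wr **bad_wr), 0)

int
main(void)
{
	struct fake_wr wr = { NULL };
	struct fake_wr *bad_wr;

	/* Code under test can call these without a real RDMA stack. */
	printf("queue -> %d, flush -> %d\n",
	       fake_qp_queue_recv_wrs(NULL, &wr),
	       fake_qp_flush_recv_wrs(NULL, &bad_wr));
	return 0;
}
```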
@@ -2,7 +2,7 @@
  * BSD LICENSE
  *
  * Copyright (c) Intel Corporation. All rights reserved.
- * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -543,7 +543,6 @@ qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
 	rqpair->max_send_depth = 16;
 	rqpair->max_read_depth = 16;
 	rqpair->qpair.transport = transport;
-	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
 }
 
 static void
@@ -601,8 +600,6 @@ test_spdk_nvmf_rdma_request_process(void)
 	CU_ASSERT(progress == true);
 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
 	CU_ASSERT(rdma_req->recv == NULL);
-	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
-	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
 	/* COMPLETED -> FREE */
 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
@@ -635,8 +632,6 @@ test_spdk_nvmf_rdma_request_process(void)
 	CU_ASSERT(progress == true);
 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
 	CU_ASSERT(rdma_req->recv == NULL);
-	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
-	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
 	/* COMPLETED -> FREE */
 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);