nvme_rdma: Defer send/recv objects allocation until connection is established
When SRQ is supported, recv objects will be allocated by poll group and qpair will be associated with and use them. In this case, we do not want qpair to allocate and free recv objects. When connection is established, it will be decided if SRQ is used or not. Hence, defer recv objects allocation until connection is established. Send objects are not affected directly by SRQ, but nvme_rdma_register_reqs() no longer does any registration and deferring send objects allocation makes the code more consistent. Hence, defer send objects allocation until connection is established too. Even after this patch, we rely on nvme_rdma_ctrlr_delete_io_qpair() to free resources completely. Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com> Signed-off-by: Denis Nagorny <denisn@nvidia.com> Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com> Change-Id: Ic151fad01009d92a7fc809a730e6e9dff1a365f3 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14169 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Community-CI: Mellanox Build Bot Reviewed-by: Ben Walker <benjamin.walker@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
This commit is contained in:
parent
6602291766
commit
8e48517f96
@ -1088,6 +1088,14 @@ nvme_rdma_connect_established(struct nvme_rdma_qpair *rqpair, int ret)
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = nvme_rdma_alloc_reqs(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
|
||||
if (ret) {
|
||||
SPDK_ERRLOG("Unable to allocate rqpair RDMA requests\n");
|
||||
return -1;
|
||||
}
|
||||
SPDK_DEBUGLOG(nvme, "RDMA requests allocated\n");
|
||||
|
||||
ret = nvme_rdma_register_reqs(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
|
||||
if (ret) {
|
||||
@ -1096,6 +1104,14 @@ nvme_rdma_connect_established(struct nvme_rdma_qpair *rqpair, int ret)
|
||||
}
|
||||
SPDK_DEBUGLOG(nvme, "RDMA requests registered\n");
|
||||
|
||||
ret = nvme_rdma_alloc_rsps(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
|
||||
if (ret < 0) {
|
||||
SPDK_ERRLOG("Unable to allocate rqpair RDMA responses\n");
|
||||
return -1;
|
||||
}
|
||||
SPDK_DEBUGLOG(nvme, "RDMA responses allocated\n");
|
||||
|
||||
ret = nvme_rdma_register_rsps(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
|
||||
if (ret < 0) {
|
||||
@ -1776,25 +1792,6 @@ nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
rc = nvme_rdma_alloc_reqs(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", rc);
|
||||
if (rc) {
|
||||
SPDK_ERRLOG("Unable to allocate rqpair RDMA requests\n");
|
||||
spdk_free(rqpair);
|
||||
return NULL;
|
||||
}
|
||||
SPDK_DEBUGLOG(nvme, "RDMA requests allocated\n");
|
||||
|
||||
rc = nvme_rdma_alloc_rsps(rqpair);
|
||||
SPDK_DEBUGLOG(nvme, "rc =%d\n", rc);
|
||||
if (rc < 0) {
|
||||
SPDK_ERRLOG("Unable to allocate rqpair RDMA responses\n");
|
||||
nvme_rdma_free_reqs(rqpair);
|
||||
spdk_free(rqpair);
|
||||
return NULL;
|
||||
}
|
||||
SPDK_DEBUGLOG(nvme, "RDMA responses allocated\n");
|
||||
|
||||
return qpair;
|
||||
}
|
||||
|
||||
|
@ -551,12 +551,7 @@ test_nvme_rdma_ctrlr_create_qpair(void)
|
||||
CU_ASSERT(qpair == &rqpair->qpair);
|
||||
CU_ASSERT(rqpair->num_entries == qsize - 1);
|
||||
CU_ASSERT(rqpair->delay_cmd_submit == false);
|
||||
CU_ASSERT(rqpair->rsp_sgls != NULL);
|
||||
CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
|
||||
CU_ASSERT(rqpair->rsps != NULL);
|
||||
|
||||
nvme_rdma_free_reqs(rqpair);
|
||||
nvme_rdma_free_rsps(rqpair);
|
||||
spdk_free(rqpair);
|
||||
rqpair = NULL;
|
||||
|
||||
@ -568,12 +563,7 @@ test_nvme_rdma_ctrlr_create_qpair(void)
|
||||
CU_ASSERT(qpair != NULL);
|
||||
rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
|
||||
CU_ASSERT(rqpair->num_entries == qsize - 1);
|
||||
CU_ASSERT(rqpair->rsp_sgls != NULL);
|
||||
CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
|
||||
CU_ASSERT(rqpair->rsps != NULL);
|
||||
|
||||
nvme_rdma_free_reqs(rqpair);
|
||||
nvme_rdma_free_rsps(rqpair);
|
||||
spdk_free(rqpair);
|
||||
rqpair = NULL;
|
||||
|
||||
@ -773,9 +763,6 @@ test_nvme_rdma_ctrlr_construct(void)
|
||||
rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
|
||||
CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
|
||||
CU_ASSERT(rqpair->delay_cmd_submit == false);
|
||||
CU_ASSERT(rqpair->rsp_sgls != NULL);
|
||||
CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
|
||||
CU_ASSERT(rqpair->rsps != NULL);
|
||||
MOCK_CLEAR(rdma_create_event_channel);
|
||||
|
||||
/* Hardcode the trtype, because nvme_qpair_init() is stub function. */
|
||||
|
Loading…
Reference in New Issue
Block a user