nvme_rdma: Release poller from poll group when qpair is actually disconnected

If the qpair being disconnected is the last qpair on a poller of a poll group,
the CQ is destroyed and the poller is released before the qpair is actually
disconnected.

This patch destroys the CQ and releases the poller only after the qpair is
actually disconnected.
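
For context, the teardown that this reordering targets is driven from the poll group. The following is a minimal sketch against the public SPDK NVMe API; the helper and callback names (shutdown_io_qpair, disconnected_qpair_cb) are illustrative and not part of this patch.

#include "spdk/nvme.h"

/* Invoked by spdk_nvme_poll_group_process_completions() once the transport
 * reports the qpair as actually disconnected.  Only at this point is it safe
 * to release the RDMA poller and its shared CQ, which is what this patch
 * moves into nvme_rdma_qpair_destroy().
 */
static void
disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	spdk_nvme_ctrlr_free_io_qpair(qpair);
}

/* Illustrative teardown: start an asynchronous disconnect, then keep polling
 * the group so the disconnect can make progress.  The qpair still needs its
 * CQ polled while disconnecting, so the poller must stay alive until
 * disconnected_qpair_cb() has fired.
 */
static void
shutdown_io_qpair(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair)
{
	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	/* In a real application this runs repeatedly from the reactor loop. */
	spdk_nvme_poll_group_process_completions(group, 0, disconnected_qpair_cb);
}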

One exception is when spdk_nvme_ctrlr_free_io_qpair() is called for a still
connected qpair. In that case, the qpair is removed from its poll group
before it is actually disconnected, so the CQ is destroyed and the poller is
released when the qpair is removed from the poll group.
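
A hedged sketch of that exception path follows, assuming the qpair was created with create_only = true, added to a poll group, and then connected; the helper name free_connected_io_qpair and the minimal error handling are illustrative only.

#include "spdk/nvme.h"

/* Illustrative exception path: free a still-connected I/O qpair directly.
 * The qpair leaves the poll group before it has actually disconnected, so
 * with this patch the poller/CQ is returned in nvme_rdma_poll_group_remove()
 * rather than in nvme_rdma_qpair_destroy().
 */
static int
free_connected_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_poll_group *group)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	int rc;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	opts.create_only = true;

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	if (qpair == NULL) {
		return -ENOMEM;
	}

	rc = spdk_nvme_poll_group_add(group, qpair);
	if (rc == 0) {
		rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
	}
	if (rc != 0) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		return rc;
	}

	/* Free while still connected: removal from the poll group happens
	 * before the actual disconnect completes.
	 */
	return spdk_nvme_ctrlr_free_io_qpair(qpair);
}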

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Idf266bbb6dbb40f04ae6313db724fabf80865763
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14253
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto authored on 2022-08-30 11:01:03 +09:00; committed by Tomasz Zawadzki
parent 80d75fda06
commit 1d58eb038b
2 changed files with 29 additions and 27 deletions

lib/nvme/nvme_rdma.c

@@ -1980,7 +1980,17 @@ nvme_rdma_qpair_destroy(struct nvme_rdma_qpair *rqpair)
 		rqpair->cm_id = NULL;
 	}
 
-	if (rqpair->cq) {
+	if (rqpair->poller) {
+		struct nvme_rdma_poll_group	*group;
+
+		assert(qpair->poll_group);
+		group = nvme_rdma_poll_group(qpair->poll_group);
+
+		nvme_rdma_poll_group_put_poller(group, rqpair->poller);
+
+		rqpair->poller = NULL;
+		rqpair->cq = NULL;
+	} else if (rqpair->cq) {
 		ibv_destroy_cq(rqpair->cq);
 		rqpair->cq = NULL;
 	}
@@ -2165,18 +2175,6 @@ nvme_rdma_stale_conn_retry(struct nvme_rdma_qpair *rqpair)
 	SPDK_NOTICELOG("%d times, retry stale connnection to qpair (cntlid:%u, qid:%u).\n",
 		       rqpair->stale_conn_retry_count, qpair->ctrlr->cntlid, qpair->id);
 
-	if (rqpair->poller) {
-		struct nvme_rdma_poll_group	*group;
-
-		assert(qpair->poll_group);
-		group = nvme_rdma_poll_group(qpair->poll_group);
-
-		nvme_rdma_poll_group_put_poller(group, rqpair->poller);
-
-		rqpair->poller = NULL;
-		rqpair->cq = NULL;
-	}
-
 	_nvme_rdma_ctrlr_disconnect_qpair(qpair->ctrlr, qpair, nvme_rdma_stale_conn_disconnected);
 
 	return 0;
@@ -2968,18 +2966,6 @@ nvme_rdma_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
 static int
 nvme_rdma_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
 {
-	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
-	struct nvme_rdma_poll_group *group;
-
-	if (rqpair->poller) {
-		group = nvme_rdma_poll_group(qpair->poll_group);
-
-		nvme_rdma_poll_group_put_poller(group, rqpair->poller);
-
-		rqpair->poller = NULL;
-		rqpair->cq = NULL;
-	}
-
 	return 0;
 }
@@ -2994,8 +2980,18 @@ static int
 nvme_rdma_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
 			    struct spdk_nvme_qpair *qpair)
 {
+	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
+	struct nvme_rdma_poll_group *group = nvme_rdma_poll_group(tgroup);
+
 	assert(qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs);
 
+	if (rqpair->poller) {
+		nvme_rdma_poll_group_put_poller(group, rqpair->poller);
+
+		rqpair->poller = NULL;
+		rqpair->cq = NULL;
+	}
+
 	return 0;
 }

test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c

@@ -1494,12 +1494,16 @@ test_nvme_rdma_poll_group_set_cq(void)
 	CU_ASSERT(poller->required_num_wc == 0);
 	CU_ASSERT(rqpair.poller == poller);
 
-	rc = nvme_rdma_poll_group_disconnect_qpair(&rqpair.qpair);
+	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
+
+	rc = nvme_rdma_poll_group_remove(tgroup, &rqpair.qpair);
 	CU_ASSERT(rc == 0);
 	CU_ASSERT(rqpair.cq == NULL);
 	CU_ASSERT(rqpair.poller == NULL);
 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
 
+	rqpair.qpair.poll_group_tailq_head = &tgroup->connected_qpairs;
+
 	/* Test4: Match cq success, function ibv_resize_cq failed */
 	rqpair.cq = NULL;
 	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
@@ -1522,7 +1526,9 @@ test_nvme_rdma_poll_group_set_cq(void)
 	CU_ASSERT(rqpair.cq == poller->cq);
 	CU_ASSERT(rqpair.poller == poller);
 
-	rc = nvme_rdma_poll_group_disconnect_qpair(&rqpair.qpair);
+	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
+
+	rc = nvme_rdma_poll_group_remove(tgroup, &rqpair.qpair);
 	CU_ASSERT(rc == 0);
 
 	rc = nvme_rdma_poll_group_destroy(tgroup);