nvme_rdma: Get qpair from poll group using WC
The NVMe-RDMA target has a helper function get_rdma_qpair_from_wc() that it uses to identify a qpair from a work completion (WC). The NVMe-RDMA initiator has a similar function, nvme_rdma_poll_group_get_qpair_by_id().

The NVMe-RDMA initiator will support SRQ in the following patches, and it will then also need to identify a qpair from a WC. The target's get_rdma_qpair_from_wc() only uses wc->qp_num internally today, but the upcoming custom transport for RDMA will have to use other fields of the WC. Hence, with future enhancements in mind, it is more convenient to pass the whole WC rather than just qp_num.

Based on these thoughts, for the NVMe-RDMA initiator, rename nvme_rdma_poll_group_get_qpair_by_id() to get_rdma_qpair_from_wc(), remove the now-unnecessary forward declaration, and pass the WC instead of qp_num.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: I01ead4730207e2c6ac53b83f151bd5f977a11465
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14279
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
commit 194047249b
parent 6ea9de5fc8
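For readers skimming the diff below, here is a minimal, self-contained sketch of the idea behind the signature change; it is not part of this commit, and example_qpair / example_get_qpair_from_wc are hypothetical names used only for illustration. The match is still on wc->qp_num today, but a lookup that receives the whole struct ibv_wc can later be extended to consult other WC fields (for example when an SRQ delivers completions from many qpairs to one CQ) without changing its signature.

#include <infiniband/verbs.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical per-qpair bookkeeping record, for illustration only. */
struct example_qpair {
	uint32_t qp_num;
};

/*
 * Find the qpair that produced a given work completion.  Today only
 * wc->qp_num is consulted; additional WC fields could be checked here
 * later without touching any caller.
 */
static struct example_qpair *
example_get_qpair_from_wc(struct example_qpair *qpairs, size_t count, struct ibv_wc *wc)
{
	for (size_t i = 0; i < count; i++) {
		if (qpairs[i].qp_num == wc->qp_num) {
			return &qpairs[i];
		}
	}

	return NULL;
}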
@@ -279,8 +279,6 @@ static const char *rdma_cm_event_str[] = {
 	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
 };

-struct nvme_rdma_qpair *nvme_rdma_poll_group_get_qpair_by_id(struct nvme_rdma_poll_group *group,
-		uint32_t qp_num);
 static struct nvme_rdma_poller *nvme_rdma_poll_group_get_poller(struct nvme_rdma_poll_group *group,
 		struct ibv_context *device);
 static void nvme_rdma_poll_group_put_poller(struct nvme_rdma_poll_group *group,
@@ -2393,6 +2391,29 @@ nvme_rdma_fail_qpair(struct spdk_nvme_qpair *qpair, int failure_reason)
 	nvme_ctrlr_disconnect_qpair(qpair);
 }

+static struct nvme_rdma_qpair *
+get_rdma_qpair_from_wc(struct nvme_rdma_poll_group *group, struct ibv_wc *wc)
+{
+	struct spdk_nvme_qpair *qpair;
+	struct nvme_rdma_qpair *rqpair;
+
+	STAILQ_FOREACH(qpair, &group->group.connected_qpairs, poll_group_stailq) {
+		rqpair = nvme_rdma_qpair(qpair);
+		if (NVME_RDMA_POLL_GROUP_CHECK_QPN(rqpair, wc->qp_num)) {
+			return rqpair;
+		}
+	}
+
+	STAILQ_FOREACH(qpair, &group->group.disconnected_qpairs, poll_group_stailq) {
+		rqpair = nvme_rdma_qpair(qpair);
+		if (NVME_RDMA_POLL_GROUP_CHECK_QPN(rqpair, wc->qp_num)) {
+			return rqpair;
+		}
+	}
+
+	return NULL;
+}
+
 static inline void
 nvme_rdma_log_wc_status(struct nvme_rdma_qpair *rqpair, struct ibv_wc *wc)
 {
@@ -2489,8 +2510,7 @@ nvme_rdma_cq_process_completions(struct ibv_cq *cq, uint32_t batch_size,
 		if (wc[i].status) {
 			rqpair = rdma_req->req ? nvme_rdma_qpair(rdma_req->req->qpair) : NULL;
 			if (!rqpair) {
-				rqpair = rdma_qpair != NULL ? rdma_qpair : nvme_rdma_poll_group_get_qpair_by_id(group,
-						wc[i].qp_num);
+				rqpair = rdma_qpair != NULL ? rdma_qpair : get_rdma_qpair_from_wc(group, &wc[i]);
 			}
 			if (!rqpair) {
 				/* When poll_group is used, several qpairs share the same CQ and it is possible to
@@ -2838,30 +2858,6 @@ nvme_rdma_poll_group_create(void)
 	return &group->group;
 }

-struct nvme_rdma_qpair *
-nvme_rdma_poll_group_get_qpair_by_id(struct nvme_rdma_poll_group *group, uint32_t qp_num)
-{
-	struct spdk_nvme_qpair *qpair;
-	struct nvme_rdma_qpair *rqpair;
-
-	STAILQ_FOREACH(qpair, &group->group.disconnected_qpairs, poll_group_stailq) {
-		rqpair = nvme_rdma_qpair(qpair);
-		if (NVME_RDMA_POLL_GROUP_CHECK_QPN(rqpair, qp_num)) {
-			return rqpair;
-		}
-	}
-
-	STAILQ_FOREACH(qpair, &group->group.connected_qpairs, poll_group_stailq) {
-		rqpair = nvme_rdma_qpair(qpair);
-		if (NVME_RDMA_POLL_GROUP_CHECK_QPN(rqpair, qp_num)) {
-			return rqpair;
-		}
-	}
-
-	return NULL;
-}
-
 static int
 nvme_rdma_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
 {
@@ -1236,26 +1236,27 @@ test_rdma_get_memory_translation(void)
 }

 static void
-test_nvme_rdma_poll_group_get_qpair_by_id(void)
+test_get_rdma_qpair_from_wc(void)
 {
 	const uint32_t test_qp_num = 123;
 	struct nvme_rdma_poll_group	group = {};
 	struct nvme_rdma_qpair		rqpair = {};
 	struct spdk_rdma_qp		rdma_qp = {};
 	struct ibv_qp			qp = { .qp_num = test_qp_num };
+	struct ibv_wc			wc = { .qp_num = test_qp_num };

 	STAILQ_INIT(&group.group.disconnected_qpairs);
 	STAILQ_INIT(&group.group.connected_qpairs);
 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;

 	/* Test 1 - Simulate case when nvme_rdma_qpair is disconnected but still in one of lists.
-	 * nvme_rdma_poll_group_get_qpair_by_id must return NULL */
+	 * get_rdma_qpair_from_wc must return NULL */
 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
-	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
+	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);

 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
-	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
+	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);

 	/* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
@@ -1263,11 +1264,11 @@ test_nvme_rdma_poll_group_get_qpair_by_id(void)
 	rqpair.rdma_qp = &rdma_qp;

 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
-	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
+	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);

 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
-	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
+	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
 }
@@ -1524,7 +1525,7 @@ main(int argc, char **argv)
 	CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
 	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
 	CU_ADD_TEST(suite, test_rdma_get_memory_translation);
-	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_qpair_by_id);
+	CU_ADD_TEST(suite, test_get_rdma_qpair_from_wc);
 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_set_cq);