nvme_rdma: Pass poller instead of poll_group to cq_process_completions()

The following patches will add SRQ support, and the SRQ will be per
poller. nvme_rdma_cq_process_completions() will need access to the SRQ.

However, if only the poll_group is passed to
nvme_rdma_cq_process_completions(), it is not possible to identify
which poller the completions belong to.

Hence, add a poll_group pointer to the poller and pass the poller to
nvme_rdma_cq_process_completions() instead of the poll_group.
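
For illustration, a minimal compilable sketch of the back-pointer
pattern this patch introduces. The struct and field names loosely
mirror the patch, but the bodies are simplified stand-ins rather than
the real SPDK code, and the srq member is only a placeholder for what
the follow-up patches are expected to add:

	#include <assert.h>
	#include <stdio.h>

	struct nvme_rdma_poll_group;	/* forward declaration, as in the patch */

	struct nvme_rdma_poller {
		struct nvme_rdma_poll_group	*group;	/* back-pointer added here */
		void				*srq;	/* placeholder: per-poller SRQ */
		int				cq_id;	/* stand-in for the real CQ */
	};

	struct nvme_rdma_poll_group {
		struct nvme_rdma_poller		pollers[2];
	};

	/* Receiving the poller (not the group) keeps both reachable:
	 * the group via poller->group, and later the per-poller SRQ
	 * directly. */
	static void
	cq_process_completions(struct nvme_rdma_poller *poller)
	{
		assert(poller->group != NULL);
		printf("cq %d belongs to group %p\n",
		       poller->cq_id, (void *)poller->group);
	}

	int main(void)
	{
		struct nvme_rdma_poll_group group;
		int i;

		for (i = 0; i < 2; i++) {
			group.pollers[i].group = &group;	/* set at poller creation */
			group.pollers[i].srq = NULL;
			group.pollers[i].cq_id = i;
			cq_process_completions(&group.pollers[i]);
		}
		return 0;
	}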

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: I322a7a0cc08bdcc8e87e720ad65dd8f0b6ae9112
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14282
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>

@@ -132,6 +132,8 @@ struct nvme_rdma_poller_stats {
 	struct spdk_rdma_qp_stats	rdma_stats;
 };
 
+struct nvme_rdma_poll_group;
+
 struct nvme_rdma_poller {
 	struct ibv_context		*device;
 	struct ibv_cq			*cq;
@@ -139,6 +141,7 @@ struct nvme_rdma_poller {
 	int				required_num_wc;
 	int				current_num_wc;
 	struct nvme_rdma_poller_stats	stats;
+	struct nvme_rdma_poll_group	*group;
 	STAILQ_ENTRY(nvme_rdma_poller)	link;
 };
@@ -2441,7 +2444,7 @@ nvme_rdma_is_rxe_device(struct ibv_device_attr *dev_attr)
 
 static int
 nvme_rdma_cq_process_completions(struct ibv_cq *cq, uint32_t batch_size,
-				 struct nvme_rdma_poll_group *group,
+				 struct nvme_rdma_poller *poller,
 				 struct nvme_rdma_qpair *rdma_qpair,
 				 uint64_t *rdma_completions)
 {
@@ -2510,14 +2513,14 @@ nvme_rdma_cq_process_completions(struct ibv_cq *cq, uint32_t batch_size,
 		if (wc[i].status) {
 			rqpair = rdma_req->req ? nvme_rdma_qpair(rdma_req->req->qpair) : NULL;
 			if (!rqpair) {
-				rqpair = rdma_qpair != NULL ? rdma_qpair : get_rdma_qpair_from_wc(group, &wc[i]);
+				rqpair = rdma_qpair != NULL ? rdma_qpair : get_rdma_qpair_from_wc(poller->group, &wc[i]);
 			}
 			if (!rqpair) {
 				/* When poll_group is used, several qpairs share the same CQ and it is possible to
 				 * receive a completion with error (e.g. IBV_WC_WR_FLUSH_ERR) for already disconnected qpair
 				 * That happens due to qpair is destroyed while there are submitted but not completed send/receive
 				 * Work Requests */
-				assert(group);
+				assert(poller);
 				continue;
 			}
 			assert(rqpair->current_num_sends > 0);
@@ -2773,6 +2776,7 @@ nvme_rdma_poller_create(struct nvme_rdma_poll_group *group, struct ibv_context *
 		return NULL;
 	}
 
+	poller->group = group;
 	poller->device = ctx;
 	poller->cq = ibv_create_cq(poller->device, DEFAULT_NVME_RDMA_CQ_SIZE, group, NULL, 0);
@@ -2960,7 +2964,7 @@ nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
 		do {
 			poller->stats.polls++;
 			batch_size = spdk_min((completions_per_poller - poller_completions), MAX_COMPLETIONS_PER_POLL);
-			rc = nvme_rdma_cq_process_completions(poller->cq, batch_size, group, NULL, &rdma_completions);
+			rc = nvme_rdma_cq_process_completions(poller->cq, batch_size, poller, NULL, &rdma_completions);
 			if (rc <= 0) {
 				if (rc == -ECANCELED) {
 					return -EIO;