nvme: poll_group_process_completions() returns -ENXIO if any qpair failed

The TCP transport already returns -ENXIO in this case, but the behavior was not clearly documented.

This change makes the RDMA and PCIe transports follow the same convention and documents it clearly.

This allows the caller to check each qpair's state when
spdk_nvme_poll_group_process_completions() returns -ENXIO, even before
disconnected_qpair_cb() is called.
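
As an illustration, a minimal sketch of a caller relying on this contract. The app_* names and callback body are hypothetical, and spdk_nvme_qpair_get_failure_reason() is assumed to be available in your SPDK version as the public accessor for a qpair's failure state:

#include <errno.h>
#include "spdk/nvme.h"

/* Invoked by spdk_nvme_poll_group_process_completions() for each qpair it
 * finds disconnected while polling. */
static void
app_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	/* Release application state tied to this qpair, then reconnect or
	 * destroy it as appropriate. */
}

static void
app_poll(struct spdk_nvme_poll_group *group)
{
	int64_t rc;

	/* A budget of 0 lets each transport use its default number of
	 * completions to reap per qpair. */
	rc = spdk_nvme_poll_group_process_completions(group, 0,
						      app_disconnected_qpair_cb);
	if (rc == -ENXIO) {
		/* At least one qpair in the group has failed. The caller can
		 * now inspect the state of each of its qpairs, e.g. via
		 * spdk_nvme_qpair_get_failure_reason(), without waiting for
		 * disconnected_qpair_cb() to be invoked for it. */
	}
}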

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I2afe920cfd06c374251fccc1c205948fb498dd33
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11328
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2022-01-27 21:26:31 +09:00 committed by Ben Walker
parent 9717b0c3df
commit 75d38a301d
2 changed files with 7 additions and 6 deletions

@@ -1759,12 +1759,13 @@ nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
 	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
 		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
-		if (local_completions < 0) {
+		if (spdk_unlikely(local_completions < 0)) {
 			disconnected_qpair_cb(qpair, tgroup->group->ctx);
-			local_completions = 0;
+			total_completions = -ENXIO;
+		} else if (spdk_likely(total_completions >= 0)) {
+			total_completions += local_completions;
 		}
-		total_completions += local_completions;
 	}
 
 	return total_completions;
 }
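
Note the guard on the accumulation: once any qpair fails, total_completions is pinned at -ENXIO, and the else if (spdk_likely(total_completions >= 0)) branch keeps completions from later, healthy qpairs from overwriting the error while the loop still polls every connected qpair.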

@@ -2910,14 +2910,13 @@ nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
 	struct nvme_rdma_qpair		*rqpair;
 	struct nvme_rdma_poll_group	*group;
 	struct nvme_rdma_poller		*poller;
-	int				num_qpairs = 0, batch_size, rc;
+	int				num_qpairs = 0, batch_size, rc, rc2 = 0;
 	int64_t				total_completions = 0;
 	uint64_t			completions_allowed = 0;
 	uint64_t			completions_per_poller = 0;
 	uint64_t			poller_completions = 0;
 	uint64_t			rdma_completions;
-
 	if (completions_per_qpair == 0) {
 		completions_per_qpair = MAX_COMPLETIONS_PER_POLL;
 	}
@@ -2949,6 +2948,7 @@ nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
 		}
 
 		if (spdk_unlikely(qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE)) {
+			rc2 = -ENXIO;
 			nvme_rdma_fail_qpair(qpair, 0);
 			continue;
 		}
@@ -3012,7 +3012,7 @@ nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *
 		}
 	}
 
-	return total_completions;
+	return rc2 != 0 ? rc2 : total_completions;
 }
 
 static int
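
Here the RDMA transport records the failure in rc2 instead of returning immediately, so a failed qpair does not cut short completion processing for the rest of the poll group; the error is surfaced only after all pollers have been drained, matching the sticky-error behavior of the PCIe path above.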