nvme_rdma: Factor out reset failed sends/recvs operation

Factor out the operation that resets failed recvs into a helper function,
nvme_rdma_reset_failed_recvs(). This will make the following patches
simpler.

For the send path this change is not required yet, but we may support
something like a shared SQ in the future. Hence, apply the same change
to the send path too.
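
As a rough standalone illustration (not part of this commit; fake_wr and
fake_qpair below are simplified stand-ins for struct ibv_send_wr and
struct nvme_rdma_qpair), the rollback pattern these helpers capture works
like this: after a failed post, the bad_wr chain points at the first WR
that was never posted, and the qpair's in-flight counter must be
decremented once for each WR on that chain.

    #include <assert.h>
    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fake_wr {
            struct fake_wr *next;
    };

    struct fake_qpair {
            unsigned int current_num_sends;
    };

    /* Walk the chain of never-posted WRs and undo the per-WR
     * accounting, mirroring what nvme_rdma_reset_failed_sends() does. */
    static void
    reset_failed_sends(struct fake_qpair *rqpair,
                       struct fake_wr *bad_send_wr, int rc)
    {
            fprintf(stderr, "post failed, errno %d, bad_wr %p\n",
                    rc, (void *)bad_send_wr);
            while (bad_send_wr != NULL) {
                    assert(rqpair->current_num_sends > 0);
                    rqpair->current_num_sends--;
                    bad_send_wr = bad_send_wr->next;
            }
    }

    int
    main(void)
    {
            struct fake_wr w2 = { .next = NULL };
            struct fake_wr w1 = { .next = &w2 };
            struct fake_qpair qpair = { .current_num_sends = 3 };

            /* Suppose 3 WRs were counted as in flight but the post
             * failed at the 2nd one: w1 and w2 never reached the
             * send queue, so 2 sends must be un-counted. */
            reset_failed_sends(&qpair, &w1, ENOMEM);
            printf("current_num_sends = %u\n", qpair.current_num_sends);
            return 0;
    }

Compiled and run, this prints current_num_sends = 1: the first WR was
posted and stays counted, while the two WRs on the bad_wr chain are
un-counted.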

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: Ib44acebe63e97e5a60ea6fa701b49278c7f44b45
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14171
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
commit e22dcc075a (parent 4cef00cbbf)
Author: Shuhei Matsumoto, 2022-10-06 12:02:43 +09:00
Committed by: Tomasz Zawadzki


@@ -770,6 +770,32 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
 	return 0;
 }
 
+static void
+nvme_rdma_reset_failed_sends(struct nvme_rdma_qpair *rqpair,
+			     struct ibv_send_wr *bad_send_wr, int rc)
+{
+	SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
+		    rc, spdk_strerror(rc), bad_send_wr);
+	while (bad_send_wr != NULL) {
+		assert(rqpair->current_num_sends > 0);
+		rqpair->current_num_sends--;
+		bad_send_wr = bad_send_wr->next;
+	}
+}
+
+static void
+nvme_rdma_reset_failed_recvs(struct nvme_rdma_qpair *rqpair,
+			     struct ibv_recv_wr *bad_recv_wr, int rc)
+{
+	SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
+		    rc, spdk_strerror(rc), bad_recv_wr);
+	while (bad_recv_wr != NULL) {
+		assert(rqpair->current_num_recvs > 0);
+		rqpair->current_num_recvs--;
+		bad_recv_wr = bad_recv_wr->next;
+	}
+}
+
 static inline int
 nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
 {
@@ -779,17 +805,10 @@ nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
 	rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_send_wr);
 
 	if (spdk_unlikely(rc)) {
-		SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
-			    rc, spdk_strerror(rc), bad_send_wr);
-		while (bad_send_wr != NULL) {
-			assert(rqpair->current_num_sends > 0);
-			rqpair->current_num_sends--;
-			bad_send_wr = bad_send_wr->next;
-		}
-		return rc;
+		nvme_rdma_reset_failed_sends(rqpair, bad_send_wr, rc);
 	}
 
-	return 0;
+	return rc;
 }
 
 static inline int
@@ -800,13 +819,7 @@ nvme_rdma_qpair_submit_recvs(struct nvme_rdma_qpair *rqpair)
 	rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
 
 	if (spdk_unlikely(rc)) {
-		SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
-			    rc, spdk_strerror(rc), bad_recv_wr);
-		while (bad_recv_wr != NULL) {
-			assert(rqpair->current_num_recvs > 0);
-			rqpair->current_num_recvs--;
-			bad_recv_wr = bad_recv_wr->next;
-		}
+		nvme_rdma_reset_failed_recvs(rqpair, bad_recv_wr, rc);
 	}
 
 	return rc;