nvme: add cmd/cpl printing for rdma errors

This follows similar logic in the pcie and tcp
completion paths: a print_on_error parameter is
added to the completion function so that error
messages can be suppressed when aborting AERs.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Id558d0af2cdd705dfb60abb842bd567a0949ccce
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13525
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Jim Harris 2022-06-30 22:43:56 +00:00 committed by Tomasz Zawadzki
parent 05dce1ee78
commit e415bf0033
2 changed files with 28 additions and 6 deletions

@@ -432,17 +432,33 @@ nvme_rdma_req_put(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdm
 static void
 nvme_rdma_req_complete(struct spdk_nvme_rdma_req *rdma_req,
-		       struct spdk_nvme_cpl *rsp)
+		       struct spdk_nvme_cpl *rsp,
+		       bool print_on_error)
 {
 	struct nvme_request *req = rdma_req->req;
 	struct nvme_rdma_qpair *rqpair;
+	struct spdk_nvme_qpair *qpair;
+	bool error, print_error;
 
 	assert(req != NULL);
 
-	rqpair = nvme_rdma_qpair(req->qpair);
+	qpair = req->qpair;
+	rqpair = nvme_rdma_qpair(qpair);
+
+	error = spdk_nvme_cpl_is_error(rsp);
+	print_error = error && print_on_error && !qpair->ctrlr->opts.disable_error_logging;
+
+	if (print_error) {
+		spdk_nvme_qpair_print_command(qpair, &req->cmd);
+	}
+
+	if (print_error || SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
+		spdk_nvme_qpair_print_completion(qpair, rsp);
+	}
 
 	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
-	nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, rsp);
+	nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, rsp);
 	nvme_free_request(req);
 }
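
For illustration only (not part of the commit): the logging policy above reduces to a small predicate. A minimal standalone C sketch, with made-up names, showing when the command and completion get printed:

/* Sketch of the gating logic; names here are illustrative, not SPDK's. */
#include <stdbool.h>
#include <stdio.h>

static void
log_completion(bool is_error, bool print_on_error,
	       bool disable_error_logging, bool debuglog_enabled)
{
	bool print_error = is_error && print_on_error && !disable_error_logging;

	if (print_error) {
		printf("print command\n");
	}
	if (print_error || debuglog_enabled) {
		printf("print completion\n");
	}
}

int
main(void)
{
	/* Aborted AER: error status, but print_on_error is false, so silent. */
	log_completion(true, false, false, false);
	/* Failed I/O on a normal qpair: command and completion both printed. */
	log_completion(true, true, false, false);
	return 0;
}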
@@ -2396,7 +2412,7 @@ nvme_rdma_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
 	}
 
 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
-		nvme_rdma_req_complete(rdma_req, &cpl);
+		nvme_rdma_req_complete(rdma_req, &cpl, true);
 		nvme_rdma_req_put(rqpair, rdma_req);
 	}
 }
@@ -2443,7 +2459,7 @@ nvme_rdma_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
 static inline int
 nvme_rdma_request_ready(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
 {
-	nvme_rdma_req_complete(rdma_req, &rqpair->rsps[rdma_req->rsp_idx].cpl);
+	nvme_rdma_req_complete(rdma_req, &rqpair->rsps[rdma_req->rsp_idx].cpl, true);
 	nvme_rdma_req_put(rqpair, rdma_req);
 	return nvme_rdma_post_recv(rqpair, rdma_req->rsp_idx);
 }
@@ -2797,7 +2813,7 @@ nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
 			continue;
 		}
 
-		nvme_rdma_req_complete(rdma_req, &cpl);
+		nvme_rdma_req_complete(rdma_req, &cpl, false);
 		nvme_rdma_req_put(rqpair, rdma_req);
 	}
 }

@@ -43,6 +43,12 @@ DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_d
 		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
 		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
+
+DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cpl *cpl));
+
 DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
 
 int
 spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
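
Aside (not from the commit): DEFINE_STUB_V comes from SPDK's unit-test mock framework and, roughly speaking, emits an empty void function so the file under test links without the real symbol. A sketch of that idea with hypothetical names, not SPDK's actual macro definition:

/* A DEFINE_STUB_V-style helper: define an empty void function body
 * for a symbol the linker would otherwise demand. */
#include <stddef.h>

#define MY_STUB_V(fn, args) void fn args { }

struct my_qpair;
struct my_cmd;

/* Expands to: void my_print_command(struct my_qpair *q, struct my_cmd *c) { } */
MY_STUB_V(my_print_command, (struct my_qpair *q, struct my_cmd *c))

int
main(void)
{
	my_print_command(NULL, NULL);	/* safe: the stub does nothing */
	return 0;
}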