lib/nvme: Add qpair_iterate_requests() to run a common per-request operation in each transport
To abort all requests whose cb_arg matches a given value, the caller walks a qpair's outstanding requests and greedily adds a child abort request for each match. Iterating the outstanding requests is transport-specific, but adding the child abort is common to all transports, and the per-request operation may later be replaced by something else. Hence, add a qpair_iterate_requests() function to the transport function pointer table and pass the operation performed on each request as a callback parameter. In each transport, the implementation of qpair_iterate_requests() uses TAILQ_FOREACH_SAFE() for potential future use cases (e.g., a callback that removes the request it is passed).

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ic70d1bf2613fce2566eade26335ceed731f66a89
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2038
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Michael Haeuptle <michaelhaeuptle@gmail.com>
Parent: aa2ea2bed5
Commit: f2bd635ecf
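As a hedged illustration of how a caller is expected to use the new hook: the abort path can pass an iter_fn that matches outstanding requests by cb_arg and queues a child abort for each hit. In the sketch below, only nvme_transport_qpair_iterate_requests() and struct nvme_request (whose req->cb_arg and req->cmd.cid fields exist in SPDK) come from the commit; abort_ctx and queue_child_abort() are hypothetical names invented for this illustration.

/* Sketch only: abort_ctx and queue_child_abort() are hypothetical,
 * not part of this commit. */
struct abort_ctx {
	struct spdk_nvme_ctrlr	*ctrlr;
	void			*cb_arg;  /* abort requests whose cb_arg matches this */
	struct nvme_request	*parent;  /* parent abort that collects child aborts */
};

/* Hypothetical helper that builds one child ABORT command for command
 * identifier `cid` and links it under the parent request. */
static int queue_child_abort(struct nvme_request *parent,
			     struct spdk_nvme_ctrlr *ctrlr, uint16_t cid);

static int
abort_matching_request(struct nvme_request *req, void *arg)
{
	struct abort_ctx *ctx = arg;

	if (req->cb_arg != ctx->cb_arg) {
		return 0;	/* no match; keep iterating */
	}

	/* A non-zero return value stops the transport's iteration early
	 * and is propagated back to the caller. */
	return queue_child_abort(ctx->parent, ctx->ctrlr, req->cmd.cid);
}

A caller would then invoke nvme_transport_qpair_iterate_requests(qpair, abort_matching_request, &ctx) and submit the accumulated parent abort once iteration completes.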
@@ -3152,6 +3152,10 @@ struct spdk_nvme_transport_ops {

	int32_t (*qpair_process_completions)(struct spdk_nvme_qpair *qpair, uint32_t max_completions);

	int (*qpair_iterate_requests)(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg);

	void (*admin_qpair_abort_aers)(struct spdk_nvme_qpair *qpair);

	struct spdk_nvme_transport_poll_group *(*poll_group_create)(void);
@@ -1197,6 +1197,10 @@ int nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nv
int32_t nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair,
		uint32_t max_completions);
void nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair);
int nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
		int (*iter_fn)(struct nvme_request *req, void *arg),
		void *arg);

struct spdk_nvme_transport_poll_group *nvme_transport_poll_group_create(
		const struct spdk_nvme_transport *transport);
int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
@@ -1457,6 +1457,29 @@ nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr)
	}
}

static int
nvme_pcie_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				 int (*iter_fn)(struct nvme_request *req, void *arg),
				 void *arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr, *tmp;
	int rc;

	assert(iter_fn != NULL);

	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
		assert(tr->req != NULL);

		rc = iter_fn(tr->req, arg);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static void
nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
@@ -2559,6 +2582,7 @@ const struct spdk_nvme_transport_ops pcie_ops = {
	.qpair_reset = nvme_pcie_qpair_reset,
	.qpair_submit_request = nvme_pcie_qpair_submit_request,
	.qpair_process_completions = nvme_pcie_qpair_process_completions,
	.qpair_iterate_requests = nvme_pcie_qpair_iterate_requests,
	.admin_qpair_abort_aers = nvme_pcie_admin_qpair_abort_aers,

	.poll_group_create = nvme_pcie_poll_group_create,
@@ -2408,6 +2408,29 @@ nvme_rdma_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
	return rctrlr->max_sge;
}

static int
nvme_rdma_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				 int (*iter_fn)(struct nvme_request *req, void *arg),
				 void *arg)
{
	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
	struct spdk_nvme_rdma_req *rdma_req, *tmp;
	int rc;

	assert(iter_fn != NULL);

	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
		assert(rdma_req->req != NULL);

		rc = iter_fn(rdma_req->req, arg);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static void
nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
@@ -2813,6 +2836,7 @@ const struct spdk_nvme_transport_ops rdma_ops = {
	.qpair_reset = nvme_rdma_qpair_reset,
	.qpair_submit_request = nvme_rdma_qpair_submit_request,
	.qpair_process_completions = nvme_rdma_qpair_process_completions,
	.qpair_iterate_requests = nvme_rdma_qpair_iterate_requests,
	.admin_qpair_abort_aers = nvme_rdma_admin_qpair_abort_aers,

	.poll_group_create = nvme_rdma_poll_group_create,
@@ -1772,6 +1772,29 @@ nvme_tcp_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
	return 1;
}

static int
nvme_tcp_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				int (*iter_fn)(struct nvme_request *req, void *arg),
				void *arg)
{
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	struct nvme_tcp_req *tcp_req, *tmp;
	int rc;

	assert(iter_fn != NULL);

	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
		assert(tcp_req->req != NULL);

		rc = iter_fn(tcp_req->req, arg);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static void
nvme_tcp_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
@@ -1932,6 +1955,7 @@ const struct spdk_nvme_transport_ops tcp_ops = {
	.qpair_reset = nvme_tcp_qpair_reset,
	.qpair_submit_request = nvme_tcp_qpair_submit_request,
	.qpair_process_completions = nvme_tcp_qpair_process_completions,
	.qpair_iterate_requests = nvme_tcp_qpair_iterate_requests,
	.admin_qpair_abort_aers = nvme_tcp_admin_qpair_abort_aers,

	.poll_group_create = nvme_tcp_poll_group_create,
@@ -399,6 +399,22 @@ nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{