lib/nvme: add naive poll group implementation to pcie.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Ib67b41dc9c6ea2dd1fa23a0a0eb7683c212b9df8
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/632
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit was authored by Seth Howell on 2020-02-06 15:27:08 -07:00 and committed by Jim Harris.
Parent commit: 1b818a28b5
Commit: 58509369ec

View File

@ -130,6 +130,10 @@ SPDK_STATIC_ASSERT(sizeof(struct nvme_tracker) == 4096, "nvme_tracker is not 4K"
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned"); SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, u.sgl) & 7) == 0, "SGL must be Qword aligned");
SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, meta_sgl) & 7) == 0, "SGL must be Qword aligned"); SPDK_STATIC_ASSERT((offsetof(struct nvme_tracker, meta_sgl) & 7) == 0, "SGL must be Qword aligned");
/*
 * PCIe-transport-specific poll group. Currently a thin wrapper: it holds only
 * the generic transport poll group, whose address is returned to the common
 * layer by nvme_pcie_poll_group_create() (naive implementation, no extra state).
 */
struct nvme_pcie_poll_group {
	struct spdk_nvme_transport_poll_group group;
};
/* PCIe transport extensions for spdk_nvme_qpair */ /* PCIe transport extensions for spdk_nvme_qpair */
struct nvme_pcie_qpair { struct nvme_pcie_qpair {
/* Submission queue tail doorbell */ /* Submission queue tail doorbell */
@ -2432,7 +2436,14 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
static struct spdk_nvme_transport_poll_group * static struct spdk_nvme_transport_poll_group *
nvme_pcie_poll_group_create(void) nvme_pcie_poll_group_create(void)
{ {
return NULL; struct nvme_pcie_poll_group *group = calloc(1, sizeof(*group));
if (group == NULL) {
SPDK_ERRLOG("Unable to allocate poll group.\n");
return NULL;
}
return &group->group;
} }
/*
 * Add a qpair to the poll group. The common transport layer already links the
 * qpair onto the group's STAILQs, and this naive PCIe implementation keeps no
 * per-group state of its own, so there is nothing to do here.
 *
 * Always returns 0 (success).
 */
static int
nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			 struct spdk_nvme_qpair *qpair)
{
	return 0;
}
/*
 * Remove a qpair from the poll group. As with add, the common layer manages
 * the group membership lists, so the PCIe transport has no work to do.
 *
 * Always returns 0 (success).
 */
static int
nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			    struct spdk_nvme_qpair *qpair)
{
	return 0;
}
static int64_t static int64_t
nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup, nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{ {
return -ENOTSUP; struct spdk_nvme_qpair *qpair, *tmp_qpair;
int32_t local_completions = 0;
int64_t total_completions = 0;
STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
disconnected_qpair_cb(qpair, tgroup->group->ctx);
}
STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
if (local_completions < 0) {
disconnected_qpair_cb(qpair, tgroup->group->ctx);
local_completions = 0;
}
total_completions += local_completions;
}
return total_completions;
} }
static int static int
nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup) nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{ {
return -ENOTSUP; if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
return -EBUSY;
}
free(tgroup);
return 0;
} }
const struct spdk_nvme_transport_ops pcie_ops = { const struct spdk_nvme_transport_ops pcie_ops = {