lib/nvme: switch poll group to use connect/disconnect semantics.

This makes more sense within the context of the nvme driver and
helps us avoid the awkward situation of getting a failed_qp callback
on a qpair that simply hasn't been connected.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Ibac83c87c514ddcf7bd360af10fab462ae011112
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1734
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
Seth Howell 2020-04-07 10:20:41 -07:00 committed by Jim Harris
parent 300583e2e1
commit fc86e792e4
8 changed files with 96 additions and 93 deletions

View File

@@ -1961,10 +1961,11 @@ struct spdk_nvme_poll_group;
/**
* This function alerts the user to failed qpairs when calling
* This function alerts the user to disconnected qpairs when calling
* spdk_nvme_poll_group_process_completions.
*/
typedef void (*spdk_nvme_failed_qpair_cb)(struct spdk_nvme_qpair *qpair, void *poll_group_ctx);
typedef void (*spdk_nvme_disconnected_qpair_cb)(struct spdk_nvme_qpair *qpair,
void *poll_group_ctx);
/**
* Create a new poll group.
@@ -2011,19 +2012,19 @@ int spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group);
/**
* Poll for completions on all qpairs in this poll group.
*
* the failed_qpair_cb will be called for all failed qpairs in the poll group
* the disconnected_qpair_cb will be called for all disconnected qpairs in the poll group
* including qpairs which fail within the context of this call.
* The user is responsible for trying to reconnect or destroy those qpairs.
*
* \param group The group on which to poll for completions.
* \param completions_per_qpair The maximum number of completions per qpair.
* \param failed_qpair_cb A callback function of type spdk_nvme_failed_qpair_cb. Must be non-NULL.
* \param disconnected_qpair_cb A callback function of type spdk_nvme_disconnected_qpair_cb. Must be non-NULL.
*
* return The number of completions across all qpairs, -EINVAL if no failed_qpair_cb is passed, or
* return The number of completions across all qpairs, -EINVAL if no disconnected_qpair_cb is passed, or
* -EIO if the shared completion queue cannot be polled for the RDMA transport.
*/
int64_t spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
/**
* Retrieve the user context for this specific poll group.
@@ -3118,12 +3119,12 @@ struct spdk_nvme_transport_ops {
int (*poll_group_remove)(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_qpair *qpair);
int (*poll_group_activate_qpair)(struct spdk_nvme_qpair *qpair);
int (*poll_group_connect_qpair)(struct spdk_nvme_qpair *qpair);
int (*poll_group_deactivate_qpair)(struct spdk_nvme_qpair *qpair);
int (*poll_group_disconnect_qpair)(struct spdk_nvme_qpair *qpair);
int64_t (*poll_group_process_completions)(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int (*poll_group_destroy)(struct spdk_nvme_transport_poll_group *tgroup);
};

View File

@@ -430,8 +430,8 @@ struct spdk_nvme_poll_group {
struct spdk_nvme_transport_poll_group {
struct spdk_nvme_poll_group *group;
const struct spdk_nvme_transport *transport;
STAILQ_HEAD(, spdk_nvme_qpair) active_qpairs;
STAILQ_HEAD(, spdk_nvme_qpair) failed_qpairs;
STAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
STAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
STAILQ_ENTRY(spdk_nvme_transport_poll_group) link;
};
@@ -830,8 +830,8 @@ nvme_robust_mutex_unlock(pthread_mutex_t *mtx)
}
/* Poll group management functions. */
int nvme_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
/* Admin functions */
int nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr,
@@ -1185,10 +1185,10 @@ int nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair);
int nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair);
int64_t nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb);
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb);
int nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup);
/*
* Below ref related functions must be called with the global

View File

@@ -2436,13 +2436,13 @@ nvme_pcie_poll_group_create(void)
}
static int
nvme_pcie_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
static int
nvme_pcie_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
@@ -2463,7 +2463,7 @@ nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
static int64_t
nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
return -ENOTSUP;
}
@@ -2506,8 +2506,8 @@ const struct spdk_nvme_transport_ops pcie_ops = {
.admin_qpair_abort_aers = nvme_pcie_admin_qpair_abort_aers,
.poll_group_create = nvme_pcie_poll_group_create,
.poll_group_activate_qpair = nvme_pcie_poll_group_activate_qpair,
.poll_group_deactivate_qpair = nvme_pcie_poll_group_deactivate_qpair,
.poll_group_connect_qpair = nvme_pcie_poll_group_connect_qpair,
.poll_group_disconnect_qpair = nvme_pcie_poll_group_disconnect_qpair,
.poll_group_add = nvme_pcie_poll_group_add,
.poll_group_remove = nvme_pcie_poll_group_remove,
.poll_group_process_completions = nvme_pcie_poll_group_process_completions,

View File

@@ -101,31 +101,31 @@ spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme
}
int
nvme_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
return nvme_transport_poll_group_activate_qpair(qpair);
return nvme_transport_poll_group_connect_qpair(qpair);
}
int
nvme_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
return nvme_transport_poll_group_deactivate_qpair(qpair);
return nvme_transport_poll_group_disconnect_qpair(qpair);
}
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
struct spdk_nvme_transport_poll_group *tgroup;
int64_t local_completions = 0, error_reason = 0, num_completions = 0;
if (failed_qpair_cb == NULL) {
if (disconnected_qpair_cb == NULL) {
return -EINVAL;
}
STAILQ_FOREACH(tgroup, &group->tgroups, link) {
local_completions = nvme_transport_poll_group_process_completions(tgroup, completions_per_qpair,
failed_qpair_cb);
disconnected_qpair_cb);
if (local_completions < 0 && error_reason == 0) {
error_reason = local_completions;
} else {

View File

@@ -2154,13 +2154,13 @@ nvme_rdma_poll_group_create(void)
}
static int
nvme_rdma_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_rdma_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
static int
nvme_rdma_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_rdma_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
@@ -2181,7 +2181,7 @@ nvme_rdma_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
static int64_t
nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
return -ENOTSUP;
}
@@ -2226,8 +2226,8 @@ const struct spdk_nvme_transport_ops rdma_ops = {
.admin_qpair_abort_aers = nvme_rdma_admin_qpair_abort_aers,
.poll_group_create = nvme_rdma_poll_group_create,
.poll_group_activate_qpair = nvme_rdma_poll_group_activate_qpair,
.poll_group_deactivate_qpair = nvme_rdma_poll_group_deactivate_qpair,
.poll_group_connect_qpair = nvme_rdma_poll_group_connect_qpair,
.poll_group_disconnect_qpair = nvme_rdma_poll_group_disconnect_qpair,
.poll_group_add = nvme_rdma_poll_group_add,
.poll_group_remove = nvme_rdma_poll_group_remove,
.poll_group_process_completions = nvme_rdma_poll_group_process_completions,

View File

@@ -1709,13 +1709,13 @@ nvme_tcp_poll_group_create(void)
}
static int
nvme_tcp_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_tcp_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
static int
nvme_tcp_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_tcp_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
return 0;
}
@@ -1736,7 +1736,7 @@ nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
static int64_t
nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
return -ENOTSUP;
}
@@ -1775,8 +1775,8 @@ const struct spdk_nvme_transport_ops tcp_ops = {
.admin_qpair_abort_aers = nvme_tcp_admin_qpair_abort_aers,
.poll_group_create = nvme_tcp_poll_group_create,
.poll_group_activate_qpair = nvme_tcp_poll_group_activate_qpair,
.poll_group_deactivate_qpair = nvme_tcp_poll_group_deactivate_qpair,
.poll_group_connect_qpair = nvme_tcp_poll_group_connect_qpair,
.poll_group_disconnect_qpair = nvme_tcp_poll_group_disconnect_qpair,
.poll_group_add = nvme_tcp_poll_group_add,
.poll_group_remove = nvme_tcp_poll_group_remove,
.poll_group_process_completions = nvme_tcp_poll_group_process_completions,

View File

@@ -299,7 +299,7 @@ nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nv
nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
if (qpair->poll_group) {
rc = nvme_poll_group_activate_qpair(qpair);
rc = nvme_poll_group_connect_qpair(qpair);
if (rc) {
goto err;
}
@@ -328,7 +328,7 @@ nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk
nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
assert(transport != NULL);
if (qpair->poll_group) {
nvme_poll_group_deactivate_qpair(qpair);
nvme_poll_group_disconnect_qpair(qpair);
}
transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
@@ -411,8 +411,8 @@ nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
group = transport->ops.poll_group_create();
if (group) {
group->transport = transport;
STAILQ_INIT(&group->active_qpairs);
STAILQ_INIT(&group->failed_qpairs);
STAILQ_INIT(&group->connected_qpairs);
STAILQ_INIT(&group->disconnected_qpairs);
}
return group;
@@ -428,8 +428,8 @@ nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
if (rc == 0) {
qpair->poll_group = tgroup;
assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
qpair->poll_group_tailq_head = &tgroup->failed_qpairs;
STAILQ_INSERT_TAIL(&tgroup->failed_qpairs, qpair, poll_group_stailq);
qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
}
return rc;
@@ -451,10 +451,10 @@ nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
failed_qpair_cb);
disconnected_qpair_cb);
}
int
@@ -464,23 +464,23 @@ nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
}
int
nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_transport_poll_group *tgroup;
int rc;
tgroup = qpair->poll_group;
if (qpair->poll_group_tailq_head == &tgroup->failed_qpairs) {
if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
return 0;
}
if (qpair->poll_group_tailq_head == &tgroup->active_qpairs) {
rc = tgroup->transport->ops.poll_group_deactivate_qpair(qpair);
if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
if (rc == 0) {
qpair->poll_group_tailq_head = &tgroup->failed_qpairs;
STAILQ_REMOVE(&tgroup->active_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->failed_qpairs, qpair, poll_group_stailq);
qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
}
return rc;
}
@@ -489,23 +489,23 @@ nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
}
int
nvme_transport_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_transport_poll_group *tgroup;
int rc;
tgroup = qpair->poll_group;
if (qpair->poll_group_tailq_head == &tgroup->active_qpairs) {
if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
return 0;
}
if (qpair->poll_group_tailq_head == &tgroup->failed_qpairs) {
rc = tgroup->transport->ops.poll_group_activate_qpair(qpair);
if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
if (rc == 0) {
qpair->poll_group_tailq_head = &tgroup->active_qpairs;
STAILQ_REMOVE(&tgroup->failed_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->active_qpairs, qpair, poll_group_stailq);
qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
}
return rc;
}

View File

@@ -64,7 +64,7 @@ TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);
static void
unit_test_failed_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
unit_test_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
}
@@ -82,22 +82,22 @@ nvme_get_next_transport(const struct spdk_nvme_transport *transport)
}
int
nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_transport_poll_group *tgroup;
struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
tgroup = qpair->poll_group;
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->active_qpairs, poll_group_stailq, tmp_iter_qp) {
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
if (qpair == iter_qp) {
STAILQ_REMOVE(&tgroup->active_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->failed_qpairs, qpair, poll_group_stailq);
STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
return 0;
}
}
STAILQ_FOREACH(iter_qp, &tgroup->failed_qpairs, poll_group_stailq) {
STAILQ_FOREACH(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq) {
if (qpair == iter_qp) {
return 0;
}
@@ -107,22 +107,22 @@ nvme_transport_poll_group_deactivate_qpair(struct spdk_nvme_qpair *qpair)
}
int
nvme_transport_poll_group_activate_qpair(struct spdk_nvme_qpair *qpair)
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_transport_poll_group *tgroup;
struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
tgroup = qpair->poll_group;
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->failed_qpairs, poll_group_stailq, tmp_iter_qp) {
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
if (qpair == iter_qp) {
STAILQ_REMOVE(&tgroup->failed_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->active_qpairs, qpair, poll_group_stailq);
STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
return 0;
}
}
STAILQ_FOREACH(iter_qp, &tgroup->active_qpairs, poll_group_stailq) {
STAILQ_FOREACH(iter_qp, &tgroup->connected_qpairs, poll_group_stailq) {
if (qpair == iter_qp) {
return 0;
}
@@ -140,8 +140,8 @@ nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
group = calloc(1, sizeof(*group));
if (group) {
group->transport = transport;
STAILQ_INIT(&group->active_qpairs);
STAILQ_INIT(&group->failed_qpairs);
STAILQ_INIT(&group->connected_qpairs);
STAILQ_INIT(&group->disconnected_qpairs);
}
return group;
@@ -157,7 +157,7 @@ int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
struct spdk_nvme_qpair *qpair)
{
STAILQ_INSERT_TAIL(&tgroup->active_qpairs, qpair, poll_group_stailq);
STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
qpair->poll_group = tgroup;
return 0;
@@ -169,16 +169,16 @@ nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
{
struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->active_qpairs, poll_group_stailq, tmp_iter_qp) {
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
if (qpair == iter_qp) {
STAILQ_REMOVE(&tgroup->active_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
return 0;
}
}
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->failed_qpairs, poll_group_stailq, tmp_iter_qp) {
STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
if (qpair == iter_qp) {
STAILQ_REMOVE(&tgroup->failed_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
return 0;
}
}
@@ -188,7 +188,7 @@ nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *group,
uint32_t completions_per_qpair, spdk_nvme_failed_qpair_cb failed_qpair_cb)
uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
return g_process_completions_return_value;
}
@@ -260,13 +260,13 @@ test_spdk_nvme_poll_group_add_remove(void)
if (tmp_tgroup->transport == &t1) {
tgroup = tmp_tgroup;
} else {
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->active_qpairs));
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
}
i++;
}
CU_ASSERT(i == 1);
SPDK_CU_ASSERT_FATAL(tgroup != NULL);
qpair = STAILQ_FIRST(&tgroup->active_qpairs);
qpair = STAILQ_FIRST(&tgroup->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
CU_ASSERT(qpair == NULL);
@@ -288,18 +288,18 @@ test_spdk_nvme_poll_group_add_remove(void)
} else if (tmp_tgroup->transport == &t2) {
tgroup_2 = tmp_tgroup;
} else {
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->active_qpairs));
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
}
i++;
}
CU_ASSERT(i == 2);
SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
qpair = STAILQ_FIRST(&tgroup_1->active_qpairs);
qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
CU_ASSERT(qpair == NULL);
SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
qpair = STAILQ_FIRST(&tgroup_2->active_qpairs);
qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
@@ -320,23 +320,23 @@ test_spdk_nvme_poll_group_add_remove(void)
} else if (tmp_tgroup->transport == &t4) {
tgroup_4 = tmp_tgroup;
} else {
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->active_qpairs));
CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
}
}
SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
qpair = STAILQ_FIRST(&tgroup_1->active_qpairs);
qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
CU_ASSERT(qpair == NULL);
SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
qpair = STAILQ_FIRST(&tgroup_2->active_qpairs);
qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
CU_ASSERT(qpair == NULL);
SPDK_CU_ASSERT_FATAL(tgroup_4 != NULL);
qpair = STAILQ_FIRST(&tgroup_4->active_qpairs);
qpair = STAILQ_FIRST(&tgroup_4->connected_qpairs);
SPDK_CU_ASSERT_FATAL(qpair == &qpair4_1);
qpair = STAILQ_NEXT(qpair, poll_group_stailq);
SPDK_CU_ASSERT_FATAL(qpair == &qpair4_2);
@@ -352,7 +352,7 @@ test_spdk_nvme_poll_group_add_remove(void)
/* Confirm the fourth transport group was created. */
i = 0;
STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
CU_ASSERT(STAILQ_EMPTY(&tgroup->active_qpairs));
CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
free(tgroup);
i++;
@@ -378,7 +378,8 @@ test_spdk_nvme_poll_group_process_completions(void)
/* If we don't have any transport poll groups, we shouldn't get any completions. */
g_process_completions_return_value = 32;
CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128, unit_test_failed_qpair_cb) == 0);
CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
unit_test_disconnected_qpair_cb) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
@@ -392,11 +393,12 @@ test_spdk_nvme_poll_group_process_completions(void)
qpair1_1.transport = &t1;
CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
qpair1_1.state = NVME_QPAIR_ENABLED;
CU_ASSERT(nvme_poll_group_activate_qpair(&qpair1_1) == 0);
CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128, unit_test_failed_qpair_cb) == 32);
CU_ASSERT(nvme_poll_group_connect_qpair(&qpair1_1) == 0);
CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
unit_test_disconnected_qpair_cb) == 32);
CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
CU_ASSERT(STAILQ_EMPTY(&tgroup->active_qpairs));
CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
free(tgroup);
}