nvmf: Keep a list of qpairs per poll group

Remove the list of qpairs per controller.

Change-Id: I6aaeed43c64b38b60c5a609123b3542bb7130a90
Reviewed-on: https://review.gerrithub.io/415317
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Ben Walker 2018-06-01 15:10:12 -07:00 committed by Daniel Verkamp
parent f80001e2c6
commit 65aa57b007
4 changed files with 66 additions and 57 deletions

View File

@ -77,30 +77,34 @@ ctrlr_add_qpair_and_update_rsp(struct spdk_nvmf_qpair *qpair,
struct spdk_nvmf_ctrlr *ctrlr,
struct spdk_nvmf_fabric_connect_rsp *rsp)
{
pthread_mutex_lock(&ctrlr->mtx);
/* check if we would exceed ctrlr connection limit */
if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
SPDK_ERRLOG("Requested QID %u but Max QID is %u\n",
qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
pthread_mutex_unlock(&ctrlr->mtx);
return;
}
/* check if this QID is already in use */
if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
SPDK_ERRLOG("Got I/O connect with duplicate QID %u\n", qpair->qid);
rsp->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
rsp->status.sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER;
pthread_mutex_unlock(&ctrlr->mtx);
return;
}
qpair->ctrlr = ctrlr;
spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);
TAILQ_INSERT_HEAD(&ctrlr->qpairs, qpair, link);
rsp->status.sc = SPDK_NVME_SC_SUCCESS;
rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
SPDK_DEBUGLOG(SPDK_LOG_NVMF, "connect capsule response: cntlid = 0x%04x\n",
rsp->status_code_specific.success.cntlid);
pthread_mutex_unlock(&ctrlr->mtx);
}
static void
@ -162,9 +166,13 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
}
req->qpair->ctrlr = ctrlr;
TAILQ_INIT(&ctrlr->qpairs);
ctrlr->subsys = subsystem;
if (pthread_mutex_init(&ctrlr->mtx, NULL) != 0) {
SPDK_ERRLOG("Failed to initialize controller mutex\n");
free(ctrlr);
return NULL;
}
ctrlr->qpair_mask = spdk_bit_array_create(tgt->opts.max_qpairs_per_ctrlr);
if (!ctrlr->qpair_mask) {
SPDK_ERRLOG("Failed to allocate controller qpair mask\n");
@ -216,13 +224,7 @@ spdk_nvmf_ctrlr_create(struct spdk_nvmf_subsystem *subsystem,
void
spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
while (!TAILQ_EMPTY(&ctrlr->qpairs)) {
struct spdk_nvmf_qpair *qpair = TAILQ_FIRST(&ctrlr->qpairs);
TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
spdk_bit_array_clear(ctrlr->qpair_mask, qpair->qid);
spdk_nvmf_transport_qpair_fini(qpair);
}
/* TODO: Verify that qpair mask has been cleared. */
spdk_bit_array_free(&ctrlr->qpair_mask);
@ -864,12 +866,17 @@ spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
{
struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
uint32_t count;
SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Set Features - Number of Queues, cdw11 0x%x\n",
req->cmd->nvme_cmd.cdw11);
pthread_mutex_lock(&ctrlr->mtx);
count = spdk_bit_array_count_set(ctrlr->qpair_mask);
pthread_mutex_unlock(&ctrlr->mtx);
/* verify that the controller is ready to process commands */
if (spdk_bit_array_count_set(ctrlr->qpair_mask) > 1) {
if (count > 1) {
SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Queue pairs already active!\n");
rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
} else {

View File

@ -95,6 +95,7 @@ spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
uint32_t sid;
TAILQ_INIT(&group->tgroups);
TAILQ_INIT(&group->qpairs);
TAILQ_FOREACH(transport, &tgt->transports, link) {
spdk_nvmf_poll_group_add_transport(group, transport);
@ -127,12 +128,17 @@ static void
spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
struct spdk_nvmf_poll_group *group = ctx_buf;
struct spdk_nvmf_qpair *qpair, *qptmp;
struct spdk_nvmf_transport_poll_group *tgroup, *tmp;
struct spdk_nvmf_subsystem_poll_group *sgroup;
uint32_t sid, nsid;
spdk_poller_unregister(&group->poller);
TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, qptmp) {
spdk_nvmf_qpair_disconnect(qpair);
}
TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
TAILQ_REMOVE(&group->tgroups, tgroup, link);
spdk_nvmf_transport_poll_group_destroy(tgroup);
@ -555,6 +561,8 @@ spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
qpair->group = group;
qpair->state = SPDK_NVMF_QPAIR_ACTIVATING;
TAILQ_INSERT_TAIL(&group->qpairs, qpair, link);
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
if (tgroup->transport == qpair->transport) {
rc = spdk_nvmf_transport_poll_group_add(tgroup, qpair);
@ -578,6 +586,8 @@ spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
int rc = -1;
struct spdk_nvmf_transport_poll_group *tgroup;
TAILQ_REMOVE(&group->qpairs, qpair, link);
qpair->group = NULL;
TAILQ_FOREACH(tgroup, &group->tgroups, link) {
@ -595,78 +605,64 @@ _spdk_nvmf_ctrlr_free(void *ctx)
{
struct spdk_nvmf_ctrlr *ctrlr = ctx;
spdk_nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);
free(ctrlr);
spdk_nvmf_ctrlr_destruct(ctrlr);
}
static void
_spdk_nvmf_qpair_destroy(void *ctx)
_spdk_nvmf_qpair_destroy(void *ctx, int status)
{
struct spdk_nvmf_qpair *qpair = ctx;
struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
uint16_t qid = qpair->qid;
uint32_t count;
spdk_nvmf_poll_group_remove(qpair->group, qpair);
assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
qpair->state = SPDK_NVMF_QPAIR_INACTIVE;
spdk_nvmf_transport_qpair_fini(qpair);
}
static void
_spdk_nvmf_ctrlr_remove_qpair(void *ctx)
{
struct spdk_nvmf_qpair *qpair = ctx;
struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
if (!ctrlr) {
return;
}
assert(ctrlr != NULL);
pthread_mutex_lock(&ctrlr->mtx);
spdk_bit_array_clear(ctrlr->qpair_mask, qid);
count = spdk_bit_array_count_set(ctrlr->qpair_mask);
pthread_mutex_unlock(&ctrlr->mtx);
TAILQ_REMOVE(&ctrlr->qpairs, qpair, link);
spdk_bit_array_clear(ctrlr->qpair_mask, qpair->qid);
/* Send a message to the thread that owns the qpair and destroy it. */
qpair->ctrlr = NULL;
spdk_thread_send_msg(qpair->group->thread, _spdk_nvmf_qpair_destroy, qpair);
if (spdk_bit_array_count_set(ctrlr->qpair_mask) == 0) {
if (count == 0) {
/* If this was the last queue pair on the controller, also send a message
* to the subsystem to remove the controller. */
spdk_thread_send_msg(ctrlr->subsys->thread, _spdk_nvmf_ctrlr_free, ctrlr);
}
}
static void
_spdk_nvmf_qpair_quiesced(void *cb_arg, int status)
{
struct spdk_nvmf_qpair *qpair = cb_arg;
/* Send a message to the controller thread to remove the qpair from its internal
* list. */
spdk_thread_send_msg(qpair->ctrlr->admin_qpair->group->thread, _spdk_nvmf_ctrlr_remove_qpair,
qpair);
}
static void
_spdk_nvmf_qpair_deactivate(void *ctx)
{
struct spdk_nvmf_qpair *qpair = ctx;
assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
qpair->state = SPDK_NVMF_QPAIR_DEACTIVATING;
if (qpair->ctrlr == NULL) {
/* This qpair was never added to a controller. Skip a step
* and destroy it immediately. */
_spdk_nvmf_qpair_destroy(qpair);
if (qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING ||
qpair->state == SPDK_NVMF_QPAIR_INACTIVE) {
/* This can occur if the connection is killed by the target,
* which results in a notification that the connection
* died. */
return;
}
assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
qpair->state = SPDK_NVMF_QPAIR_DEACTIVATING;
/* Check for outstanding I/O */
if (!TAILQ_EMPTY(&qpair->outstanding)) {
qpair->state_cb = _spdk_nvmf_qpair_quiesced;
qpair->state_cb = _spdk_nvmf_qpair_destroy;
qpair->state_cb_arg = qpair;
return;
}
_spdk_nvmf_qpair_quiesced(qpair, 0);
_spdk_nvmf_qpair_destroy(qpair, 0);
}
void
@ -823,9 +819,16 @@ int
spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
struct spdk_nvmf_subsystem *subsystem)
{
struct spdk_nvmf_qpair *qpair, *tmp;
struct spdk_nvmf_subsystem_poll_group *sgroup;
uint32_t nsid;
TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp) {
if (qpair->ctrlr->subsys == subsystem) {
spdk_nvmf_qpair_disconnect(qpair);
}
}
sgroup = &group->sgroups[subsystem->id];
sgroup->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;

View File

@ -115,6 +115,9 @@ struct spdk_nvmf_poll_group {
/* Array of poll groups indexed by subsystem id (sid) */
struct spdk_nvmf_subsystem_poll_group *sgroups;
uint32_t num_sgroups;
/* All of the queue pairs that belong to this poll group */
TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
};
typedef enum _spdk_nvmf_request_exec_status {
@ -205,8 +208,9 @@ struct spdk_nvmf_ctrlr {
struct spdk_nvmf_qpair *admin_qpair;
TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
struct spdk_bit_array *qpair_mask;
/* Mutex to protect the qpair mask */
pthread_mutex_t mtx;
struct spdk_bit_array *qpair_mask;
struct spdk_nvmf_request *aer_req;
union spdk_nvme_async_event_completion notice_event;

View File

@ -277,7 +277,6 @@ test_connect(void)
group.thread = thread;
memset(&ctrlr, 0, sizeof(ctrlr));
TAILQ_INIT(&ctrlr.qpairs);
ctrlr.subsys = &subsystem;
ctrlr.qpair_mask = spdk_bit_array_create(3);
SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
@ -286,7 +285,6 @@ test_connect(void)
ctrlr.vcprop.cc.bits.iocqes = 4;
memset(&admin_qpair, 0, sizeof(admin_qpair));
TAILQ_INSERT_TAIL(&ctrlr.qpairs, &admin_qpair, link);
admin_qpair.group = &group;
memset(&tgt, 0, sizeof(tgt));
@ -459,7 +457,6 @@ test_connect(void)
CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
CU_ASSERT(qpair.ctrlr == &ctrlr);
qpair.ctrlr = NULL;
TAILQ_INIT(&ctrlr.qpairs);
/* Non-existent controller */
memset(&rsp, 0, sizeof(rsp));
@ -540,7 +537,6 @@ test_connect(void)
memset(&qpair2, 0, sizeof(qpair2));
qpair2.group = &group;
qpair2.qid = 1;
TAILQ_INSERT_TAIL(&ctrlr.qpairs, &qpair, link);
spdk_bit_array_set(ctrlr.qpair_mask, 1);
cmd.connect_cmd.qid = 1;
rc = spdk_nvmf_ctrlr_connect(&req);
@ -548,7 +544,6 @@ test_connect(void)
CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
CU_ASSERT(qpair.ctrlr == NULL);
TAILQ_INIT(&ctrlr.qpairs);
/* Clean up globals */
MOCK_SET(spdk_nvmf_tgt_find_subsystem, struct spdk_nvmf_subsystem *, NULL);