nvme: expose functions to manage queue identifiers

In cases where the SPDK nvme driver is used as a validation/test
vehicle, users may need to allocate a currently unused qid for creating
queues through the raw command interfaces. One example is testing
N:1 SQ:CQ mappings, which are supported by PCIe controllers but not
exposed through the standard SPDK nvme driver APIs.

These new functions fulfill that purpose and ensure that an allocated
qid will not be reused by the SPDK driver for any future queue created
through the spdk_nvme_ctrlr_alloc_io_qpair API.
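
For illustration, the kind of test this enables might look roughly like
the sketch below. None of it is part of this commit: TEST_QUEUE_SIZE,
admin_cb(), wait_admin(), and create_2_to_1_mapping() are hypothetical
names, the synchronous completion loop is a simplification, and error
handling, doorbell wiring, and queue teardown are omitted. Using
spdk_dma_zmalloc() is just one way to obtain DMA-able queue memory.

	#include "spdk/stdinc.h"
	#include "spdk/env.h"
	#include "spdk/nvme.h"

	#define TEST_QUEUE_SIZE 64	/* entries; assumed <= MQES + 1 for the device */

	static bool g_admin_done;

	static void
	admin_cb(void *arg, const struct spdk_nvme_cpl *cpl)
	{
		assert(!spdk_nvme_cpl_is_error(cpl));
		g_admin_done = true;
	}

	static void
	wait_admin(struct spdk_nvme_ctrlr *ctrlr)
	{
		/* Poll the admin queue until the raw command completes. */
		while (!g_admin_done) {
			spdk_nvme_ctrlr_process_admin_completions(ctrlr);
		}
		g_admin_done = false;
	}

	/* Create one CQ and two SQs that both complete into it (2:1 SQ:CQ). */
	static void
	create_2_to_1_mapping(struct spdk_nvme_ctrlr *ctrlr)
	{
		struct spdk_nvme_cmd cmd;
		uint64_t cq_phys, sq_phys;
		int32_t cqid, sqid[2];
		int i;

		/* Reserve ids up front so the driver never hands them out
		 * through spdk_nvme_ctrlr_alloc_io_qpair(). */
		cqid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
		sqid[0] = spdk_nvme_ctrlr_alloc_qid(ctrlr);
		sqid[1] = spdk_nvme_ctrlr_alloc_qid(ctrlr);
		assert(cqid >= 0 && sqid[0] >= 0 && sqid[1] >= 0);

		/* Shared completion queue: physically contiguous, polled (IEN=0). */
		spdk_dma_zmalloc(TEST_QUEUE_SIZE * sizeof(struct spdk_nvme_cpl), 0x1000, &cq_phys);
		memset(&cmd, 0, sizeof(cmd));
		cmd.opc = SPDK_NVME_OPC_CREATE_IO_CQ;
		cmd.dptr.prp.prp1 = cq_phys;
		cmd.cdw10 = ((TEST_QUEUE_SIZE - 1) << 16) | (uint16_t)cqid;
		cmd.cdw11 = 0x1;	/* PC=1 */
		spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, admin_cb, NULL);
		wait_admin(ctrlr);

		/* Two submission queues pointing at the same CQID. */
		for (i = 0; i < 2; i++) {
			spdk_dma_zmalloc(TEST_QUEUE_SIZE * sizeof(struct spdk_nvme_cmd), 0x1000, &sq_phys);
			memset(&cmd, 0, sizeof(cmd));
			cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;
			cmd.dptr.prp.prp1 = sq_phys;
			cmd.cdw10 = ((TEST_QUEUE_SIZE - 1) << 16) | (uint16_t)sqid[i];
			cmd.cdw11 = ((uint32_t)cqid << 16) | 0x1;	/* CQID | PC=1 */
			spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, admin_cb, NULL);
			wait_admin(ctrlr);
		}
	}

The key point is that the reserved ids stay out of the driver's free
pool, so a later spdk_nvme_ctrlr_alloc_io_qpair() call cannot collide
with the raw queues.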

Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Change-Id: I21c33596ec415c2816728a600972b242da9d971b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3896
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
 3 files changed, 59 insertions(+), 12 deletions(-)

--- a/include/spdk/nvme.h
+++ b/include/spdk/nvme.h
@@ -1993,6 +1993,27 @@ void spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
 const struct spdk_nvme_transport_id *spdk_nvme_ctrlr_get_transport_id(
 		struct spdk_nvme_ctrlr *ctrlr);
 
+/**
+ * \brief Alloc NVMe I/O queue identifier.
+ *
+ * This function is only needed for the non-standard case of allocating queues using the raw
+ * command interface. In most cases \ref spdk_nvme_ctrlr_alloc_io_qpair should be sufficient.
+ *
+ * \param ctrlr Opaque handle to NVMe controller.
+ * \return qid on success, -1 on failure.
+ */
+int32_t spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr);
+
+/**
+ * \brief Free NVMe I/O queue identifier.
+ *
+ * This function must only be called with qids previously allocated with \ref spdk_nvme_ctrlr_alloc_qid.
+ *
+ * \param ctrlr Opaque handle to NVMe controller.
+ * \param qid NVMe Queue Identifier.
+ */
+void spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid);
+
 /**
  * Opaque handle for a poll group. A poll group is a collection of spdk_nvme_qpair
  * objects that are polled for completions as a unit.
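
The intended calling pattern for the pair is sketched below (a
hypothetical fragment, not part of the header):

	int32_t qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);

	if (qid < 0) {
		/* every id in [1, num_io_queues] is currently in use */
		return -1;
	}

	/* ... create and drive a queue with id (uint16_t)qid via raw commands ... */

	/* hand the id back once the queue has been deleted on the controller */
	spdk_nvme_ctrlr_free_qid(ctrlr, (uint16_t)qid);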

--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c
@@ -323,7 +323,7 @@ static struct spdk_nvme_qpair *
 nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 			   const struct spdk_nvme_io_qpair_opts *opts)
 {
-	uint32_t qid;
+	int32_t qid;
 	struct spdk_nvme_qpair *qpair;
 	union spdk_nvme_cc_register cc;
 
@@ -353,12 +353,8 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 		return NULL;
 	}
 
-	/*
-	 * Get the first available I/O queue ID.
-	 */
-	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
-	if (qid > ctrlr->opts.num_io_queues) {
-		SPDK_ERRLOG("No free I/O queue IDs\n");
+	qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
+	if (qid < 0) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return NULL;
 	}
@@ -366,11 +362,11 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
 	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
 	if (qpair == NULL) {
 		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
+		spdk_nvme_ctrlr_free_qid(ctrlr, qid);
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 		return NULL;
 	}
 
-	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
 	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
 
 	nvme_ctrlr_proc_add_io_qpair(qpair);
@@ -584,7 +580,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
 	nvme_ctrlr_proc_remove_io_qpair(qpair);
 	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
-	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
+	spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);
 
 	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -1811,11 +1807,11 @@ nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
 		return;
 	}
 
-	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
-	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
 	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
-		spdk_bit_array_set(ctrlr->free_io_qids, i);
+		spdk_nvme_ctrlr_free_qid(ctrlr, i);
 	}
 
 	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
 			     ctrlr->opts.admin_timeout_ms);
 }
@@ -3582,6 +3578,34 @@ spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
 	return &ctrlr->trid;
 }
 
+int32_t
+spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
+{
+	uint32_t qid;
+
+	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
+	if (qid > ctrlr->opts.num_io_queues) {
+		SPDK_ERRLOG("No free I/O queue IDs\n");
+		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+		return -1;
+	}
+
+	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
+	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+	return qid;
+}
+
+void
+spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
+{
+	assert(qid <= ctrlr->opts.num_io_queues);
+
+	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
+	spdk_bit_array_set(ctrlr->free_io_qids, qid);
+	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
+}
+
 /* FIXME need to specify max number of iovs */
 int
 spdk_nvme_map_prps(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs,
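
Because allocation scans the bit array upward from index 1, ids are
handed out lowest-first, and ctrlr_lock serializes alloc/free against
qpair creation and deletion on other threads. A hypothetical check of
the pool semantics (a test fragment; assumes at least one I/O queue id
is available and no other users of the controller):

	static uint16_t taken[UINT16_MAX + 1];
	size_t n = 0;
	int32_t qid;

	/* Drain the pool; ids arrive in ascending order starting at 1. */
	while ((qid = spdk_nvme_ctrlr_alloc_qid(ctrlr)) >= 0) {
		taken[n++] = (uint16_t)qid;
	}
	/* The final failing call logs "No free I/O queue IDs" by design. */

	/* After freeing one id it is the only set bit, so it comes straight back. */
	spdk_nvme_ctrlr_free_qid(ctrlr, taken[n - 1]);
	assert(spdk_nvme_ctrlr_alloc_qid(ctrlr) == taken[n - 1]);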

--- a/lib/nvme/spdk_nvme.map
+++ b/lib/nvme/spdk_nvme.map
@@ -83,6 +83,8 @@
 	spdk_nvme_ctrlr_map_cmb;
 	spdk_nvme_ctrlr_unmap_cmb;
 	spdk_nvme_ctrlr_get_transport_id;
+	spdk_nvme_ctrlr_alloc_qid;
+	spdk_nvme_ctrlr_free_qid;
 
 	spdk_nvme_poll_group_create;
 	spdk_nvme_poll_group_add;