nvme: add I/O qpair creation options

Add a new struct spdk_nvme_io_qpair_opts to allow the user to override
controller options on a per-I/O qpair basis.

Existing callers with qprio == 0 can be updated to:

  ... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);

Callers that need to specify a non-default qprio should be updated to:

  struct spdk_nvme_io_qpair_opts opts;
  spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
  opts.qprio = SPDK_NVME_QPRIO_...;
  ... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
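
Other per-qpair settings, such as io_queue_size and io_queue_requests, can be
overridden the same way (the values below are only illustrative):

  struct spdk_nvme_io_qpair_opts opts;
  spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
  opts.io_queue_size = 1024;      /* illustrative per-qpair queue depth */
  opts.io_queue_requests = 4096;  /* should be at least io_queue_size */
  ... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));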

Change-Id: I8ac3ea369535cfde759abbe75e1d974b6450a800
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/369676
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Daniel Verkamp 2017-07-14 16:08:05 -07:00
parent 6a0e8d7ae9
commit ce4fcbce71
21 changed files with 209 additions and 53 deletions


@ -44,6 +44,20 @@ modifying `io_queue_requests` in the opts structure.
The SPDK NVMe `fio_plugin` has been updated to support multiple threads (`numjobs`).
spdk_nvme_ctrlr_alloc_io_qpair() has been modified to allow the user to override
controller-level options for each individual I/O queue pair.
Existing callers with qprio == 0 can be updated to:
~~~
... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
~~~
Callers that need to specify a non-default qprio should be updated to:
~~~
struct spdk_nvme_io_qpair_opts opts;
spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
opts.qprio = SPDK_NVME_QPRIO_...;
... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
~~~
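Controller-level queue sizing can also be overridden per qpair through the same
opts structure (the values below are only illustrative):
~~~
struct spdk_nvme_io_qpair_opts opts;
spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
opts.io_queue_size = 1024;      /* illustrative per-qpair queue depth */
opts.io_queue_requests = 4096;  /* should be at least io_queue_size */
... = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
~~~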
### Environment Abstraction Layer
The environment abstraction layer has been updated to include several new functions


@ -403,7 +403,13 @@ drain_io(struct ns_worker_ctx *ns_ctx)
static int
init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx, enum spdk_nvme_qprio qprio)
{
ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->nvme.ctrlr, qprio);
struct spdk_nvme_ctrlr *ctrlr = ns_ctx->entry->nvme.ctrlr;
struct spdk_nvme_io_qpair_opts opts;
spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
opts.qprio = qprio;
ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
if (!ns_ctx->qpair) {
printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return 1;


@ -171,7 +171,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
SPDK_ERRLOG("Cannot allocate space for fio_qpair\n");
return;
}
fio_qpair->qpair = spdk_nvme_ctrlr_alloc_io_qpair(fio_ctrlr->ctrlr, 0);
fio_qpair->qpair = spdk_nvme_ctrlr_alloc_io_qpair(fio_ctrlr->ctrlr, NULL, 0);
fio_qpair->ns = ns;
fio_qpair->f = f;
fio_qpair->next = fio_thread->fio_qpair;


@ -158,7 +158,7 @@ hello_world(void)
* qpair. This enables extremely efficient I/O processing by making all
* I/O operations completely lockless.
*/
ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, 0);
ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, NULL, 0);
if (ns_entry->qpair == NULL) {
printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
return;


@ -119,7 +119,7 @@ register_dev(struct spdk_nvme_ctrlr *ctrlr)
dev->size_in_ios = spdk_nvme_ns_get_size(dev->ns) / g_io_size_bytes;
dev->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(dev->ns);
dev->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
dev->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
if (!dev->qpair) {
fprintf(stderr, "ERROR: spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
goto skip;


@ -592,7 +592,7 @@ init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
* TODO: If a controller has multiple namespaces, they could all use the same queue.
* For now, give each namespace/thread combination its own queue.
*/
ns_ctx->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->u.nvme.ctrlr, 0);
ns_ctx->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->u.nvme.ctrlr, NULL, 0);
if (!ns_ctx->u.nvme.qpair) {
printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return -1;


@ -420,7 +420,7 @@ int main(int argc, char **argv)
foreach_dev(iter) {
struct spdk_nvme_qpair *qpair;
qpair = spdk_nvme_ctrlr_alloc_io_qpair(iter->ctrlr, 0);
qpair = spdk_nvme_ctrlr_alloc_io_qpair(iter->ctrlr, NULL, 0);
if (!qpair) {
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
rc = 1;


@ -468,6 +468,52 @@ typedef void (*spdk_nvme_timeout_cb)(void *cb_arg,
void spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
uint32_t timeout_sec, spdk_nvme_timeout_cb cb_fn, void *cb_arg);
/**
* \brief NVMe I/O queue pair initialization options.
*
* These options may be passed to spdk_nvme_ctrlr_alloc_io_qpair() to configure queue pair
* options at queue creation time.
*
* The user may retrieve the default I/O queue pair creation options for a controller using
* spdk_nvme_ctrlr_get_default_io_qpair_opts().
*/
struct spdk_nvme_io_qpair_opts {
/**
* Queue priority for weighted round robin arbitration. If a different arbitration
* method is in use, pass 0.
*/
enum spdk_nvme_qprio qprio;
/**
* The queue depth of this NVMe I/O queue. Overrides spdk_nvme_ctrlr_opts::io_queue_size.
*/
uint32_t io_queue_size;
/**
* The number of requests to allocate for this NVMe I/O queue.
*
* Overrides spdk_nvme_ctrlr_opts::io_queue_requests.
*
* This should be at least as large as io_queue_size.
*
* A single I/O may allocate more than one request, since splitting may be necessary to
* conform to the device's maximum transfer size, PRP list compatibility requirements,
* or driver-assisted striping.
*/
uint32_t io_queue_requests;
};
/**
* \brief Get the default options for I/O qpair creation for a specific NVMe controller.
*
* \param ctrlr NVMe controller to retrieve the defaults from.
* \param[out] opts Will be filled with the default options for spdk_nvme_ctrlr_alloc_io_qpair().
* \param opts_size Must be set to sizeof(struct spdk_nvme_io_qpair_opts).
*/
void spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_io_qpair_opts *opts,
size_t opts_size);
/**
* \brief Allocate an I/O queue pair (submission and completion queue).
*
@ -475,11 +521,13 @@ void spdk_nvme_ctrlr_register_timeout_callback(struct spdk_nvme_ctrlr *ctrlr,
* enforced by the user).
*
* \param ctrlr NVMe controller for which to allocate the I/O queue pair.
* \param qprio Queue priority for weighted round robin arbitration. If a different arbitration
* method is in use, pass 0.
* \param opts I/O qpair creation options, or NULL to use the defaults as returned by
* spdk_nvme_ctrlr_get_default_io_qpair_opts().
* \param opts_size Must be set to sizeof(struct spdk_nvme_io_qpair_opts), or 0 if opts is NULL.
*/
struct spdk_nvme_qpair *spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio);
const struct spdk_nvme_io_qpair_opts *opts,
size_t opts_size);
/**
* \brief Free an I/O queue pair that was allocated by spdk_nvme_ctrlr_alloc_io_qpair().


@ -275,7 +275,7 @@ _bdev_nvme_reset_create_qpair(void *io_device, struct spdk_io_channel *ch,
struct spdk_nvme_ctrlr *ctrlr = io_device;
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
nvme_ch->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
nvme_ch->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
assert(nvme_ch->qpair != NULL); /* Currently, no good way to handle this error */
}
@ -453,7 +453,7 @@ bdev_nvme_create_cb(void *io_device, void *ctx_buf)
ch->collect_spin_stat = false;
#endif
ch->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
ch->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, NULL, 0);
if (ch->qpair == NULL) {
return -1;


@ -142,13 +142,60 @@ nvme_ctrlr_proc_remove_io_qpair(struct spdk_nvme_qpair *qpair)
}
}
void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_io_qpair_opts *opts,
size_t opts_size)
{
if (!ctrlr || !opts) {
return;
}
memset(opts, 0, opts_size);
#define FIELD_OK(field) \
offsetof(struct spdk_nvme_io_qpair_opts, field) + sizeof(opts->field) <= opts_size
if (FIELD_OK(qprio)) {
opts->qprio = SPDK_NVME_QPRIO_URGENT;
}
if (FIELD_OK(io_queue_size)) {
opts->io_queue_size = ctrlr->opts.io_queue_size;
}
if (FIELD_OK(io_queue_requests)) {
opts->io_queue_requests = ctrlr->opts.io_queue_requests;
}
#undef FIELD_OK
}
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio)
const struct spdk_nvme_io_qpair_opts *user_opts,
size_t opts_size)
{
uint32_t qid;
struct spdk_nvme_qpair *qpair;
union spdk_nvme_cc_register cc;
struct spdk_nvme_io_qpair_opts opts;
if (!ctrlr) {
return NULL;
}
/*
* Get the default options, then overwrite them with the user-provided options
* up to opts_size.
*
* This allows for extensions of the opts structure without breaking
* ABI compatibility.
*/
spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
if (user_opts) {
memcpy(&opts, user_opts, spdk_min(sizeof(opts), opts_size));
}
if (nvme_ctrlr_get_cc(ctrlr, &cc)) {
SPDK_ERRLOG("get_cc failed\n");
@ -156,7 +203,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
}
/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
if ((qprio & 3) != qprio) {
if ((opts.qprio & 3) != opts.qprio) {
return NULL;
}
@ -164,7 +211,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
* Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
* default round robin arbitration method.
*/
if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (qprio != SPDK_NVME_QPRIO_URGENT)) {
if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (opts.qprio != SPDK_NVME_QPRIO_URGENT)) {
SPDK_ERRLOG("invalid queue priority for default round robin arbitration method\n");
return NULL;
}
@ -181,7 +228,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
return NULL;
}
qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, qprio);
qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, &opts);
if (qpair == NULL) {
SPDK_ERRLOG("transport->ctrlr_create_io_qpair() failed\n");
nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);


@ -595,7 +595,7 @@ void nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme
int nvme_ ## name ## _ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value); \
uint32_t nvme_ ## name ## _ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr); \
uint32_t nvme_ ## name ## _ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr); \
struct spdk_nvme_qpair *nvme_ ## name ## _ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio); \
struct spdk_nvme_qpair *nvme_ ## name ## _ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, const struct spdk_nvme_io_qpair_opts *opts); \
int nvme_ ## name ## _ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_enable(struct spdk_nvme_qpair *qpair); \


@ -1403,7 +1403,7 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
struct spdk_nvme_qpair *
nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
const struct spdk_nvme_io_qpair_opts *opts)
{
struct nvme_pcie_qpair *pqpair;
struct spdk_nvme_qpair *qpair;
@ -1416,11 +1416,11 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
return NULL;
}
pqpair->num_entries = ctrlr->opts.io_queue_size;
pqpair->num_entries = opts->io_queue_size;
qpair = &pqpair->qpair;
rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, ctrlr->opts.io_queue_requests);
rc = nvme_qpair_init(qpair, qid, ctrlr, opts->qprio, opts->io_queue_requests);
if (rc != 0) {
nvme_pcie_qpair_destroy(qpair);
return NULL;


@ -1044,10 +1044,10 @@ nvme_rdma_qpair_destroy(struct spdk_nvme_qpair *qpair)
struct spdk_nvme_qpair *
nvme_rdma_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
const struct spdk_nvme_io_qpair_opts *opts)
{
return nvme_rdma_ctrlr_create_qpair(ctrlr, qid, ctrlr->opts.io_queue_size, qprio,
ctrlr->opts.io_queue_requests);
return nvme_rdma_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
opts->io_queue_requests);
}
int


@ -147,9 +147,9 @@ nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
const struct spdk_nvme_io_qpair_opts *opts)
{
NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_create_io_qpair, (ctrlr, qid, qprio));
NVME_TRANSPORT_CALL(ctrlr->trid.trtype, ctrlr_create_io_qpair, (ctrlr, qid, opts));
}
int


@ -303,7 +303,8 @@ nvmf_direct_ctrlr_complete_aer(void *arg, const struct spdk_nvme_cpl *cpl)
static int
nvmf_direct_ctrlr_attach(struct spdk_nvmf_subsystem *subsystem)
{
subsystem->dev.direct.io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(subsystem->dev.direct.ctrlr, 0);
subsystem->dev.direct.io_qpair = spdk_nvme_ctrlr_alloc_io_qpair(subsystem->dev.direct.ctrlr, NULL,
0);
if (subsystem->dev.direct.io_qpair == NULL) {
SPDK_ERRLOG("spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
return -1;


@ -514,7 +514,7 @@ write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, con
return 0;
}
qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
if (!qpair) {
free_req(req);
return -1;


@ -409,7 +409,7 @@ init_ns_worker_ctx(void)
* TODO: If a controller has multiple namespaces, they could all use the same queue.
* For now, give each namespace/thread combination its own queue.
*/
g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, 0);
g_ns->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(g_ns->u.nvme.ctrlr, NULL, 0);
if (!g_ns->u.nvme.qpair) {
printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return -1;


@ -279,7 +279,7 @@ work_fn(void *arg)
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->ctrlr, 0);
ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->ctrlr, NULL, 0);
if (ns_ctx->qpair == NULL) {
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed on core %u\n", worker->lcore);
return -1;


@ -358,7 +358,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const ch
return 0;
}
qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
if (!qpair) {
free_req(req);
return -1;


@ -123,7 +123,7 @@ nvme_transport_ctrlr_get_max_io_queue_size(struct spdk_nvme_ctrlr *ctrlr)
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
const struct spdk_nvme_io_qpair_opts *opts)
{
struct spdk_nvme_qpair *qpair;
@ -132,7 +132,7 @@ nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid
qpair->ctrlr = ctrlr;
qpair->id = qid;
qpair->qprio = qprio;
qpair->qprio = opts->qprio;
return qpair;
}
@ -1115,6 +1115,7 @@ cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
static void
test_alloc_io_qpair_rr_1(void)
{
struct spdk_nvme_io_qpair_opts opts;
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0;
@ -1126,32 +1127,40 @@ test_alloc_io_qpair_rr_1(void)
*/
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
/* Only 1 I/O qpair was allocated, so this should fail */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/*
* Now that the qpair has been returned to the free list,
* we should be able to allocate it again.
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/* Only qprio 0 is acceptable for the default round robin arbitration mechanism */
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
opts.qprio = 1;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 == NULL);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
opts.qprio = 2;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 == NULL);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
opts.qprio = 3;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 == NULL);
/* Only qprio values 0 ~ 3 are acceptable */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);
opts.qprio = 4;
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
cleanup_qpairs(&ctrlr);
}
@ -1159,6 +1168,7 @@ test_alloc_io_qpair_rr_1(void)
static void
test_alloc_io_qpair_wrr_1(void)
{
struct spdk_nvme_io_qpair_opts opts;
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0, *q1;
@ -1170,13 +1180,18 @@ test_alloc_io_qpair_wrr_1(void)
*/
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
/*
* Allocate 2 qpairs and free them
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
opts.qprio = 0;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
opts.qprio = 1;
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
@ -1185,17 +1200,21 @@ test_alloc_io_qpair_wrr_1(void)
/*
* Allocate 2 qpairs and free them in the reverse order
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
opts.qprio = 2;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
opts.qprio = 3;
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
/* Only qprio values 0 ~ 3 are acceptable */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);
opts.qprio = 4;
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
cleanup_qpairs(&ctrlr);
}
@ -1203,6 +1222,7 @@ test_alloc_io_qpair_wrr_1(void)
static void
test_alloc_io_qpair_wrr_2(void)
{
struct spdk_nvme_io_qpair_opts opts;
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
@ -1214,20 +1234,31 @@ test_alloc_io_qpair_wrr_2(void)
*/
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
opts.qprio = 0;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
opts.qprio = 1;
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
opts.qprio = 2;
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q2 != NULL);
SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
opts.qprio = 3;
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q3 != NULL);
SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
/* Only 4 I/O qpairs were allocated, so this should fail */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
opts.qprio = 0;
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
@ -1239,16 +1270,23 @@ test_alloc_io_qpair_wrr_2(void)
*
* Allocate 4 I/O qpairs and half of them with same qprio.
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
opts.qprio = 1;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
opts.qprio = 1;
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
opts.qprio = 3;
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q2 != NULL);
SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
opts.qprio = 3;
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
SPDK_CU_ASSERT_FATAL(q3 != NULL);
SPDK_CU_ASSERT_FATAL(q3->qprio == 3);


@ -139,7 +139,9 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
}
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, enum spdk_nvme_qprio qprio)
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
const struct spdk_nvme_io_qpair_opts *opts,
size_t opts_size)
{
return NULL;
}