nvme: make I/O queue allocation explicit

The previous method for registering I/O queues did not allow the user
to specify queue priority for weighted round robin arbitration, and it
limited the application to one queue per controller per thread.

Change the API to require explicit allocation of each queue for each
controller using the new function spdk_nvme_ctrlr_alloc_io_qpair().

Each function that submits a command on an I/O queue now takes an
explicit qpair parameter rather than implicitly using the thread-local
queue.

This also allows the application to allocate different numbers of
queues per controller; previously, the number of queues was capped at
the smallest value supported by any attached controller.

Weighted round robin arbitration is not supported yet; additional
changes to the controller startup process are required to enable
alternate arbitration methods.

Change-Id: Ia33be1050a6953bc5a3cca9284aefcd95b01116e
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp <daniel.verkamp@intel.com>
Date:   2016-02-29 14:19:02 -07:00
Parent: 9f67a07fdc
Commit: 3272320c73

19 changed files with 646 additions and 469 deletions


@ -13,6 +13,13 @@ user code.
moves device detection into the NVMe library. The new API also allows
parallel initialization of NVMe controllers, providing a major reduction in
startup time when using multiple controllers.
- I/O queue allocation was changed to be explicit in the API. Each function
that generates I/O requests now takes a queue pair (`spdk_nvme_qpair *`)
argument, and I/O queues may be allocated using
`spdk_nvme_ctrlr_alloc_io_qpair()`. This allows more flexible assignment of
queue pairs than the previous model, which only allowed a single queue
per thread and limited the total number of I/O queues to the lowest number
supported on any attached controller.
- Added support for the Write Zeroes command.
- `examples/nvme/perf` can now report I/O command latency from the
  controller's viewpoint using the Intel vendor-specific read/write latency
  log page.


@ -46,7 +46,7 @@
- spdk_nvme_ns_cmd_write() \copybrief spdk_nvme_ns_cmd_write()
- spdk_nvme_ns_cmd_deallocate() \copybrief spdk_nvme_ns_cmd_deallocate()
- spdk_nvme_ns_cmd_flush() \copybrief spdk_nvme_ns_cmd_flush()
- spdk_nvme_ctrlr_process_io_completions() \copybrief spdk_nvme_ctrlr_process_io_completions()
- spdk_nvme_qpair_process_completions() \copybrief spdk_nvme_qpair_process_completions()
\section key_concepts Key Concepts


@ -462,12 +462,6 @@ int main(int argc, char **argv)
qsort(devs, num_devs, sizeof(devs[0]), cmp_devs);
if (num_devs) {
rc = spdk_nvme_register_io_thread();
if (rc != 0)
return rc;
}
usage();
while (1) {
@ -519,8 +513,5 @@ int main(int argc, char **argv)
spdk_nvme_detach(dev->ctrlr);
}
if (num_devs)
spdk_nvme_unregister_io_thread();
return rc;
}


@ -94,10 +94,19 @@ struct ns_worker_ctx {
uint64_t offset_in_ios;
bool is_draining;
union {
struct {
struct spdk_nvme_qpair *qpair;
} nvme;
#if HAVE_LIBAIO
struct io_event *events;
io_context_t ctx;
struct {
struct io_event *events;
io_context_t ctx;
} aio;
#endif
} u;
struct ns_worker_ctx *next;
};
@ -177,6 +186,7 @@ register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
entry->type = ENTRY_TYPE_NVME_NS;
entry->u.nvme.ctrlr = ctrlr;
entry->u.nvme.ns = ns;
entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
g_io_size_bytes;
entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
@ -346,14 +356,14 @@ aio_check_io(struct ns_worker_ctx *ns_ctx)
timeout.tv_sec = 0;
timeout.tv_nsec = 0;
count = io_getevents(ns_ctx->ctx, 1, g_queue_depth, ns_ctx->events, &timeout);
count = io_getevents(ns_ctx->u.aio.ctx, 1, g_queue_depth, ns_ctx->u.aio.events, &timeout);
if (count < 0) {
fprintf(stderr, "io_getevents error\n");
exit(1);
}
for (i = 0; i < count; i++) {
task_complete(ns_ctx->events[i].data);
task_complete(ns_ctx->u.aio.events[i].data);
}
}
#endif /* HAVE_LIBAIO */
@ -400,23 +410,25 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
(g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
if (entry->type == ENTRY_TYPE_AIO_FILE) {
rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf,
rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PREAD, task->buf,
g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
} else
#endif
{
rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
} else {
#if HAVE_LIBAIO
if (entry->type == ENTRY_TYPE_AIO_FILE) {
rc = aio_submit(ns_ctx->ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf,
rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd, IO_CMD_PWRITE, task->buf,
g_io_size_bytes, offset_in_ios * g_io_size_bytes, task);
} else
#endif
{
rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, ns_ctx->u.nvme.qpair, task->buf,
offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
}
@ -465,7 +477,7 @@ check_io(struct ns_worker_ctx *ns_ctx)
} else
#endif
{
spdk_nvme_ctrlr_process_io_completions(ns_ctx->entry->u.nvme.ctrlr, g_max_completions);
spdk_nvme_qpair_process_completions(ns_ctx->u.nvme.qpair, g_max_completions);
}
}
@ -486,20 +498,66 @@ drain_io(struct ns_worker_ctx *ns_ctx)
}
}
static int
init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
if (ns_ctx->entry->type == ENTRY_TYPE_AIO_FILE) {
#ifdef HAVE_LIBAIO
ns_ctx->u.aio.events = calloc(g_queue_depth, sizeof(struct io_event));
if (!ns_ctx->u.aio.events) {
return -1;
}
ns_ctx->u.aio.ctx = 0;
if (io_setup(g_queue_depth, &ns_ctx->u.aio.ctx) < 0) {
free(ns_ctx->u.aio.events);
perror("io_setup");
return -1;
}
#endif
} else {
/*
* TODO: If a controller has multiple namespaces, they could all use the same queue.
* For now, give each namespace/thread combination its own queue.
*/
ns_ctx->u.nvme.qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->u.nvme.ctrlr, 0);
if (!ns_ctx->u.nvme.qpair) {
printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair failed\n");
return -1;
}
}
return 0;
}
static void
cleanup_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
{
if (ns_ctx->entry->type == ENTRY_TYPE_NVME_NS) {
spdk_nvme_ctrlr_free_io_qpair(ns_ctx->u.nvme.qpair);
}
}
static int
work_fn(void *arg)
{
uint64_t tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
uint64_t tsc_end;
struct worker_thread *worker = (struct worker_thread *)arg;
struct ns_worker_ctx *ns_ctx = NULL;
printf("Starting thread on core %u\n", worker->lcore);
if (spdk_nvme_register_io_thread() != 0) {
fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
return -1;
/* Allocate a queue pair for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
if (init_ns_worker_ctx(ns_ctx) != 0) {
printf("ERROR: init_ns_worker_ctx() failed\n");
return 1;
}
ns_ctx = ns_ctx->next;
}
tsc_end = rte_get_timer_cycles() + g_time_in_sec * g_tsc_rate;
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
@ -527,11 +585,10 @@ work_fn(void *arg)
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
drain_io(ns_ctx);
cleanup_ns_worker_ctx(ns_ctx);
ns_ctx = ns_ctx->next;
}
spdk_nvme_unregister_io_thread();
return 0;
}
@ -928,20 +985,6 @@ associate_workers_with_ns(void)
return -1;
}
memset(ns_ctx, 0, sizeof(*ns_ctx));
#ifdef HAVE_LIBAIO
ns_ctx->events = calloc(g_queue_depth, sizeof(struct io_event));
if (!ns_ctx->events) {
free(ns_ctx);
return -1;
}
ns_ctx->ctx = 0;
if (io_setup(g_queue_depth, &ns_ctx->ctx) < 0) {
free(ns_ctx->events);
free(ns_ctx);
perror("io_setup");
return -1;
}
#endif
printf("Associating %s with lcore %d\n", entry->name, worker->lcore);
ns_ctx->entry = entry;


@ -187,7 +187,8 @@ reservation_ns_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
}
static int
reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_register_data *rr_data;
@ -202,7 +203,7 @@ reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands = 0;
reserve_command_result = -1;
ret = spdk_nvme_ns_cmd_reservation_register(ns, rr_data, 1,
ret = spdk_nvme_ns_cmd_reservation_register(ns, qpair, rr_data, 1,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
reservation_ns_completion, NULL);
@ -214,7 +215,7 @@ reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_qpair_process_completions(qpair, 100);
}
if (reserve_command_result)
@ -225,7 +226,7 @@ reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint16_t ns_id)
{
int ret, i;
uint8_t *payload;
@ -239,7 +240,7 @@ reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands = 0;
reserve_command_result = -1;
ret = spdk_nvme_ns_cmd_reservation_report(ns, payload, 0x1000,
ret = spdk_nvme_ns_cmd_reservation_report(ns, qpair, payload, 0x1000,
reservation_ns_completion, NULL);
if (ret) {
fprintf(stderr, "Reservation Report Failed\n");
@ -249,7 +250,7 @@ reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_qpair_process_completions(qpair, 100);
}
if (reserve_command_result) {
@ -277,7 +278,7 @@ reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_acquire_data *cdata;
@ -290,7 +291,7 @@ reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands = 0;
reserve_command_result = -1;
ret = spdk_nvme_ns_cmd_reservation_acquire(ns, cdata,
ret = spdk_nvme_ns_cmd_reservation_acquire(ns, qpair, cdata,
0,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
@ -303,7 +304,7 @@ reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_qpair_process_completions(qpair, 100);
}
if (reserve_command_result)
@ -314,7 +315,7 @@ reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_key_data *cdata;
@ -327,7 +328,7 @@ reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands = 0;
reserve_command_result = -1;
ret = spdk_nvme_ns_cmd_reservation_release(ns, cdata,
ret = spdk_nvme_ns_cmd_reservation_release(ns, qpair, cdata,
0,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
@ -340,7 +341,7 @@ reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_qpair_process_completions(qpair, 100);
}
if (reserve_command_result)
@ -351,7 +352,8 @@ reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
}
static void
reserve_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_dev)
reserve_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
struct spdk_pci_device *pci_dev)
{
const struct spdk_nvme_ctrlr_data *cdata;
@ -373,10 +375,10 @@ reserve_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_de
get_host_identifier(ctrlr);
/* tested 1 namespace */
reservation_ns_register(ctrlr, 1);
reservation_ns_acquire(ctrlr, 1);
reservation_ns_report(ctrlr, 1);
reservation_ns_release(ctrlr, 1);
reservation_ns_register(ctrlr, qpair, 1);
reservation_ns_acquire(ctrlr, qpair, 1);
reservation_ns_report(ctrlr, qpair, 1);
reservation_ns_release(ctrlr, qpair, 1);
}
static bool
@ -441,14 +443,18 @@ int main(int argc, char **argv)
return 1;
}
if (num_devs) {
rc = spdk_nvme_register_io_thread();
if (rc != 0)
return rc;
}
rc = 0;
foreach_dev(iter) {
reserve_controller(iter->ctrlr, iter->pci_dev);
struct spdk_nvme_qpair *qpair;
qpair = spdk_nvme_ctrlr_alloc_io_qpair(iter->ctrlr, 0);
if (!qpair) {
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
rc = 1;
} else {
reserve_controller(iter->ctrlr, qpair, iter->pci_dev);
}
}
printf("Cleaning up...\n");
@ -458,8 +464,5 @@ int main(int argc, char **argv)
spdk_nvme_detach(dev->ctrlr);
}
if (num_devs)
spdk_nvme_unregister_io_thread();
return rc;
}


@ -172,6 +172,31 @@ void spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
spdk_nvme_aer_cb aer_cb_fn,
void *aer_cb_arg);
/**
* \brief Opaque handle to a queue pair.
*
* I/O queue pairs may be allocated using spdk_nvme_ctrlr_alloc_io_qpair().
*/
struct spdk_nvme_qpair;
/**
* \brief Allocate an I/O queue pair (submission and completion queue).
*
* Each queue pair should only be used from a single thread at a time (mutual exclusion must be
* enforced by the user).
*
* \param ctrlr NVMe controller for which to allocate the I/O queue pair.
* \param qprio Queue priority for weighted round robin arbitration. If a different arbitration
* method is in use, pass 0.
*/
struct spdk_nvme_qpair *spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio);
/**
* \brief Free an I/O queue pair that was allocated by spdk_nvme_ctrlr_alloc_io_qpair().
*/
int spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair);
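Taken together, allocation and teardown pair up like this (an illustrative sketch; `ctrlr` is assumed to be an attached controller):

    struct spdk_nvme_qpair *qpair;

    qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0); /* 0: no WRR priority yet */
    if (qpair == NULL) {
            /* No free I/O queue available on this controller. */
    }

    /* ... submit and complete I/O on this qpair from a single thread ... */

    if (spdk_nvme_ctrlr_free_io_qpair(qpair) != 0) {
            /* The Delete I/O SQ/CQ admin commands failed. */
    }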
/**
* \brief Send the given NVM I/O command to the NVMe controller.
*
@ -182,33 +207,34 @@ void spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
* When constructing the nvme_command it is not necessary to fill out the PRP
* list/SGL or the CID. The driver will handle both of those for you.
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
*
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Process any outstanding completions for I/O submitted on the current thread.
* \brief Process any outstanding completions for I/O submitted on a queue pair.
*
* This will only process completions for I/O that were submitted on the same thread
* that this function is called from. This call is also non-blocking, i.e. it only
* This call is non-blocking, i.e. it only
* processes completions that are ready at the time of this function call. It does not
* wait for outstanding commands to finish.
*
* \param qpair Queue pair to check for completions.
* \param max_completions Limit the number of completions to be processed in one call, or 0
* for unlimited.
*
* \return Number of completions processed (may be 0) or negative on error.
*
* This function is thread safe and can be called at any point while the controller is attached to
* This function may be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* The caller must ensure that each queue pair is only used from one thread at a time.
*/
int32_t spdk_nvme_ctrlr_process_io_completions(struct spdk_nvme_ctrlr *ctrlr,
int32_t spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
uint32_t max_completions);
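The reserve example updated later in this commit drains completions in bounded batches, which is the typical pattern:

    /* Reap up to 100 completions per call until all commands have finished. */
    while (outstanding_commands) {
            spdk_nvme_qpair_process_completions(qpair, 100);
    }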
/**
@ -494,6 +520,7 @@ typedef int (*spdk_nvme_req_next_sge_cb)(void *cb_arg, uint64_t *address, uint32
* \brief Submits a write I/O to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the write I/O
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to the data payload
* \param lba starting LBA to write the data
* \param lba_count length (in sectors) for the write operation
@ -505,10 +532,10 @@ typedef int (*spdk_nvme_req_next_sge_cb)(void *cb_arg, uint64_t *address, uint32
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *payload,
int spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *payload,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, uint32_t io_flags);
@ -516,6 +543,7 @@ int spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *payload,
* \brief Submits a write I/O to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the write I/O
* \param qpair I/O queue pair to submit the request
* \param lba starting LBA to write the data
* \param lba_count length (in sectors) for the write operation
* \param cb_fn callback function to invoke when the I/O is completed
@ -528,10 +556,11 @@ int spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *payload,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
int spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn);
@ -540,6 +569,7 @@ int spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_
* \brief Submits a write zeroes I/O to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the write zeroes I/O
* \param qpair I/O queue pair to submit the request
* \param lba starting LBA for this command
* \param lba_count length (in sectors) for the write zero operation
* \param cb_fn callback function to invoke when the I/O is completed
@ -550,17 +580,19 @@ int spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
int spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
/**
* \brief Submits a read I/O to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the read I/O
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to the data payload
* \param lba starting LBA to read the data
* \param lba_count length (in sectors) for the read operation
@ -571,10 +603,10 @@ int spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *payload,
int spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *payload,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, uint32_t io_flags);
@ -582,6 +614,7 @@ int spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *payload,
* \brief Submits a read I/O to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the read I/O
* \param qpair I/O queue pair to submit the request
* \param lba starting LBA to read the data
* \param lba_count length (in sectors) for the read operation
* \param cb_fn callback function to invoke when the I/O is completed
@ -594,10 +627,11 @@ int spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *payload,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
int spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn);
@ -606,6 +640,7 @@ int spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_c
* \brief Submits a deallocation request to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the deallocation request
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to the list of LBA ranges to
* deallocate
* \param num_ranges number of ranges in the list pointed to by payload; must be
@ -616,32 +651,35 @@ int spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_c
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn,
void *cb_arg);
int spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
void *payload, uint16_t num_ranges,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a flush request to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the flush request
* \param qpair I/O queue pair to submit the request
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
*
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a reservation register to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the reservation register request
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to the reservation register data
* \param ignore_key '1' the current reservation key check is disabled
* \param action specifies the registration action
@ -652,10 +690,11 @@ int spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
@ -666,6 +705,7 @@ int spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
* \brief Submits a reservation release to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the reservation release request
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to current reservation key
* \param ignore_key '1' the current reservation key check is disabled
* \param action specifies the reservation release action
@ -676,10 +716,11 @@ int spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
@ -690,6 +731,7 @@ int spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
* \brief Submits a reservation acquire to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the reservation acquire request
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer to reservation acquire data
* \param ignore_key '1' the current reservation key check is disabled
* \param action specifies the reservation acquire action
@ -700,10 +742,11 @@ int spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
@ -714,6 +757,7 @@ int spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
* \brief Submits a reservation report to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the reservation report request
* \param qpair I/O queue pair to submit the request
* \param payload virtual address pointer for reservation status data
* \param len length bytes for reservation status data structure
* \param cb_fn callback function to invoke when the I/O is completed
@ -722,11 +766,13 @@ int spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* spdk_nvme_register_io_thread().
* The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
* The user must ensure that only one thread submits I/O on a given qpair at any given time.
*/
int spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *payload, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Get the size, in bytes, of an nvme_request.
@ -739,9 +785,6 @@ int spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
*/
size_t spdk_nvme_request_size(void);
int spdk_nvme_register_io_thread(void);
void spdk_nvme_unregister_io_thread(void);
#ifdef __cplusplus
}
#endif


@ -39,13 +39,11 @@
struct nvme_driver g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.max_io_queues = DEFAULT_MAX_IO_QUEUES,
.init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_driver.init_ctrlrs),
.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_driver.attached_ctrlrs),
};
int32_t spdk_nvme_retry_count;
__thread int nvme_thread_ioq_index = -1;
/**
@ -180,77 +178,6 @@ nvme_free_request(struct nvme_request *req)
nvme_dealloc_request(req);
}
static int
nvme_allocate_ioq_index(void)
{
struct nvme_driver *driver = &g_nvme_driver;
uint32_t i;
nvme_mutex_lock(&driver->lock);
if (driver->ioq_index_pool == NULL) {
driver->ioq_index_pool =
calloc(driver->max_io_queues, sizeof(*driver->ioq_index_pool));
if (driver->ioq_index_pool) {
for (i = 0; i < driver->max_io_queues; i++) {
driver->ioq_index_pool[i] = i;
}
} else {
nvme_mutex_unlock(&driver->lock);
return -1;
}
driver->ioq_index_pool_next = 0;
}
if (driver->ioq_index_pool_next < driver->max_io_queues) {
nvme_thread_ioq_index = driver->ioq_index_pool[driver->ioq_index_pool_next];
driver->ioq_index_pool[driver->ioq_index_pool_next] = -1;
driver->ioq_index_pool_next++;
} else {
nvme_thread_ioq_index = -1;
}
nvme_mutex_unlock(&driver->lock);
return 0;
}
static void
nvme_free_ioq_index(void)
{
struct nvme_driver *driver = &g_nvme_driver;
nvme_mutex_lock(&driver->lock);
if (nvme_thread_ioq_index >= 0) {
driver->ioq_index_pool_next--;
driver->ioq_index_pool[driver->ioq_index_pool_next] = nvme_thread_ioq_index;
nvme_thread_ioq_index = -1;
}
nvme_mutex_unlock(&driver->lock);
}
int
spdk_nvme_register_io_thread(void)
{
int rc = 0;
if (nvme_thread_ioq_index >= 0) {
nvme_printf(NULL, "thread already registered\n");
return -1;
}
rc = nvme_allocate_ioq_index();
if (rc) {
nvme_printf(NULL, "ioq_index_pool alloc failed\n");
return rc;
}
return (nvme_thread_ioq_index >= 0) ? 0 : -1;
}
void
spdk_nvme_unregister_io_thread(void)
{
nvme_free_ioq_index();
}
struct nvme_enum_ctx {
spdk_nvme_probe_cb probe_cb;
void *cb_ctx;


@ -42,6 +42,152 @@
static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_async_event_request *aer);
static int
spdk_nvme_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
struct nvme_completion_poll_status status;
int rc;
status.done = false;
rc = nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
return -1;
}
status.done = false;
rc = nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
/* Attempt to delete the completion queue */
status.done = false;
rc = nvme_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
return -1;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
return -1;
}
nvme_qpair_reset(qpair);
return 0;
}
struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio)
{
struct spdk_nvme_qpair *qpair;
/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
if ((qprio & 3) != qprio) {
return NULL;
}
nvme_mutex_lock(&ctrlr->ctrlr_lock);
/*
* Get the first available qpair structure.
*/
qpair = TAILQ_FIRST(&ctrlr->free_io_qpairs);
if (qpair == NULL) {
/* No free queue IDs */
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return NULL;
}
/*
* At this point, qpair contains a preallocated submission and completion queue and a
* unique queue ID, but it is not yet created on the controller.
*
* Fill out the submission queue priority and send out the Create I/O Queue commands.
*/
qpair->qprio = qprio;
if (spdk_nvme_ctrlr_create_qpair(ctrlr, qpair) != 0) {
/*
* spdk_nvme_ctrlr_create_qpair() failed, so the qpair structure is still unused.
* Exit here so we don't insert it into the active_io_qpairs list.
*/
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return NULL;
}
TAILQ_REMOVE(&ctrlr->free_io_qpairs, qpair, tailq);
TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return qpair;
}
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_completion_poll_status status;
int rc;
if (qpair == NULL) {
return 0;
}
ctrlr = qpair->ctrlr;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
/* Delete the I/O submission queue and then the completion queue */
status.done = false;
rc = nvme_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return -1;
}
status.done = false;
rc = nvme_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
if (rc != 0) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return -1;
}
TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
TAILQ_INSERT_HEAD(&ctrlr->free_io_qpairs, qpair, tailq);
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
return 0;
}
static void
nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_intel_log_page_directory *log_page_directory)
@ -230,6 +376,8 @@ nvme_ctrlr_construct_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
ctrlr);
if (rc)
return -1;
TAILQ_INSERT_TAIL(&ctrlr->free_io_qpairs, qpair, tailq);
}
return 0;
@ -376,6 +524,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
uint32_t i;
struct spdk_nvme_qpair *qpair;
nvme_mutex_lock(&ctrlr->ctrlr_lock);
@ -411,6 +560,16 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
}
}
if (!ctrlr->is_failed) {
/* Reinitialize qpairs */
TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
if (spdk_nvme_ctrlr_create_qpair(ctrlr, qpair) != 0) {
nvme_ctrlr_fail(ctrlr);
rc = -1;
}
}
}
ctrlr->is_resetting = false;
nvme_mutex_unlock(&ctrlr->ctrlr_lock);
@ -454,7 +613,6 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
static int
nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_driver *driver = &g_nvme_driver;
struct nvme_completion_poll_status status;
int cq_allocated, sq_allocated;
int rc;
@ -462,9 +620,7 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
status.done = false;
nvme_mutex_lock(&driver->lock);
max_io_queues = driver->max_io_queues;
nvme_mutex_unlock(&driver->lock);
max_io_queues = DEFAULT_MAX_IO_QUEUES;
rc = nvme_ctrlr_cmd_set_num_queues(ctrlr, max_io_queues,
nvme_completion_poll_cb, &status);
@ -490,62 +646,6 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
ctrlr->num_io_queues = nvme_min(sq_allocated, cq_allocated);
nvme_mutex_lock(&driver->lock);
driver->max_io_queues = nvme_min(driver->max_io_queues, ctrlr->num_io_queues);
nvme_mutex_unlock(&driver->lock);
return 0;
}
static int
nvme_ctrlr_create_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_completion_poll_status status;
struct spdk_nvme_qpair *qpair;
uint32_t i;
int rc;
if (nvme_ctrlr_construct_io_qpairs(ctrlr)) {
nvme_printf(ctrlr, "nvme_ctrlr_construct_io_qpairs failed!\n");
return ENXIO;
}
for (i = 0; i < ctrlr->num_io_queues; i++) {
qpair = &ctrlr->ioq[i];
status.done = false;
rc = nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair,
nvme_completion_poll_cb, &status);
if (rc != 0) {
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
return ENXIO;
}
status.done = false;
rc = nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
nvme_completion_poll_cb, &status);
if (rc != 0) {
return rc;
}
while (status.done == false) {
spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
if (spdk_nvme_cpl_is_error(&status.cpl)) {
nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
return ENXIO;
}
nvme_qpair_reset(qpair);
}
return 0;
}
@ -823,7 +923,7 @@ nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
return -1;
}
if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
if (nvme_ctrlr_construct_io_qpairs(ctrlr)) {
return -1;
}
@ -910,6 +1010,9 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
ctrlr->is_failed = false;
ctrlr->flags = 0;
TAILQ_INIT(&ctrlr->free_io_qpairs);
TAILQ_INIT(&ctrlr->active_io_qpairs);
nvme_mutex_init_recursive(&ctrlr->ctrlr_lock);
return 0;
@ -944,25 +1047,6 @@ nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
void
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
struct spdk_nvme_qpair *qpair;
nvme_assert(nvme_thread_ioq_index >= 0, ("no ioq_index assigned for thread\n"));
qpair = &ctrlr->ioq[nvme_thread_ioq_index];
nvme_qpair_submit_request(qpair, req);
}
int32_t
spdk_nvme_ctrlr_process_io_completions(struct spdk_nvme_ctrlr *ctrlr, uint32_t max_completions)
{
nvme_assert(nvme_thread_ioq_index >= 0, ("no ioq_index assigned for thread\n"));
return spdk_nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index], max_completions);
}
int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{


@ -35,6 +35,7 @@
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
@ -49,7 +50,7 @@ spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
memcpy(&req->cmd, cmd, sizeof(req->cmd));
nvme_ctrlr_submit_io_request(ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
@ -189,6 +190,46 @@ nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
return 0;
}
int
nvme_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
return ENOMEM;
}
cmd = &req->cmd;
cmd->opc = SPDK_NVME_OPC_DELETE_IO_CQ;
cmd->cdw10 = qpair->id;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
}
int
nvme_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
req = nvme_allocate_request_null(cb_fn, cb_arg);
if (req == NULL) {
return ENOMEM;
}
cmd = &req->cmd;
cmd->opc = SPDK_NVME_OPC_DELETE_IO_SQ;
cmd->cdw10 = qpair->id;
nvme_ctrlr_submit_admin_request(ctrlr, req);
return 0;
}
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)


@ -292,8 +292,12 @@ struct spdk_nvme_qpair {
/*
* Fields below this point should not be touched on the normal I/O happy path.
*/
struct spdk_nvme_ctrlr *ctrlr;
/* List entry for spdk_nvme_ctrlr::free_io_qpairs and active_io_qpairs */
TAILQ_ENTRY(spdk_nvme_qpair) tailq;
uint64_t cmd_bus_addr;
uint64_t cpl_bus_addr;
@ -415,15 +419,13 @@ struct spdk_nvme_ctrlr {
* Stored separately from ns since nsdata should not normally be accessed during I/O.
*/
struct spdk_nvme_ns_data *nsdata;
};
extern __thread int nvme_thread_ioq_index;
TAILQ_HEAD(, spdk_nvme_qpair) free_io_qpairs;
TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
};
struct nvme_driver {
nvme_mutex_t lock;
uint16_t *ioq_index_pool;
uint32_t max_io_queues;
uint16_t ioq_index_pool_next;
TAILQ_HEAD(, spdk_nvme_ctrlr) init_ctrlrs;
TAILQ_HEAD(, spdk_nvme_ctrlr) attached_ctrlrs;
};
@ -481,6 +483,10 @@ int nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
int nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *io_que,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
int nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
void *cb_arg);
@ -506,8 +512,6 @@ int nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);
void nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
void nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
uint16_t num_entries,
@ -516,8 +520,6 @@ int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
void nvme_qpair_destroy(struct spdk_nvme_qpair *qpair);
void nvme_qpair_enable(struct spdk_nvme_qpair *qpair);
void nvme_qpair_disable(struct spdk_nvme_qpair *qpair);
int32_t spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
uint32_t max_completions);
void nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
struct nvme_request *req);
void nvme_qpair_reset(struct spdk_nvme_qpair *qpair);


@ -175,7 +175,8 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
}
int
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
@ -187,7 +188,7 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags);
if (req != NULL) {
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
} else {
return ENOMEM;
@ -195,7 +196,8 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
}
int
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn)
@ -213,7 +215,7 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags);
if (req != NULL) {
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
} else {
return ENOMEM;
@ -221,7 +223,8 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count
}
int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
void *buffer, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
@ -233,7 +236,7 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
if (req != NULL) {
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
} else {
return ENOMEM;
@ -241,7 +244,8 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
}
int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn)
@ -259,7 +263,7 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_coun
req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
if (req != NULL) {
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
} else {
return ENOMEM;
@ -267,8 +271,9 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_coun
}
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
struct nvme_request *req;
@ -293,13 +298,13 @@ spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
cmd->cdw12 = lba_count - 1;
cmd->cdw12 |= io_flags;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *payload,
uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
@ -324,13 +329,14 @@ spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
cmd->cdw10 = num_ranges - 1;
cmd->cdw11 = SPDK_NVME_DSM_ATTR_DEALLOCATE;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -344,13 +350,14 @@ spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb
cmd->opc = SPDK_NVME_OPC_FLUSH;
cmd->nsid = ns->id;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
@ -378,13 +385,14 @@ spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
/* Bits 30-31 */
cmd->cdw10 |= (uint32_t)cptpl << 30;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
@ -411,13 +419,14 @@ spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
/* Bits 8-15 */
cmd->cdw10 |= (uint32_t)type << 8;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
@ -445,14 +454,16 @@ spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
/* Bits 8-15 */
cmd->cdw10 |= (uint32_t)type << 8;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}
int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *payload, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
uint32_t num_dwords;
struct nvme_request *req;
@ -473,7 +484,7 @@ spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
cmd->cdw10 = num_dwords;
nvme_ctrlr_submit_io_request(ns->ctrlr, req);
nvme_qpair_submit_request(qpair, req);
return 0;
}


@ -425,27 +425,27 @@ nvme_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
*
* \section async_io I/O commands
*
* The application may submit I/O from one or more threads
* and must call nvme_ctrlr_process_io_completions()
* from each thread that submitted I/O.
* The application may submit I/O from one or more threads on one or more queue pairs
* and must call spdk_nvme_qpair_process_completions()
* for each queue pair on which I/O was submitted.
*
* When the application calls nvme_ctrlr_process_io_completions(),
* if the NVMe driver detects completed I/Os that were submitted on that thread,
* When the application calls spdk_nvme_qpair_process_completions(),
* if the NVMe driver detects completed I/Os that were submitted on that queue,
* it will invoke the registered callback function
* for each I/O within the context of nvme_ctrlr_process_io_completions().
* for each I/O within the context of spdk_nvme_qpair_process_completions().
*
* \section async_admin Admin commands
*
* The application may submit admin commands from one or more threads
* and must call nvme_ctrlr_process_admin_completions()
* and must call spdk_nvme_ctrlr_process_admin_completions()
* from at least one thread to receive admin command completions.
* The thread that processes admin completions need not be the same thread that submitted the
* admin commands.
*
* When the application calls nvme_ctrlr_process_admin_completions(),
* When the application calls spdk_nvme_ctrlr_process_admin_completions(),
* if the NVMe driver detects completed admin commands submitted from any thread,
* it will invoke the registered callback function
* for each command within the context of nvme_ctrlr_process_admin_completions().
* for each command within the context of spdk_nvme_ctrlr_process_admin_completions().
*
* It is the application's responsibility to manage the order of submitted admin commands.
* If certain admin commands must be submitted while no other commands are outstanding,
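As a sketch of this threading model (hypothetical poller; `qpairs` and `num_qpairs` are assumed application state):

    uint32_t i;

    /* This thread owns these qpairs, so no locking is needed here. */
    for (i = 0; i < num_qpairs; i++) {
            spdk_nvme_qpair_process_completions(qpairs[i], 0);
    }

    /* Admin completions may be reaped from any single thread. */
    spdk_nvme_ctrlr_process_admin_completions(ctrlr);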


@ -64,6 +64,7 @@ struct ns_entry {
struct ns_worker_ctx {
struct ns_entry *entry;
struct spdk_nvme_qpair *qpair;
uint64_t io_completed;
uint64_t io_completed_error;
uint64_t io_submitted;
@ -195,10 +196,12 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
if ((g_rw_percentage == 100) ||
(g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
rc = spdk_nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
rc = spdk_nvme_ns_cmd_read(entry->ns, ns_ctx->qpair, task->buf,
offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
} else {
rc = spdk_nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
rc = spdk_nvme_ns_cmd_write(entry->ns, ns_ctx->qpair, task->buf,
offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
@ -245,7 +248,7 @@ io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
static void
check_io(struct ns_worker_ctx *ns_ctx)
{
spdk_nvme_ctrlr_process_io_completions(ns_ctx->entry->ctrlr, 0);
spdk_nvme_qpair_process_completions(ns_ctx->qpair, 0);
}
static void
@ -275,14 +278,14 @@ work_fn(void *arg)
printf("Starting thread on core %u\n", worker->lcore);
if (spdk_nvme_register_io_thread() != 0) {
fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
return -1;
}
/* Submit initial I/O for each namespace. */
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
ns_ctx->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_ctx->entry->ctrlr, 0);
if (ns_ctx->qpair == NULL) {
fprintf(stderr, "spdk_nvme_ctrlr_alloc_io_qpair() failed on core %u\n", worker->lcore);
return -1;
}
submit_io(ns_ctx, g_queue_depth);
ns_ctx = ns_ctx->next;
}
@ -319,11 +322,10 @@ work_fn(void *arg)
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
drain_io(ns_ctx);
spdk_nvme_ctrlr_free_io_qpair(ns_ctx->qpair);
ns_ctx = ns_ctx->next;
}
spdk_nvme_unregister_io_thread();
return 0;
}


@ -290,6 +290,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
struct io_request *req;
struct spdk_nvme_ns *ns;
struct spdk_nvme_qpair *qpair;
const struct spdk_nvme_ns_data *nsdata;
ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
@ -318,18 +319,25 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
return 0;
}
qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, 0);
if (!qpair) {
rte_free(req);
return -1;
}
nseg = req->nseg;
for (i = 0; i < nseg; i++) {
memset(req->iovs[i].iov_base, DATA_PATTERN, req->iovs[i].iov_len);
}
rc = spdk_nvme_ns_cmd_writev(ns, BASE_LBA_START, lba_count,
rc = spdk_nvme_ns_cmd_writev(ns, qpair, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
if (rc != 0) {
fprintf(stderr, "Writev Failed\n");
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return -1;
}
@ -337,10 +345,11 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
io_complete_flag = 0;
while (!io_complete_flag)
spdk_nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
spdk_nvme_qpair_process_completions(qpair, 1);
if (io_complete_flag != 1) {
fprintf(stderr, "%s Writev Failed\n", dev->name);
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return -1;
}
@ -352,22 +361,24 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
memset(req->iovs[i].iov_base, 0, req->iovs[i].iov_len);
}
rc = spdk_nvme_ns_cmd_readv(ns, BASE_LBA_START, lba_count,
rc = spdk_nvme_ns_cmd_readv(ns, qpair, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
if (rc != 0) {
fprintf(stderr, "Readv Failed\n");
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return -1;
}
while (!io_complete_flag)
spdk_nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
spdk_nvme_qpair_process_completions(qpair, 1);
if (io_complete_flag != 1) {
fprintf(stderr, "%s Readv Failed\n", dev->name);
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return -1;
}
@ -377,6 +388,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
for (j = 0; j < req->iovs[i].iov_len; j++) {
if (buf[j] != DATA_PATTERN) {
fprintf(stderr, "Write/Read Sucess, But %s Memcmp Failed\n", dev->name);
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return -1;
}
@ -384,6 +396,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
}
fprintf(stdout, "%s %s Test Passed\n", dev->name, __func__);
spdk_nvme_ctrlr_free_io_qpair(qpair);
rte_free(req);
return rc;
}
@ -468,12 +481,7 @@ int main(int argc, char **argv)
exit(1);
}
if (num_devs) {
rc = spdk_nvme_register_io_thread();
if (rc != 0)
return rc;
}
rc = 0;
foreach_dev(iter) {
if (writev_readv_tests(iter, build_io_request_0)
|| writev_readv_tests(iter, build_io_request_1)
@ -482,6 +490,7 @@ int main(int argc, char **argv)
|| writev_readv_tests(iter, build_io_request_4)
|| writev_readv_tests(iter, build_io_request_5)
|| writev_readv_tests(iter, build_io_request_6)) {
rc = 1;
printf("%s: failed sgl tests\n", iter->name);
}
}
@ -494,8 +503,5 @@ int main(int argc, char **argv)
spdk_nvme_detach(dev->ctrlr);
}
if (num_devs)
spdk_nvme_unregister_io_thread();
return rc;
}


@ -37,10 +37,6 @@
char outbuf[OUTBUF_SIZE];
volatile int sync_start = 0;
volatile int threads_pass = 0;
volatile int threads_fail = 0;
uint64_t nvme_vtophys(void *buf)
{
return (uintptr_t)buf;
@ -69,101 +65,6 @@ nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
return 0;
}
static void prepare_for_test(uint32_t max_io_queues)
{
struct nvme_driver *driver = &g_nvme_driver;
driver->max_io_queues = max_io_queues;
if (driver->ioq_index_pool != NULL) {
free(driver->ioq_index_pool);
driver->ioq_index_pool = NULL;
}
driver->ioq_index_pool_next = 0;
nvme_thread_ioq_index = -1;
sync_start = 0;
threads_pass = 0;
threads_fail = 0;
}
static void *
nvme_thread(void *arg)
{
int rc;
/* Try to synchronize the nvme_register_io_thread() calls
* as much as possible to ensure the mutex locking is tested
* correctly.
*/
while (sync_start == 0)
;
rc = spdk_nvme_register_io_thread();
if (rc == 0) {
__sync_fetch_and_add(&threads_pass, 1);
} else {
__sync_fetch_and_add(&threads_fail, 1);
}
pthread_exit(NULL);
}
static void
test1(void)
{
struct nvme_driver *driver = &g_nvme_driver;
int rc;
int last_index;
prepare_for_test(1);
CU_ASSERT(nvme_thread_ioq_index == -1);
rc = spdk_nvme_register_io_thread();
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_thread_ioq_index >= 0);
CU_ASSERT(driver->ioq_index_pool_next == 1);
/* try to register thread again - this should fail */
last_index = nvme_thread_ioq_index;
rc = spdk_nvme_register_io_thread();
CU_ASSERT(rc != 0);
/* assert that the ioq_index was unchanged */
CU_ASSERT(nvme_thread_ioq_index == last_index);
spdk_nvme_unregister_io_thread();
CU_ASSERT(nvme_thread_ioq_index == -1);
CU_ASSERT(driver->ioq_index_pool_next == 0);
}
static void
test2(void)
{
int num_threads = 16;
int i;
pthread_t td;
/*
* Start 16 threads, but only simulate a maximum of 12 I/O
* queues. 12 threads should be able to successfully
* register, while the other 4 should fail.
*/
prepare_for_test(12);
for (i = 0; i < num_threads; i++) {
pthread_create(&td, NULL, nvme_thread, NULL);
}
sync_start = 1;
while ((threads_pass + threads_fail) < num_threads)
;
CU_ASSERT(threads_pass == 12);
CU_ASSERT(threads_fail == 4);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
@ -179,6 +80,7 @@ int main(int argc, char **argv)
return CU_get_error();
}
#if 0
if (
CU_add_test(suite, "test1", test1) == NULL
|| CU_add_test(suite, "test2", test2) == NULL
@ -186,6 +88,7 @@ int main(int argc, char **argv)
CU_cleanup_registry();
return CU_get_error();
}
#endif
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
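The disabled tests covered the old model's resource limit: spdk_nvme_register_io_thread() failed once all I/O queue indexes were taken. That limit now surfaces as an allocation failure instead, so a rewrite of the removed worker would look roughly like this (hypothetical sketch only; the real tests have not been rewritten yet, hence the #if 0 above, and threads_pass/threads_fail are as in the removed code):

	static void *
	nvme_thread(void *arg)
	{
		struct spdk_nvme_ctrlr *ctrlr = arg;
		struct spdk_nvme_qpair *qpair;

		/* Replaces spdk_nvme_register_io_thread(): a NULL return here
		 * corresponds to the old non-zero return code once all of the
		 * controller's I/O queues are taken. */
		qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
		if (qpair == NULL) {
			__sync_fetch_and_add(&threads_fail, 1);
		} else {
			__sync_fetch_and_add(&threads_pass, 1);
			spdk_nvme_ctrlr_free_io_qpair(qpair);
		}

		pthread_exit(NULL);
	}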

View File

@ -37,7 +37,6 @@
struct nvme_driver g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.max_io_queues = DEFAULT_MAX_IO_QUEUES
};
static uint16_t g_pci_vendor_id;
@ -80,6 +79,11 @@ int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
uint16_t num_entries, uint16_t num_trackers,
struct spdk_nvme_ctrlr *ctrlr)
{
qpair->id = id;
qpair->num_entries = num_entries;
qpair->qprio = 0;
qpair->ctrlr = ctrlr;
return 0;
}
@ -196,6 +200,22 @@ nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
return 0;
}
int
nvme_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
fake_cpl_success(cb_fn, cb_arg);
return 0;
}
int
nvme_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
fake_cpl_success(cb_fn, cb_arg);
return 0;
}
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
@ -396,6 +416,78 @@ test_nvme_ctrlr_init_en_0_rdy_0(void)
nvme_ctrlr_destruct(&ctrlr);
}
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr, NULL) == 0);
/* Fake out the parts of ctrlr needed for I/O qpair allocation */
ctrlr->num_io_queues = num_io_queues;
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct_io_qpairs(ctrlr) == 0);
}
static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
nvme_ctrlr_destruct(ctrlr);
}
static void
test_alloc_io_qpair_1(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0;
setup_qpairs(&ctrlr, 1);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
/* Only 1 I/O qpair is available, so this second allocation should fail */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/*
* Now that the qpair has been returned to the free list,
* we should be able to allocate it again
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
cleanup_qpairs(&ctrlr);
}
static void
test_alloc_io_qpair_2(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0, *q1;
setup_qpairs(&ctrlr, 2);
/*
* Allocate 2 qpairs and free them
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/*
* Allocate 2 qpairs and free them in the reverse order
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
cleanup_qpairs(&ctrlr);
}
static void
test_nvme_ctrlr_fail(void)
{
@ -500,6 +592,8 @@ int main(int argc, char **argv)
test_nvme_ctrlr_init_en_1_rdy_1) == NULL
|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
test_nvme_ctrlr_init_en_0_rdy_0) == NULL
|| CU_add_test(suite, "alloc_io_qpair 1", test_alloc_io_qpair_1) == NULL
|| CU_add_test(suite, "alloc_io_qpair 2", test_alloc_io_qpair_2) == NULL
|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
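The two new alloc_io_qpair tests pin down the pool semantics: qpairs come from a fixed set sized by ctrlr->num_io_queues, allocation returns NULL once that set is exhausted, and freed qpairs go back on the free list and can be handed out again in any order. An application can therefore take as many qpairs from each controller as that controller offers, e.g. (sketch; qpairs[] and MAX_WORKERS are illustrative names, not part of this change):

	struct spdk_nvme_qpair *qpairs[MAX_WORKERS];
	uint32_t nr = 0;

	/* Keep allocating until this controller runs out of I/O queues or
	 * we have one qpair per worker; a different controller may yield a
	 * different count. */
	while (nr < MAX_WORKERS &&
	       (qpairs[nr] = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0)) != NULL)
		nr++;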

View File

@ -242,8 +242,7 @@ nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
}
void
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
verify_fn(req);
/* stop analyzer from thinking stack variable addresses are stored in a global */
@ -409,11 +408,12 @@ static void
test_io_raw_cmd(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair qpair = {};
struct spdk_nvme_cmd cmd = {};
verify_fn = verify_io_raw_cmd;
spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &cmd, NULL, 1, NULL, NULL);
spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
}
static void

View File

@ -90,14 +90,14 @@ spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
}
void
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
g_request = req;
}
static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair,
uint32_t sector_size, uint32_t max_xfer_size,
uint32_t stripe_size)
{
@ -109,6 +109,8 @@ prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
ns->sectors_per_stripe = ns->stripe_size / ns->sector_size;
memset(qpair, 0, sizeof(*qpair));
g_request = NULL;
}
@ -124,18 +126,19 @@ static void
split_test(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_ctrlr ctrlr;
void *payload;
uint64_t lba, cmd_lba;
uint32_t lba_count, cmd_lba_count;
int rc;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(512);
lba = 0;
lba_count = 1;
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@ -154,6 +157,7 @@ split_test2(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@ -166,12 +170,12 @@ split_test2(void)
* on the max I/O boundary into two I/Os of 128 KB.
*/
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(256 * 1024);
lba = 0;
lba_count = (256 * 1024) / 512;
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@ -207,6 +211,7 @@ split_test3(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@ -221,12 +226,12 @@ split_test3(void)
* 2) LBA = 266, count = 256 blocks
*/
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(256 * 1024);
lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
lba_count = (256 * 1024) / 512;
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@ -262,6 +267,7 @@ split_test4(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@ -278,12 +284,12 @@ split_test4(void)
* 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
*/
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 128 * 1024);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 128 * 1024);
payload = malloc(256 * 1024);
lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
lba_count = (256 * 1024) / 512;
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
CU_ASSERT(rc == 0);
@ -339,6 +345,7 @@ test_cmd_child_request(void)
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
struct nvme_request *child;
void *payload;
@ -349,22 +356,22 @@ test_cmd_child_request(void)
uint32_t max_io_size = 128 * 1024;
uint32_t sectors_per_max_io = max_io_size / sector_size;
prepare_for_test(&ns, &ctrlr, sector_size, max_io_size, 0);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, max_io_size, 0);
payload = malloc(128 * 1024);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, sectors_per_max_io, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->payload_offset == 0);
CU_ASSERT(g_request->num_children == 0);
nvme_free_request(g_request);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->payload_offset == 0);
CU_ASSERT(g_request->num_children == 0);
nvme_free_request(g_request);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->num_children == 4);
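The added qpair argument changes none of the splitting behavior these tests verify: a request larger than the namespace's max transfer size is still broken into child requests of at most sectors_per_max_io blocks each, with extra boundary splits when stripe_size is nonzero. The child-count arithmetic the assertions rely on, for the non-striped cases (sketch, using the values passed to prepare_for_test()):

	/* 512-byte sectors, 128 KB max transfer => sectors_per_max_io == 256 */
	uint32_t sectors_per_max_io = max_io_size / sector_size;
	uint32_t num_children;

	if (lba_count <= sectors_per_max_io)
		num_children = 0;	/* fits in one request: no children */
	else
		num_children = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;

So the 256 KB reads in split_test2 and split_test3 produce two 128 KB children, and the 4x-max read in test_cmd_child_request produces four.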
@ -389,12 +396,13 @@ test_nvme_ns_cmd_flush(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
spdk_nvme_ns_cmd_flush(&ns, cb_fn, cb_arg);
spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
CU_ASSERT(g_request->cmd.nsid == ns.id);
@ -406,14 +414,15 @@ test_nvme_ns_cmd_write_zeroes(void)
{
struct spdk_nvme_ns ns = { 0 };
struct spdk_nvme_ctrlr ctrlr = { 0 };
struct spdk_nvme_qpair qpair;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
uint64_t cmd_lba;
uint32_t cmd_lba_count;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
spdk_nvme_ns_cmd_write_zeroes(&ns, 0, 2, cb_fn, cb_arg, 0);
spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
CU_ASSERT(g_request->cmd.nsid == ns.id);
nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
@ -428,16 +437,17 @@ test_nvme_ns_cmd_deallocate(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
uint16_t num_ranges = 1;
void *payload = NULL;
int rc = 0;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range));
spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
spdk_nvme_ns_cmd_deallocate(&ns, &qpair, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
@ -447,7 +457,7 @@ test_nvme_ns_cmd_deallocate(void)
num_ranges = 256;
payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range));
spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
spdk_nvme_ns_cmd_deallocate(&ns, &qpair, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
@ -457,7 +467,7 @@ test_nvme_ns_cmd_deallocate(void)
payload = NULL;
num_ranges = 0;
rc = spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_deallocate(&ns, &qpair, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(rc != 0);
}
@ -466,12 +476,13 @@ test_nvme_ns_cmd_readv(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
void *cb_arg;
cb_arg = malloc(512);
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
rc = spdk_nvme_ns_cmd_readv(&ns, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
nvme_request_next_sge);
CU_ASSERT(rc == 0);
@ -482,7 +493,7 @@ test_nvme_ns_cmd_readv(void)
CU_ASSERT(g_request->payload.u.sgl.cb_arg == cb_arg);
CU_ASSERT(g_request->cmd.nsid == ns.id);
rc = spdk_nvme_ns_cmd_readv(&ns, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
NULL);
CU_ASSERT(rc != 0);
@ -495,12 +506,13 @@ test_nvme_ns_cmd_writev(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
void *cb_arg;
cb_arg = malloc(512);
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
rc = spdk_nvme_ns_cmd_writev(&ns, 0x1000, 256, NULL, cb_arg, 0,
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
@ -512,7 +524,7 @@ test_nvme_ns_cmd_writev(void)
CU_ASSERT(g_request->payload.u.sgl.cb_arg == cb_arg);
CU_ASSERT(g_request->cmd.nsid == ns.id);
rc = spdk_nvme_ns_cmd_writev(&ns, 0x1000, 256, NULL, cb_arg, 0,
rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
NULL, nvme_request_next_sge);
CU_ASSERT(rc != 0);
@ -525,17 +537,18 @@ test_io_flags(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
void *payload;
uint64_t lba;
uint32_t lba_count;
int rc;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 128 * 1024);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 128 * 1024);
payload = malloc(256 * 1024);
lba = 0;
lba_count = (4 * 1024) / 512;
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
CU_ASSERT(rc == 0);
CU_ASSERT_FATAL(g_request != NULL);
@ -543,7 +556,7 @@ test_io_flags(void)
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
nvme_free_request(g_request);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
CU_ASSERT(rc == 0);
CU_ASSERT_FATAL(g_request != NULL);
@ -560,6 +573,7 @@ test_nvme_ns_cmd_reservation_register(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_reservation_register_data *payload;
bool ignore_key = 1;
spdk_nvme_cmd_cb cb_fn = NULL;
@ -567,10 +581,10 @@ test_nvme_ns_cmd_reservation_register(void)
int rc = 0;
uint32_t tmp_cdw10;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
rc = spdk_nvme_ns_cmd_reservation_register(&ns, payload, ignore_key,
rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
cb_fn, cb_arg);
@ -594,6 +608,7 @@ test_nvme_ns_cmd_reservation_release(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_reservation_key_data *payload;
bool ignore_key = 1;
spdk_nvme_cmd_cb cb_fn = NULL;
@ -601,10 +616,10 @@ test_nvme_ns_cmd_reservation_release(void)
int rc = 0;
uint32_t tmp_cdw10;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
rc = spdk_nvme_ns_cmd_reservation_release(&ns, payload, ignore_key,
rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
@ -628,6 +643,7 @@ test_nvme_ns_cmd_reservation_acquire(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_reservation_acquire_data *payload;
bool ignore_key = 1;
spdk_nvme_cmd_cb cb_fn = NULL;
@ -635,10 +651,10 @@ test_nvme_ns_cmd_reservation_acquire(void)
int rc = 0;
uint32_t tmp_cdw10;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, payload, ignore_key,
rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
@ -662,15 +678,16 @@ test_nvme_ns_cmd_reservation_report(void)
{
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
struct spdk_nvme_reservation_status_data *payload;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
int rc = 0;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
prepare_for_test(&ns, &ctrlr, &qpair, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_status_data));
rc = spdk_nvme_ns_cmd_reservation_report(&ns, payload, 0x1000,
rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, 0x1000,
cb_fn, cb_arg);
CU_ASSERT(rc == 0);

View File

@ -40,7 +40,6 @@
struct nvme_driver g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.max_io_queues = DEFAULT_MAX_IO_QUEUES,
};
int32_t spdk_nvme_retry_count = 1;
@ -226,6 +225,8 @@ prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
{
memset(ctrlr, 0, sizeof(*ctrlr));
ctrlr->regs = regs;
TAILQ_INIT(&ctrlr->free_io_qpairs);
TAILQ_INIT(&ctrlr->active_io_qpairs);
nvme_qpair_construct(qpair, 1, 128, 32, ctrlr);
CU_ASSERT(qpair->sq_tail == 0);
@ -566,6 +567,8 @@ static void test_nvme_qpair_destroy(void)
memset(&ctrlr, 0, sizeof(ctrlr));
ctrlr.regs = &regs;
TAILQ_INIT(&ctrlr.free_io_qpairs);
TAILQ_INIT(&ctrlr.active_io_qpairs);
nvme_qpair_construct(&qpair, 1, 128, 32, &ctrlr);
nvme_qpair_destroy(&qpair);
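Both fixtures in this file now seed the controller's free and active qpair lists before calling nvme_qpair_construct(). Presumably the construct and destroy paths link the qpair onto these lists, so a stack-allocated ctrlr with zeroed TAILQ heads would fault without this. The required setup in isolation (sketch; regs and qpair as in the tests above):

	struct spdk_nvme_ctrlr ctrlr = {};

	ctrlr.regs = &regs;
	/* Must be valid before nvme_qpair_construct()/nvme_qpair_destroy()
	 * touch them (assumption based on the TAILQ_INITs added above). */
	TAILQ_INIT(&ctrlr.free_io_qpairs);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	nvme_qpair_construct(&qpair, 1 /* id */, 128 /* num_entries */,
			     32 /* num_trackers */, &ctrlr);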