/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 */
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
#include "spdk/nvme.h"
|
2019-06-11 11:55:22 +00:00
|
|
|
#include "spdk/vmd.h"
|
2020-10-13 15:35:19 +00:00
|
|
|
#include "spdk/nvme_zns.h"
|
2016-08-10 17:41:12 +00:00
|
|
|
#include "spdk/env.h"
|
2021-12-06 06:00:59 +00:00
|
|
|
#include "spdk/string.h"
|
|
|
|
#include "spdk/log.h"
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
/* Tracks one attached NVMe controller on the global g_controllers list. */
struct ctrlr_entry {
	struct spdk_nvme_ctrlr *ctrlr;	/* controller handle returned by the SPDK driver at attach time */
	TAILQ_ENTRY(ctrlr_entry) link;	/* linkage for g_controllers */
	char name[1024];		/* human-readable "model (serial)" string built in attach_cb() */
};
|
|
|
|
|
|
|
|
/* Tracks one active namespace on the global g_namespaces list. */
struct ns_entry {
	struct spdk_nvme_ctrlr *ctrlr;	/* controller that owns this namespace */
	struct spdk_nvme_ns *ns;	/* namespace handle */
	TAILQ_ENTRY(ns_entry) link;	/* linkage for g_namespaces */
	struct spdk_nvme_qpair *qpair;	/* I/O queue pair allocated per-namespace in hello_world() */
};
|
|
|
|
|
2020-09-27 02:51:01 +00:00
|
|
|
/* All controllers attached during probe; populated by attach_cb(), drained by cleanup(). */
static TAILQ_HEAD(, ctrlr_entry) g_controllers = TAILQ_HEAD_INITIALIZER(g_controllers);
/* All active namespaces on those controllers; populated by register_ns(), drained by cleanup(). */
static TAILQ_HEAD(, ns_entry) g_namespaces = TAILQ_HEAD_INITIALIZER(g_namespaces);
/* Transport ID to probe; defaults to PCIe in parse_args(), overridable with -r. */
static struct spdk_nvme_transport_id g_trid = {};

/* Whether to enumerate NVMe devices behind Intel VMD (-V flag). */
static bool g_vmd = false;
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
static void
|
|
|
|
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
|
|
|
|
{
|
|
|
|
struct ns_entry *entry;
|
|
|
|
|
|
|
|
if (!spdk_nvme_ns_is_active(ns)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
entry = malloc(sizeof(struct ns_entry));
|
|
|
|
if (entry == NULL) {
|
|
|
|
perror("ns_entry malloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
entry->ctrlr = ctrlr;
|
|
|
|
entry->ns = ns;
|
2020-09-27 02:51:01 +00:00
|
|
|
TAILQ_INSERT_TAIL(&g_namespaces, entry, link);
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
printf(" Namespace ID: %d size: %juGB\n", spdk_nvme_ns_get_id(ns),
|
|
|
|
spdk_nvme_ns_get_size(ns) / 1000000000);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Per-namespace state for the write -> read demo sequence driven by hello_world(). */
struct hello_world_sequence {
	struct ns_entry *ns_entry;	/* namespace/qpair the I/O is issued against */
	char *buf;			/* 0x1000-byte data buffer: CMB mapping or spdk_zmalloc'd DMA memory */
	unsigned using_cmb_io;		/* nonzero if buf points into the controller memory buffer */
	int is_completed;		/* 0 = I/O pending, 1 = done; set to 2 on error (process exits) */
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
read_complete(void *arg, const struct spdk_nvme_cpl *completion)
|
|
|
|
{
|
|
|
|
struct hello_world_sequence *sequence = arg;
|
|
|
|
|
2019-06-05 20:56:42 +00:00
|
|
|
/* Assume the I/O was successful */
|
|
|
|
sequence->is_completed = 1;
|
|
|
|
/* See if an error occurred. If so, display information
|
|
|
|
* about it, and set completion value so that I/O
|
|
|
|
* caller is aware that an error occurred.
|
|
|
|
*/
|
|
|
|
if (spdk_nvme_cpl_is_error(completion)) {
|
|
|
|
spdk_nvme_qpair_print_completion(sequence->ns_entry->qpair, (struct spdk_nvme_cpl *)completion);
|
|
|
|
fprintf(stderr, "I/O error status: %s\n", spdk_nvme_cpl_get_status_string(&completion->status));
|
|
|
|
fprintf(stderr, "Read I/O failed, aborting run\n");
|
|
|
|
sequence->is_completed = 2;
|
2020-10-22 09:22:30 +00:00
|
|
|
exit(1);
|
2019-06-05 20:56:42 +00:00
|
|
|
}
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
/*
|
|
|
|
* The read I/O has completed. Print the contents of the
|
|
|
|
* buffer, free the buffer, then mark the sequence as
|
|
|
|
* completed. This will trigger the hello_world() function
|
|
|
|
* to exit its polling loop.
|
|
|
|
*/
|
|
|
|
printf("%s", sequence->buf);
|
2018-07-18 00:08:04 +00:00
|
|
|
spdk_free(sequence->buf);
|
2016-06-07 21:32:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
write_complete(void *arg, const struct spdk_nvme_cpl *completion)
|
|
|
|
{
|
|
|
|
struct hello_world_sequence *sequence = arg;
|
|
|
|
struct ns_entry *ns_entry = sequence->ns_entry;
|
|
|
|
int rc;
|
|
|
|
|
2019-06-05 20:56:42 +00:00
|
|
|
/* See if an error occurred. If so, display information
|
|
|
|
* about it, and set completion value so that I/O
|
|
|
|
* caller is aware that an error occurred.
|
|
|
|
*/
|
|
|
|
if (spdk_nvme_cpl_is_error(completion)) {
|
|
|
|
spdk_nvme_qpair_print_completion(sequence->ns_entry->qpair, (struct spdk_nvme_cpl *)completion);
|
|
|
|
fprintf(stderr, "I/O error status: %s\n", spdk_nvme_cpl_get_status_string(&completion->status));
|
|
|
|
fprintf(stderr, "Write I/O failed, aborting run\n");
|
|
|
|
sequence->is_completed = 2;
|
|
|
|
exit(1);
|
|
|
|
}
|
2016-06-07 21:32:27 +00:00
|
|
|
/*
|
|
|
|
* The write I/O has completed. Free the buffer associated with
|
|
|
|
* the write I/O and allocate a new zeroed buffer for reading
|
|
|
|
* the data back from the NVMe namespace.
|
|
|
|
*/
|
2017-04-24 17:47:25 +00:00
|
|
|
if (sequence->using_cmb_io) {
|
2020-02-10 21:13:53 +00:00
|
|
|
spdk_nvme_ctrlr_unmap_cmb(ns_entry->ctrlr);
|
2017-04-24 17:47:25 +00:00
|
|
|
} else {
|
2018-07-18 00:08:04 +00:00
|
|
|
spdk_free(sequence->buf);
|
2017-04-24 17:47:25 +00:00
|
|
|
}
|
2018-07-18 00:08:04 +00:00
|
|
|
sequence->buf = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
rc = spdk_nvme_ns_cmd_read(ns_entry->ns, ns_entry->qpair, sequence->buf,
|
|
|
|
0, /* LBA start */
|
|
|
|
1, /* number of LBAs */
|
|
|
|
read_complete, (void *)sequence, 0);
|
|
|
|
if (rc != 0) {
|
|
|
|
fprintf(stderr, "starting read I/O failed\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-13 15:35:19 +00:00
|
|
|
static void
|
|
|
|
reset_zone_complete(void *arg, const struct spdk_nvme_cpl *completion)
|
|
|
|
{
|
|
|
|
struct hello_world_sequence *sequence = arg;
|
|
|
|
|
|
|
|
/* Assume the I/O was successful */
|
|
|
|
sequence->is_completed = 1;
|
|
|
|
/* See if an error occurred. If so, display information
|
|
|
|
* about it, and set completion value so that I/O
|
|
|
|
* caller is aware that an error occurred.
|
|
|
|
*/
|
|
|
|
if (spdk_nvme_cpl_is_error(completion)) {
|
|
|
|
spdk_nvme_qpair_print_completion(sequence->ns_entry->qpair, (struct spdk_nvme_cpl *)completion);
|
|
|
|
fprintf(stderr, "I/O error status: %s\n", spdk_nvme_cpl_get_status_string(&completion->status));
|
|
|
|
fprintf(stderr, "Reset zone I/O failed, aborting run\n");
|
|
|
|
sequence->is_completed = 2;
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
reset_zone_and_wait_for_completion(struct hello_world_sequence *sequence)
|
|
|
|
{
|
|
|
|
if (spdk_nvme_zns_reset_zone(sequence->ns_entry->ns, sequence->ns_entry->qpair,
|
|
|
|
0, /* starting LBA of the zone to reset */
|
|
|
|
false, /* don't reset all zones */
|
|
|
|
reset_zone_complete,
|
|
|
|
sequence)) {
|
|
|
|
fprintf(stderr, "starting reset zone I/O failed\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
while (!sequence->is_completed) {
|
|
|
|
spdk_nvme_qpair_process_completions(sequence->ns_entry->qpair, 0);
|
|
|
|
}
|
|
|
|
sequence->is_completed = 0;
|
|
|
|
}
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
/*
 * Core of the example: for every registered namespace, allocate an I/O
 * qpair, write "Hello world!" to LBA 0, read it back (via the chained
 * write_complete -> read_complete callbacks), and free the qpair.
 * Fatal errors exit the process; allocation failures return early.
 */
static void
hello_world(void)
{
	struct ns_entry *ns_entry;
	struct hello_world_sequence sequence;
	int rc;
	size_t sz;	/* size of the controller memory buffer mapping, if any */

	TAILQ_FOREACH(ns_entry, &g_namespaces, link) {
		/*
		 * Allocate an I/O qpair that we can use to submit read/write requests
		 * to namespaces on the controller. NVMe controllers typically support
		 * many qpairs per controller. Any I/O qpair allocated for a controller
		 * can submit I/O to any namespace on that controller.
		 *
		 * The SPDK NVMe driver provides no synchronization for qpair accesses -
		 * the application must ensure only a single thread submits I/O to a
		 * qpair, and that same thread must also check for completions on that
		 * qpair. This enables extremely efficient I/O processing by making all
		 * I/O operations completely lockless.
		 */
		ns_entry->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ns_entry->ctrlr, NULL, 0);
		if (ns_entry->qpair == NULL) {
			printf("ERROR: spdk_nvme_ctrlr_alloc_io_qpair() failed\n");
			return;
		}

		/*
		 * Obtain a 4KB data buffer: first try mapping the controller memory
		 * buffer (CMB); if that is unavailable or too small, fall back to
		 * spdk_zmalloc for pinned host DMA memory. Pinned memory is required
		 * for data buffers used for SPDK NVMe I/O operations.
		 */
		sequence.using_cmb_io = 1;
		sequence.buf = spdk_nvme_ctrlr_map_cmb(ns_entry->ctrlr, &sz);
		if (sequence.buf == NULL || sz < 0x1000) {
			sequence.using_cmb_io = 0;
			sequence.buf = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		}
		if (sequence.buf == NULL) {
			printf("ERROR: write buffer allocation failed\n");
			return;
		}
		if (sequence.using_cmb_io) {
			printf("INFO: using controller memory buffer for IO\n");
		} else {
			printf("INFO: using host memory buffer for IO\n");
		}
		sequence.is_completed = 0;
		sequence.ns_entry = ns_entry;

		/*
		 * If the namespace is a Zoned Namespace, rather than a regular
		 * NVM namespace, we need to reset the first zone, before we
		 * write to it. This not needed for regular NVM namespaces.
		 */
		if (spdk_nvme_ns_get_csi(ns_entry->ns) == SPDK_NVME_CSI_ZNS) {
			reset_zone_and_wait_for_completion(&sequence);
		}

		/*
		 * Print "Hello world!" to sequence.buf. We will write this data to LBA
		 * 0 on the namespace, and then later read it back into a separate buffer
		 * to demonstrate the full I/O path.
		 */
		snprintf(sequence.buf, 0x1000, "%s", "Hello world!\n");

		/*
		 * Write the data buffer to LBA 0 of this namespace. "write_complete" and
		 * "&sequence" are specified as the completion callback function and
		 * argument respectively. write_complete() will be called with the
		 * value of &sequence as a parameter when the write I/O is completed.
		 * This allows users to potentially specify different completion
		 * callback routines for each I/O, as well as pass a unique handle
		 * as an argument so the application knows which I/O has completed.
		 *
		 * Note that the SPDK NVMe driver will only check for completions
		 * when the application calls spdk_nvme_qpair_process_completions().
		 * It is the responsibility of the application to trigger the polling
		 * process.
		 */
		rc = spdk_nvme_ns_cmd_write(ns_entry->ns, ns_entry->qpair, sequence.buf,
					    0, /* LBA start */
					    1, /* number of LBAs */
					    write_complete, &sequence, 0);
		if (rc != 0) {
			fprintf(stderr, "starting write I/O failed\n");
			exit(1);
		}

		/*
		 * Poll for completions. 0 here means process all available completions.
		 * In certain usage models, the caller may specify a positive integer
		 * instead of 0 to signify the maximum number of completions it should
		 * process. This function will never block - if there are no
		 * completions pending on the specified qpair, it will return immediately.
		 *
		 * When the write I/O completes, write_complete() will submit a new I/O
		 * to read LBA 0 into a separate buffer, specifying read_complete() as its
		 * completion routine. When the read I/O completes, read_complete() will
		 * print the buffer contents and set sequence.is_completed = 1. That will
		 * break this loop and then exit the program.
		 */
		while (!sequence.is_completed) {
			spdk_nvme_qpair_process_completions(ns_entry->qpair, 0);
		}

		/*
		 * Free the I/O qpair. This typically is done when an application exits.
		 * But SPDK does support freeing and then reallocating qpairs during
		 * operation. It is the responsibility of the caller to ensure all
		 * pending I/O are completed before trying to free the qpair.
		 */
		spdk_nvme_ctrlr_free_io_qpair(ns_entry->qpair);
	}
}
|
|
|
|
|
|
|
|
/*
 * Called once for each NVMe controller found during enumeration.
 * Returning true tells the SPDK driver to attach to the controller,
 * after which attach_cb() will be invoked for it.
 */
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attaching to %s\n", trid->traddr);

	/* Attach to every controller we find. */
	return true;
}
|
|
|
|
|
|
|
|
static void
|
2016-12-09 22:09:28 +00:00
|
|
|
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
|
2016-10-31 23:55:14 +00:00
|
|
|
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
|
2016-06-07 21:32:27 +00:00
|
|
|
{
|
2021-11-03 19:07:12 +00:00
|
|
|
int nsid;
|
2016-06-07 21:32:27 +00:00
|
|
|
struct ctrlr_entry *entry;
|
2017-03-24 05:37:22 +00:00
|
|
|
struct spdk_nvme_ns *ns;
|
2019-08-29 15:21:13 +00:00
|
|
|
const struct spdk_nvme_ctrlr_data *cdata;
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
entry = malloc(sizeof(struct ctrlr_entry));
|
|
|
|
if (entry == NULL) {
|
|
|
|
perror("ctrlr_entry malloc");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
2016-12-09 22:09:28 +00:00
|
|
|
printf("Attached to %s\n", trid->traddr);
|
2016-06-07 21:32:27 +00:00
|
|
|
|
2019-08-29 15:21:13 +00:00
|
|
|
/*
|
|
|
|
* spdk_nvme_ctrlr is the logical abstraction in SPDK for an NVMe
|
|
|
|
* controller. During initialization, the IDENTIFY data for the
|
|
|
|
* controller is read using an NVMe admin command, and that data
|
|
|
|
* can be retrieved using spdk_nvme_ctrlr_get_data() to get
|
|
|
|
* detailed information on the controller. Refer to the NVMe
|
|
|
|
* specification for more details on IDENTIFY for NVMe controllers.
|
|
|
|
*/
|
|
|
|
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
snprintf(entry->name, sizeof(entry->name), "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
|
|
|
|
|
|
|
|
entry->ctrlr = ctrlr;
|
2020-09-27 02:51:01 +00:00
|
|
|
TAILQ_INSERT_TAIL(&g_controllers, entry, link);
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
/*
|
2017-02-15 23:36:22 +00:00
|
|
|
* Each controller has one or more namespaces. An NVMe namespace is basically
|
2016-06-07 21:32:27 +00:00
|
|
|
* equivalent to a SCSI LUN. The controller's IDENTIFY data tells us how
|
|
|
|
* many namespaces exist on the controller. For Intel(R) P3X00 controllers,
|
|
|
|
* it will just be one namespace.
|
|
|
|
*
|
|
|
|
* Note that in NVMe, namespace IDs start at 1, not 0.
|
|
|
|
*/
|
2021-11-03 19:07:12 +00:00
|
|
|
for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr); nsid != 0;
|
|
|
|
nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
|
2017-03-24 05:37:22 +00:00
|
|
|
ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
|
|
|
|
if (ns == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
register_ns(ctrlr, ns);
|
2016-06-07 21:32:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
cleanup(void)
|
|
|
|
{
|
2020-09-27 02:51:01 +00:00
|
|
|
struct ns_entry *ns_entry, *tmp_ns_entry;
|
|
|
|
struct ctrlr_entry *ctrlr_entry, *tmp_ctrlr_entry;
|
2020-10-15 23:51:35 +00:00
|
|
|
struct spdk_nvme_detach_ctx *detach_ctx = NULL;
|
2016-06-07 21:32:27 +00:00
|
|
|
|
2020-09-27 02:51:01 +00:00
|
|
|
TAILQ_FOREACH_SAFE(ns_entry, &g_namespaces, link, tmp_ns_entry) {
|
|
|
|
TAILQ_REMOVE(&g_namespaces, ns_entry, link);
|
2016-06-07 21:32:27 +00:00
|
|
|
free(ns_entry);
|
|
|
|
}
|
|
|
|
|
2020-09-27 02:51:01 +00:00
|
|
|
TAILQ_FOREACH_SAFE(ctrlr_entry, &g_controllers, link, tmp_ctrlr_entry) {
|
|
|
|
TAILQ_REMOVE(&g_controllers, ctrlr_entry, link);
|
2020-10-15 23:51:35 +00:00
|
|
|
spdk_nvme_detach_async(ctrlr_entry->ctrlr, &detach_ctx);
|
2016-06-07 21:32:27 +00:00
|
|
|
free(ctrlr_entry);
|
|
|
|
}
|
2020-10-15 23:51:35 +00:00
|
|
|
|
2021-06-24 20:30:55 +00:00
|
|
|
if (detach_ctx) {
|
|
|
|
spdk_nvme_detach_poll(detach_ctx);
|
2020-10-15 23:51:35 +00:00
|
|
|
}
|
2016-06-07 21:32:27 +00:00
|
|
|
}
|
|
|
|
|
2019-06-11 11:55:22 +00:00
|
|
|
/* Print the command-line help text for this example to stdout. */
static void
usage(const char *program_name)
{
	printf("%s [options]", program_name);
	printf("\t\n");
	printf("options:\n");
	printf("\t[-d DPDK huge memory size in MB]\n");
	printf("\t[-g use single file descriptor for DPDK memory segments]\n");
	printf("\t[-i shared memory group ID]\n");
	printf("\t[-r remote NVMe over Fabrics target address]\n");
	printf("\t[-V enumerate VMD]\n");
#ifdef DEBUG
	printf("\t[-L enable debug logging]\n");
#else
	/* -L is accepted but ineffective unless built with --enable-debug. */
	printf("\t[-L enable debug logging (flag disabled, must reconfigure with --enable-debug)]\n");
#endif
}
|
|
|
|
|
|
|
|
static int
|
2021-12-06 06:00:59 +00:00
|
|
|
parse_args(int argc, char **argv, struct spdk_env_opts *env_opts)
|
2019-06-11 11:55:22 +00:00
|
|
|
{
|
2021-12-06 06:00:59 +00:00
|
|
|
int op, rc;
|
2019-06-11 11:55:22 +00:00
|
|
|
|
2021-12-06 06:00:59 +00:00
|
|
|
spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE);
|
|
|
|
snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
|
|
|
|
|
|
|
|
while ((op = getopt(argc, argv, "d:gi:r:L:V")) != -1) {
|
2019-06-11 11:55:22 +00:00
|
|
|
switch (op) {
|
|
|
|
case 'V':
|
|
|
|
g_vmd = true;
|
|
|
|
break;
|
2021-12-06 06:00:59 +00:00
|
|
|
case 'i':
|
|
|
|
env_opts->shm_id = spdk_strtol(optarg, 10);
|
|
|
|
if (env_opts->shm_id < 0) {
|
|
|
|
fprintf(stderr, "Invalid shared memory ID\n");
|
|
|
|
return env_opts->shm_id;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'g':
|
|
|
|
env_opts->hugepage_single_segments = true;
|
|
|
|
break;
|
|
|
|
case 'r':
|
|
|
|
if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) {
|
|
|
|
fprintf(stderr, "Error parsing transport address\n");
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'd':
|
|
|
|
env_opts->mem_size = spdk_strtol(optarg, 10);
|
|
|
|
if (env_opts->mem_size < 0) {
|
|
|
|
fprintf(stderr, "Invalid DPDK memory size\n");
|
|
|
|
return env_opts->mem_size;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'L':
|
|
|
|
rc = spdk_log_set_flag(optarg);
|
|
|
|
if (rc < 0) {
|
|
|
|
fprintf(stderr, "unknown flag\n");
|
|
|
|
usage(argv[0]);
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
#ifdef DEBUG
|
|
|
|
spdk_log_set_print_level(SPDK_LOG_DEBUG);
|
|
|
|
#endif
|
|
|
|
break;
|
2019-06-11 11:55:22 +00:00
|
|
|
default:
|
|
|
|
usage(argv[0]);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-06-22 21:35:04 +00:00
|
|
|
int
|
|
|
|
main(int argc, char **argv)
|
2016-06-07 21:32:27 +00:00
|
|
|
{
|
|
|
|
int rc;
|
2017-01-12 18:25:17 +00:00
|
|
|
struct spdk_env_opts opts;
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
/*
|
2017-01-12 18:25:17 +00:00
|
|
|
* SPDK relies on an abstraction around the local environment
|
|
|
|
* named env that handles memory allocation and PCI device operations.
|
|
|
|
* This library must be initialized first.
|
2016-06-07 21:32:27 +00:00
|
|
|
*
|
|
|
|
*/
|
2017-01-12 18:25:17 +00:00
|
|
|
spdk_env_opts_init(&opts);
|
2021-12-06 06:00:59 +00:00
|
|
|
rc = parse_args(argc, argv, &opts);
|
|
|
|
if (rc != 0) {
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2017-01-12 18:25:17 +00:00
|
|
|
opts.name = "hello_world";
|
2017-12-18 19:57:01 +00:00
|
|
|
if (spdk_env_init(&opts) < 0) {
|
|
|
|
fprintf(stderr, "Unable to initialize SPDK env\n");
|
|
|
|
return 1;
|
|
|
|
}
|
2016-06-07 21:32:27 +00:00
|
|
|
|
|
|
|
printf("Initializing NVMe Controllers\n");
|
|
|
|
|
2019-06-11 11:55:22 +00:00
|
|
|
if (g_vmd && spdk_vmd_init()) {
|
|
|
|
fprintf(stderr, "Failed to initialize VMD."
|
|
|
|
" Some NVMe devices can be unavailable.\n");
|
|
|
|
}
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
/*
|
|
|
|
* Start the SPDK NVMe enumeration process. probe_cb will be called
|
|
|
|
* for each NVMe controller found, giving our application a choice on
|
|
|
|
* whether to attach to each controller. attach_cb will then be
|
|
|
|
* called for each controller after the SPDK NVMe driver has completed
|
|
|
|
* initializing the controller we chose to attach.
|
|
|
|
*/
|
2021-12-06 06:00:59 +00:00
|
|
|
rc = spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL);
|
2016-06-07 21:32:27 +00:00
|
|
|
if (rc != 0) {
|
|
|
|
fprintf(stderr, "spdk_nvme_probe() failed\n");
|
2021-12-29 11:30:13 +00:00
|
|
|
rc = 1;
|
|
|
|
goto exit;
|
2016-06-07 21:32:27 +00:00
|
|
|
}
|
|
|
|
|
2020-09-27 02:51:01 +00:00
|
|
|
if (TAILQ_EMPTY(&g_controllers)) {
|
2017-05-23 20:53:45 +00:00
|
|
|
fprintf(stderr, "no NVMe controllers found\n");
|
2021-12-29 11:30:13 +00:00
|
|
|
rc = 1;
|
|
|
|
goto exit;
|
2017-05-23 20:53:45 +00:00
|
|
|
}
|
|
|
|
|
2016-06-07 21:32:27 +00:00
|
|
|
printf("Initialization complete.\n");
|
|
|
|
hello_world();
|
|
|
|
cleanup();
|
2020-01-27 11:09:36 +00:00
|
|
|
if (g_vmd) {
|
|
|
|
spdk_vmd_fini();
|
|
|
|
}
|
|
|
|
|
2021-12-29 11:30:13 +00:00
|
|
|
exit:
|
|
|
|
cleanup();
|
|
|
|
spdk_env_fini();
|
|
|
|
return rc;
|
2016-06-07 21:32:27 +00:00
|
|
|
}
|