nvme: refactor nvme_attach() into nvme_probe()

The new probing API will find all NVMe devices on the system and ask the
caller whether to attach to each one.  The caller will then receive a
callback once each controller has finished initializing and has been
attached to the driver.

This will enable cleanup of the PCI abstraction layer (enabling us to
use DPDK PCI functionality) as well as allowing future work on parallel
NVMe controller startup and PCIe hotplug support.

Change-Id: I3cdde7bfab0bc0bea1993dd549b9b0e8d36db9be
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Daniel Verkamp 2016-01-29 13:15:29 -07:00
parent 20c767e796
commit 8374a727a9
12 changed files with 453 additions and 294 deletions

View File

@ -41,7 +41,7 @@
\section key_functions Key Functions
- nvme_attach() \copybrief nvme_attach()
- nvme_probe() \copybrief nvme_probe()
- nvme_ns_cmd_read() \copybrief nvme_ns_cmd_read()
- nvme_ns_cmd_write() \copybrief nvme_ns_cmd_write()
- nvme_ns_cmd_deallocate() \copybrief nvme_ns_cmd_deallocate()

View File

@ -768,6 +768,32 @@ parse_args(int argc, char **argv)
return 0;
}
/*
 * Called by nvme_probe() for each NVMe device found on the system.
 * Return true to request attachment, false to skip the device.
 */
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
	struct pci_device *pdev = pci_dev;

	if (!pci_device_has_non_uio_driver(pdev)) {
		/* No conflicting kernel driver - safe to claim with the userspace driver. */
		return true;
	}

	fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
	fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
		spdk_pci_device_get_domain(pdev),
		spdk_pci_device_get_bus(pdev),
		spdk_pci_device_get_dev(pdev),
		spdk_pci_device_get_func(pdev));
	fprintf(stderr, " skipping...\n");
	return false;
}
/*
 * Called by nvme_probe() once a controller has been attached to the driver.
 * Prints the controller's identify data and immediately detaches, since
 * this tool only inspects the device rather than keeping it attached.
 */
static void
attach_cb(void *cb_ctx, void *pci_dev, struct nvme_controller *ctrlr)
{
	print_controller(ctrlr, pci_dev);
	nvme_detach(ctrlr);
}
static const char *ealargs[] = {
"identify",
"-c 0x1",
@ -776,9 +802,6 @@ static const char *ealargs[] = {
int main(int argc, char **argv)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct pci_id_match match;
int rc;
rc = parse_args(argc, argv);
@ -806,43 +829,13 @@ int main(int argc, char **argv)
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct nvme_controller *ctrlr;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-uio kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
fprintf(stderr, " skipping...\n");
continue;
}
pci_device_probe(pci_dev);
ctrlr = nvme_attach(pci_dev);
if (ctrlr == NULL) {
fprintf(stderr, "failed to attach to NVMe controller at PCI BDF %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
rc = 1;
continue;
}
print_controller(ctrlr, pci_dev);
nvme_detach(ctrlr);
}
cleanup();
pci_iterator_destroy(pci_dev_iter);
return rc;
}

View File

@ -819,55 +819,58 @@ register_workers(void)
return 0;
}
static int
register_controllers(void)
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct pci_id_match match;
int rc;
struct pci_device *dev = pci_dev;
printf("Initializing NVMe Controllers\n");
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct nvme_controller *ctrlr;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-uio kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
if (pci_device_has_non_uio_driver(dev)) {
fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
spdk_pci_device_get_domain(dev),
spdk_pci_device_get_bus(dev),
spdk_pci_device_get_dev(dev),
spdk_pci_device_get_func(dev));
fprintf(stderr, " skipping...\n");
continue;
return false;
}
pci_device_probe(pci_dev);
printf("Attaching to %04x:%02x:%02x.%02x\n",
spdk_pci_device_get_domain(dev),
spdk_pci_device_get_bus(dev),
spdk_pci_device_get_dev(dev),
spdk_pci_device_get_func(dev));
ctrlr = nvme_attach(pci_dev);
if (ctrlr == NULL) {
fprintf(stderr, "nvme_attach failed for controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
rc = 1;
continue;
return true;
}
static void
attach_cb(void *cb_ctx, void *pci_dev, struct nvme_controller *ctrlr)
{
struct pci_device *dev = pci_dev;
printf("Attached to %04x:%02x:%02x.%02x\n",
spdk_pci_device_get_domain(dev),
spdk_pci_device_get_bus(dev),
spdk_pci_device_get_dev(dev),
spdk_pci_device_get_func(dev));
register_ctrlr(ctrlr);
}
pci_iterator_destroy(pci_dev_iter);
static int
register_controllers(void)
{
printf("Initializing NVMe Controllers\n");
return rc;
pci_system_init();
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
return 1;
}
return 0;
}
static void

View File

@ -378,6 +378,36 @@ reserve_controller(struct nvme_controller *ctrlr, struct pci_device *pci_dev)
reservation_ns_release(ctrlr, 1);
}
/* nvme_probe() callback: skip any device still bound to a non-uio kernel driver. */
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
	struct pci_device *pdev = pci_dev;
	bool has_kernel_driver = pci_device_has_non_uio_driver(pdev);

	if (has_kernel_driver) {
		fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
		fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
			spdk_pci_device_get_domain(pdev),
			spdk_pci_device_get_bus(pdev),
			spdk_pci_device_get_dev(pdev),
			spdk_pci_device_get_func(pdev));
		fprintf(stderr, " skipping...\n");
	}

	return !has_kernel_driver;
}
/*
 * nvme_probe() callback: record each attached controller in the global
 * device list so the reservation tests can operate on it later.
 */
static void
attach_cb(void *cb_ctx, void *pci_dev, struct nvme_controller *ctrlr)
{
	struct dev *dev;

	/* add to dev list */
	/* NOTE(review): num_devs is incremented without a bounds check against
	 * the devs[] capacity - presumably sized for the maximum expected
	 * controller count; confirm. */
	dev = &devs[num_devs++];
	dev->pci_dev = pci_dev;
	dev->ctrlr = ctrlr;
}
static const char *ealargs[] = {
"reserve",
"-c 0x1",
@ -386,10 +416,7 @@ static const char *ealargs[] = {
int main(int argc, char **argv)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct dev *iter;
struct pci_id_match match;
int rc, i;
rc = rte_eal_init(sizeof(ealargs) / sizeof(ealargs[0]),
@ -412,45 +439,11 @@ int main(int argc, char **argv)
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct nvme_controller *ctrlr;
struct dev *dev;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-uio kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
fprintf(stderr, " skipping...\n");
continue;
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
return 1;
}
pci_device_probe(pci_dev);
ctrlr = nvme_attach(pci_dev);
if (ctrlr == NULL) {
fprintf(stderr, "failed to attach to NVMe controller at PCI BDF %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
rc = 1;
continue;
}
/* add to dev list */
dev = &devs[num_devs++];
dev->pci_dev = pci_dev;
dev->ctrlr = ctrlr;
}
pci_iterator_destroy(pci_dev_iter);
if (num_devs) {
rc = nvme_register_io_thread();
if (rc != 0)

View File

@ -48,25 +48,39 @@ extern int32_t nvme_retry_count;
extern "C" {
#endif
/** \brief Opaque handle to a controller. Obtained by calling nvme_attach(). */
/** \brief Opaque handle to a controller. Returned by \ref nvme_probe()'s attach_cb. */
struct nvme_controller;
/**
* \brief Attaches specified device to the NVMe driver.
* Callback for nvme_probe() enumeration.
*
* On success, the nvme_controller handle is valid for other nvme_ctrlr_* functions.
* On failure, the return value will be NULL.
* \return true to attach to this device.
*/
typedef bool (*nvme_probe_cb)(void *cb_ctx, void *pci_dev);
/**
* Callback for nvme_probe() to report a device that has been attached to the userspace NVMe driver.
*/
typedef void (*nvme_attach_cb)(void *cb_ctx, void *pci_dev, struct nvme_controller *ctrlr);
/**
* \brief Enumerate the NVMe devices attached to the system and attach the userspace NVMe driver
* to them if desired.
*
* This function should be called from a single thread while no other threads or drivers
* are actively using the NVMe device.
* \param probe_cb will be called once per NVMe device found in the system.
* \param attach_cb will be called for devices for which probe_cb returned true once that NVMe
* controller has been attached to the userspace driver.
*
* If called more than once, only devices that are not already attached to the SPDK NVMe driver
* will be reported.
*
To stop using the controller and release its associated resources, call
\ref nvme_detach with the nvme_controller instance provided to attach_cb.
*/
struct nvme_controller *nvme_attach(void *devhandle);
int nvme_probe(void *cb_ctx, nvme_probe_cb probe_cb, nvme_attach_cb attach_cb);
/**
* \brief Detaches specified device returned by \ref nvme_attach() from the NVMe driver.
* \brief Detaches specified device returned by \ref nvme_probe()'s attach_cb from the NVMe driver.
*
* On success, the nvme_controller handle is no longer valid.
*
@ -91,7 +105,8 @@ int nvme_ctrlr_reset(struct nvme_controller *ctrlr);
/**
* \brief Get the identify controller data as defined by the NVMe specification.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
*/
const struct nvme_controller_data *nvme_ctrlr_get_data(struct nvme_controller *ctrlr);
@ -99,7 +114,8 @@ const struct nvme_controller_data *nvme_ctrlr_get_data(struct nvme_controller *c
/**
* \brief Get the number of namespaces for the given NVMe controller.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* This is equivalent to calling nvme_ctrlr_get_data() to get the
* nvme_controller_data and then reading the nn field.
@ -110,7 +126,8 @@ uint32_t nvme_ctrlr_get_num_ns(struct nvme_controller *ctrlr);
/**
* \brief Determine if a particular log page is supported by the given NVMe controller.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* \sa nvme_ctrlr_cmd_get_log_page()
*/
@ -119,7 +136,8 @@ bool nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, uint8_t log
/**
* \brief Determine if a particular feature is supported by the given NVMe controller.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* \sa nvme_ctrlr_cmd_get_feature()
*/
@ -180,7 +198,8 @@ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
*
* \return Number of completions processed (may be 0) or negative on error.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
*/
int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);
@ -195,8 +214,8 @@ int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_
* When constructing the nvme_command it is not necessary to fill out the PRP
* list/SGL or the CID. The driver will handle both of those for you.
*
* This function is thread safe and can be called at any point after
* \ref nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
@ -217,7 +236,8 @@ int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
*
* \return Number of completions processed (may be 0) or negative on error.
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
int32_t nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr);
@ -232,8 +252,8 @@ struct nvme_namespace;
* be any gaps in the numbering. The number of namespaces is obtained by calling
* nvme_ctrlr_get_num_ns().
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
struct nvme_namespace *nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t ns_id);
@ -249,7 +269,8 @@ struct nvme_namespace *nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t
*
* \return 0 if successfully submitted, ENOMEM if resources could not be allocated for this request
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
@ -274,7 +295,8 @@ int nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
*
* \return 0 if successfully submitted, ENOMEM if resources could not be allocated for this request
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
@ -298,7 +320,8 @@ int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
*
* \return 0 if successfully submitted, ENOMEM if resources could not be allocated for this request
*
* This function is thread safe and can be called at any point after nvme_attach().
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
@ -313,48 +336,48 @@ int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
/**
* \brief Get the identify namespace data as defined by the NVMe specification.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
const struct nvme_namespace_data *nvme_ns_get_data(struct nvme_namespace *ns);
/**
* \brief Get the namespace id (index number) from the given namespace handle.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_id(struct nvme_namespace *ns);
/**
* \brief Get the maximum transfer size, in bytes, for an I/O sent to the given namespace.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
/**
* \brief Get the sector size, in bytes, of the given namespace.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns);
/**
* \brief Get the number of sectors for the given namespace.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
/**
* \brief Get the size, in bytes, of the given namespace.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint64_t nvme_ns_get_size(struct nvme_namespace *ns);
@ -373,8 +396,8 @@ enum nvme_namespace_flags {
*
* See nvme_namespace_flags for the possible flags returned.
*
* This function is thread safe and can be called at any point after nvme_attach().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_flags(struct nvme_namespace *ns);

View File

@ -39,7 +39,9 @@
struct nvme_driver g_nvme_driver = {
.lock = NVME_MUTEX_INITIALIZER,
.max_io_queues = DEFAULT_MAX_IO_QUEUES
.max_io_queues = DEFAULT_MAX_IO_QUEUES,
.init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_driver.init_ctrlrs),
.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_driver.attached_ctrlrs),
};
int32_t nvme_retry_count;
@ -52,19 +54,21 @@ __thread int nvme_thread_ioq_index = -1;
\msc
app [label="Application"], nvme [label="NVMe Driver"];
app=>nvme [label="nvme_attach(devhandle)"];
app<<nvme [label="nvme_controller ptr"];
app=>nvme [label="nvme_ctrlr_start(nvme_controller ptr)"];
app=>nvme [label="nvme_probe()"];
app<<nvme [label="probe_cb(pci_dev)"];
nvme=>nvme [label="nvme_attach(devhandle)"];
nvme=>nvme [label="nvme_ctrlr_start(nvme_controller ptr)"];
nvme=>nvme [label="identify controller"];
nvme=>nvme [label="create queue pairs"];
nvme=>nvme [label="identify namespace(s)"];
app<<nvme [label="attach_cb(pci_dev, nvme_controller)"];
app=>app [label="create block devices based on controller's namespaces"];
\endmsc
*/
struct nvme_controller *
static struct nvme_controller *
nvme_attach(void *devhandle)
{
struct nvme_controller *ctrlr;
@ -84,20 +88,21 @@ nvme_attach(void *devhandle)
return NULL;
}
if (nvme_ctrlr_start(ctrlr) != 0) {
nvme_ctrlr_destruct(ctrlr);
nvme_free(ctrlr);
return NULL;
}
return ctrlr;
}
int
nvme_detach(struct nvme_controller *ctrlr)
{
struct nvme_driver *driver = &g_nvme_driver;
nvme_mutex_lock(&driver->lock);
nvme_ctrlr_destruct(ctrlr);
TAILQ_REMOVE(&g_nvme_driver.attached_ctrlrs, ctrlr, tailq);
nvme_free(ctrlr);
nvme_mutex_unlock(&driver->lock);
return 0;
}
@ -247,3 +252,94 @@ nvme_unregister_io_thread(void)
nvme_free_ioq_index();
}
/* Context forwarded through the PCI enumeration to nvme_enum_cb(). */
struct nvme_enum_ctx {
	nvme_probe_cb probe_cb;	/* user callback deciding whether to attach */
	void *cb_ctx;		/* opaque user context passed to probe_cb */
};

/* This function must only be called while holding g_nvme_driver.lock */
static int
nvme_enum_cb(void *ctx, void *pci_dev)
{
	struct nvme_enum_ctx *enum_ctx = ctx;
	struct nvme_controller *ctrlr;

	/* Verify that this controller is not already attached */
	TAILQ_FOREACH(ctrlr, &g_nvme_driver.attached_ctrlrs, tailq) {
		/* NOTE: This assumes that the PCI abstraction layer will use the same device handle
		 * across enumerations; we could compare by BDF instead if this is not true.
		 */
		if (pci_dev == ctrlr->devhandle) {
			/* Already attached: silently succeed without re-probing. */
			return 0;
		}
	}

	if (enum_ctx->probe_cb(enum_ctx->cb_ctx, pci_dev)) {
		/* The user asked to attach; construct the controller now and queue
		 * it on init_ctrlrs for nvme_probe() to start afterward. */
		ctrlr = nvme_attach(pci_dev);
		if (ctrlr == NULL) {
			nvme_printf(NULL, "nvme_attach() failed\n");
			return -1;
		}

		TAILQ_INSERT_TAIL(&g_nvme_driver.init_ctrlrs, ctrlr, tailq);
	}

	return 0;
}
/*
 * Enumerate NVMe PCI devices, attach the userspace driver to those accepted
 * by probe_cb, then start each queued controller and report it via attach_cb.
 *
 * \return 0 on success, or -1 if enumeration failed or any controller could
 * not be attached/started (processing continues past individual failures).
 */
int
nvme_probe(void *cb_ctx, nvme_probe_cb probe_cb, nvme_attach_cb attach_cb)
{
	int rc, start_rc;
	struct nvme_enum_ctx enum_ctx;
	struct nvme_controller *ctrlr;

	nvme_mutex_lock(&g_nvme_driver.lock);

	enum_ctx.probe_cb = probe_cb;
	enum_ctx.cb_ctx = cb_ctx;

	rc = nvme_pci_enumerate(nvme_enum_cb, &enum_ctx);
	/*
	 * Keep going even if one or more nvme_attach() calls failed,
	 * but maintain the value of rc to signal errors when we return.
	 */

	/* TODO: This could be reworked to start all the controllers in parallel. */
	while (!TAILQ_EMPTY(&g_nvme_driver.init_ctrlrs)) {
		/* Remove ctrlr from init_ctrlrs and attempt to start it */
		ctrlr = TAILQ_FIRST(&g_nvme_driver.init_ctrlrs);
		TAILQ_REMOVE(&g_nvme_driver.init_ctrlrs, ctrlr, tailq);

		/*
		 * Drop the driver lock while calling nvme_ctrlr_start() since it needs to acquire
		 * the driver lock internally.
		 *
		 * TODO: Rethink the locking - maybe reset should take the lock so that start() and
		 * the functions it calls (in particular nvme_ctrlr_set_num_qpairs())
		 * can assume it is held.
		 */
		nvme_mutex_unlock(&g_nvme_driver.lock);
		start_rc = nvme_ctrlr_start(ctrlr);
		nvme_mutex_lock(&g_nvme_driver.lock);

		if (start_rc == 0) {
			/* Started successfully; move to the attached list. */
			TAILQ_INSERT_TAIL(&g_nvme_driver.attached_ctrlrs, ctrlr, tailq);

			/*
			 * Unlock while calling attach_cb() so the user can call other functions
			 * that may take the driver lock, like nvme_detach().
			 */
			nvme_mutex_unlock(&g_nvme_driver.lock);
			attach_cb(cb_ctx, ctrlr->devhandle, ctrlr);
			nvme_mutex_lock(&g_nvme_driver.lock);
		} else {
			/* Start failed: tear down this controller and record the error. */
			nvme_ctrlr_destruct(ctrlr);
			nvme_free(ctrlr);
			rc = -1;
		}
	}

	nvme_mutex_unlock(&g_nvme_driver.lock);
	return rc;
}

View File

@ -42,6 +42,9 @@
#include <rte_mempool.h>
#include <rte_memcpy.h>
#include "spdk/pci.h"
#include "spdk/nvme_spec.h"
/**
* \file
*
@ -114,6 +117,37 @@ extern struct rte_mempool *request_mempool;
*/
#define nvme_dealloc_request(buf) rte_mempool_put(request_mempool, buf)
static inline int
nvme_pci_enumerate(int (*enum_cb)(void *enum_ctx, void *pci_dev), void *enum_ctx)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct pci_id_match match;
int rc;
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
pci_device_probe(pci_dev);
if (enum_cb(enum_ctx, pci_dev)) {
rc = -1;
}
}
pci_iterator_destroy(pci_dev_iter);
return rc;
}
/**
*
*/

View File

@ -309,6 +309,8 @@ struct nvme_controller {
/* Cold data (not accessed in normal I/O path) is after this point. */
TAILQ_ENTRY(nvme_controller) tailq;
/** All the log pages supported */
bool log_page_supported[256];
@ -360,6 +362,8 @@ struct nvme_driver {
uint16_t *ioq_index_pool;
uint32_t max_io_queues;
uint16_t ioq_index_pool_next;
TAILQ_HEAD(, nvme_controller) init_ctrlrs;
TAILQ_HEAD(, nvme_controller) attached_ctrlrs;
};
extern struct nvme_driver g_nvme_driver;

View File

@ -188,6 +188,56 @@ static void aer_cb(void *arg, const struct nvme_completion *cpl)
get_health_log_page(dev);
}
/*
 * nvme_probe() callback: decide whether to attach to a controller.
 * Devices bound to a non-uio kernel driver are reported and skipped.
 */
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
	struct pci_device *pdev = pci_dev;

	if (pci_device_has_non_uio_driver(pdev)) {
		fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
		fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
			spdk_pci_device_get_domain(pdev),
			spdk_pci_device_get_bus(pdev),
			spdk_pci_device_get_dev(pdev),
			spdk_pci_device_get_func(pdev));
		fprintf(stderr, " skipping...\n");
		return false;
	}

	/* Announce the device we are about to attach to. */
	printf("Attaching to %04x:%02x:%02x.%02x\n",
	       spdk_pci_device_get_domain(pdev),
	       spdk_pci_device_get_bus(pdev),
	       spdk_pci_device_get_dev(pdev),
	       spdk_pci_device_get_func(pdev));

	return true;
}
/*
 * nvme_probe() callback: register an attached controller in the global
 * device list and allocate its health log page buffer.  On allocation
 * failure, sets the global 'failed' flag for main() to check.
 */
static void
attach_cb(void *cb_ctx, void *pdev, struct nvme_controller *ctrlr)
{
	struct dev *dev;
	struct pci_device *pci_dev = pdev;

	/* add to dev list */
	dev = &devs[num_devs++];
	dev->ctrlr = ctrlr;
	dev->pci_dev = pci_dev;

	/* Use the spdk_pci_device accessors for the BDF rather than poking at
	 * struct pci_device fields directly, consistent with probe_cb() above
	 * and the PCI abstraction cleanup in the rest of this change. */
	snprintf(dev->name, sizeof(dev->name), "%04x:%02x:%02x.%02x",
		 spdk_pci_device_get_domain(pci_dev),
		 spdk_pci_device_get_bus(pci_dev),
		 spdk_pci_device_get_dev(pci_dev),
		 spdk_pci_device_get_func(pci_dev));

	printf("Attached to %s\n", dev->name);

	dev->health_page = rte_zmalloc("nvme health", sizeof(*dev->health_page), 4096);
	if (dev->health_page == NULL) {
		printf("Allocation error (health page)\n");
		failed = 1;
	}
}
static const char *ealargs[] = {
"aer",
"-c 0x1",
@ -196,10 +246,7 @@ static const char *ealargs[] = {
int main(int argc, char **argv)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct dev *dev;
struct pci_id_match match;
int rc, i;
printf("Asynchronous Event Request test\n");
@ -224,53 +271,15 @@ int main(int argc, char **argv)
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct dev *dev;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-uio kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
fprintf(stderr, " skipping...\n");
continue;
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
return 1;
}
pci_device_probe(pci_dev);
/* add to dev list */
dev = &devs[num_devs++];
dev->pci_dev = pci_dev;
snprintf(dev->name, sizeof(dev->name), "%04X:%02X:%02X.%02X",
pci_dev->domain, pci_dev->bus, pci_dev->dev, pci_dev->func);
printf("%s: attaching NVMe driver...\n", dev->name);
dev->health_page = rte_zmalloc("nvme health", sizeof(*dev->health_page), 4096);
if (dev->health_page == NULL) {
printf("Allocation error (health page)\n");
failed = 1;
if (failed) {
goto done;
}
dev->ctrlr = nvme_attach(pci_dev);
if (dev->ctrlr == NULL) {
fprintf(stderr, "failed to attach to NVMe controller %s\n", dev->name);
failed = 1;
goto done;
}
}
printf("Registering asynchronous event callbacks...\n");
foreach_dev(dev) {
nvme_ctrlr_register_aer_callback(dev->ctrlr, aer_cb, dev);
@ -311,6 +320,5 @@ int main(int argc, char **argv)
done:
cleanup();
pci_iterator_destroy(pci_dev_iter);
return failed;
}

View File

@ -503,55 +503,45 @@ register_workers(void)
return 0;
}
/* Probe callback for nvme_probe(): reject controllers claimed by a non-uio kernel driver. */
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
	struct pci_device *pdev = pci_dev;

	if (!pci_device_has_non_uio_driver(pdev)) {
		/* Free to attach the userspace driver. */
		return true;
	}

	fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
	fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
		spdk_pci_device_get_domain(pdev),
		spdk_pci_device_get_bus(pdev),
		spdk_pci_device_get_dev(pdev),
		spdk_pci_device_get_func(pdev));
	fprintf(stderr, " skipping...\n");

	return false;
}
/*
 * nvme_probe() callback: hand the newly attached controller to the
 * benchmark's controller registry.
 */
static void
attach_cb(void *cb_ctx, void *pci_dev, struct nvme_controller *ctrlr)
{
	register_ctrlr(ctrlr);
}
static int
register_controllers(void)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct pci_id_match match;
int rc;
printf("Initializing NVMe Controllers\n");
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct nvme_controller *ctrlr;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-uio kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
fprintf(stderr, " skipping...\n");
continue;
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
return 1;
}
pci_device_probe(pci_dev);
ctrlr = nvme_attach(pci_dev);
if (ctrlr == NULL) {
fprintf(stderr, "nvme_attach failed for controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
rc = 1;
continue;
}
register_ctrlr(ctrlr);
}
pci_iterator_destroy(pci_dev_iter);
return rc;
return 0;
}
static void

View File

@ -371,6 +371,53 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
return rc;
}
/* nvme_probe() callback: report and skip devices bound to a non-uio kernel driver. */
static bool
probe_cb(void *cb_ctx, void *pci_dev)
{
	struct pci_device *pdev = pci_dev;
	unsigned domain = spdk_pci_device_get_domain(pdev);
	unsigned bus = spdk_pci_device_get_bus(pdev);
	unsigned slot = spdk_pci_device_get_dev(pdev);
	unsigned func = spdk_pci_device_get_func(pdev);

	if (pci_device_has_non_uio_driver(pdev)) {
		fprintf(stderr, "non-uio kernel driver attached to NVMe\n");
		fprintf(stderr, " controller at PCI address %04x:%02x:%02x.%02x\n",
			domain, bus, slot, func);
		fprintf(stderr, " skipping...\n");
		return false;
	}

	printf("Attaching to %04x:%02x:%02x.%02x\n", domain, bus, slot, func);

	return true;
}
/*
 * nvme_probe() callback: record the attached controller in the global
 * device list and give it a human-readable BDF name for log output.
 */
static void
attach_cb(void *cb_ctx, void *pdev, struct nvme_controller *ctrlr)
{
	struct dev *dev;
	struct pci_device *pci_dev = pdev;

	/* add to dev list */
	/* NOTE(review): num_devs++ has no bounds check against the devs[]
	 * capacity - presumably sized for the expected controller count; confirm. */
	dev = &devs[num_devs++];
	dev->ctrlr = ctrlr;
	dev->pci_dev = pci_dev;
	snprintf(dev->name, sizeof(dev->name), "%04X:%02X:%02X.%02X",
		 spdk_pci_device_get_domain(pci_dev),
		 spdk_pci_device_get_bus(pci_dev),
		 spdk_pci_device_get_dev(pci_dev),
		 spdk_pci_device_get_func(pci_dev));
	printf("Attached to %s\n", dev->name);
}
static const char *ealargs[] = {
"nvme_sgl",
"-c 0x1",
@ -379,10 +426,7 @@ static const char *ealargs[] = {
int main(int argc, char **argv)
{
struct pci_device_iterator *pci_dev_iter;
struct pci_device *pci_dev;
struct dev *iter;
struct pci_id_match match;
int rc, i;
printf("NVMe Readv/Writev Request test\n");
@ -407,48 +451,11 @@ int main(int argc, char **argv)
pci_system_init();
match.vendor_id = PCI_MATCH_ANY;
match.subvendor_id = PCI_MATCH_ANY;
match.subdevice_id = PCI_MATCH_ANY;
match.device_id = PCI_MATCH_ANY;
match.device_class = NVME_CLASS_CODE;
match.device_class_mask = 0xFFFFFF;
pci_dev_iter = pci_id_match_iterator_create(&match);
rc = 0;
while ((pci_dev = pci_device_next(pci_dev_iter))) {
struct dev *dev;
if (pci_device_has_non_uio_driver(pci_dev)) {
fprintf(stderr, "non-null kernel driver attached to nvme\n");
fprintf(stderr, " controller at pci bdf %d:%d:%d\n",
pci_dev->bus, pci_dev->dev, pci_dev->func);
fprintf(stderr, " skipping...\n");
continue;
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
exit(1);
}
pci_device_probe(pci_dev);
/* add to dev list */
dev = &devs[num_devs++];
dev->pci_dev = pci_dev;
snprintf(dev->name, sizeof(dev->name), "%04X:%02X:%02X.%02X",
pci_dev->domain, pci_dev->bus, pci_dev->dev, pci_dev->func);
printf("%s: attaching NVMe driver...\n", dev->name);
dev->ctrlr = nvme_attach(pci_dev);
if (dev->ctrlr == NULL) {
fprintf(stderr, "failed to attach to NVMe controller %s\n", dev->name);
rc = 1;
continue; /* TODO: just abort */
}
}
pci_iterator_destroy(pci_dev_iter);
if (num_devs) {
rc = nvme_register_io_thread();
if (rc != 0)

View File

@ -75,6 +75,14 @@ do \
while (0)
#define nvme_dealloc_request(buf) free(buf)
/*
 * Stub PCI enumeration for the unit test environment.
 *
 * TODO: enumeration is not needed in any unit tests yet, so it's not
 * implemented; it always fails with -1.
 */
static inline int
nvme_pci_enumerate(int (*enum_cb)(void *enum_ctx, void *pci_dev), void *enum_ctx)
{
	/* Silence unused-parameter warnings; the stub never enumerates. */
	(void)enum_cb;
	(void)enum_ctx;
	return -1;
}
#define nvme_pcicfg_read32(handle, var, offset) do { *(var) = 0xFFFFFFFFu; } while (0)
#define nvme_pcicfg_write32(handle, var, offset) do { (void)(var); } while (0)