/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "env_internal.h"

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_devargs.h>
#include "spdk/env.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/assert.h"

#define SYSFS_PCI_DRIVERS "/sys/bus/pci/drivers"

/* Compatibility for versions < 20.11 */
#if RTE_VERSION < RTE_VERSION_NUM(20, 11, 0, 0)
#define RTE_DEV_ALLOWED RTE_DEV_WHITELISTED
#define RTE_DEV_BLOCKED RTE_DEV_BLACKLISTED
#define RTE_BUS_SCAN_ALLOWLIST RTE_BUS_SCAN_WHITELIST
#endif

#define PCI_CFG_SIZE 256
#define PCI_EXT_CAP_ID_SN 0x03

/* DPDK 18.11+ hotplug isn't robust. Multiple apps starting at the same time
 * might cause the internal IPC to misbehave. Just retry in such case.
 */
#define DPDK_HOTPLUG_RETRY_COUNT 4

/* DPDK alarm/interrupt thread */
static pthread_mutex_t g_pci_mutex = PTHREAD_MUTEX_INITIALIZER;
static TAILQ_HEAD(, spdk_pci_device) g_pci_devices = TAILQ_HEAD_INITIALIZER(g_pci_devices);
/* devices hotplugged on a dpdk thread */
static TAILQ_HEAD(, spdk_pci_device) g_pci_hotplugged_devices =
	TAILQ_HEAD_INITIALIZER(g_pci_hotplugged_devices);
static TAILQ_HEAD(, spdk_pci_driver) g_pci_drivers = TAILQ_HEAD_INITIALIZER(g_pci_drivers);
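/* PCI device providers registered with the env layer. Each provider (the
 * regular "pci" path or an alternate one such as VMD) supplies its own
 * attach/detach callbacks; spdk_pci_device_detach() dispatches to the
 * provider whose name matches the device's type string.
 */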
static TAILQ_HEAD(, spdk_pci_device_provider) g_pci_device_providers =
	TAILQ_HEAD_INITIALIZER(g_pci_device_providers);
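
/* The rte_pci_driver handed to DPDK lives inside driver_buf. Because driver_buf
 * sits at offset zero (see the static asserts below), the rte_pci_driver pointer
 * that DPDK passes to the probe/remove callbacks can be cast directly back to
 * the containing spdk_pci_driver, as pci_device_init() does.
 */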
struct spdk_pci_driver {
	uint8_t driver_buf[256];
	struct rte_pci_driver *driver;

	const char *name;
	const struct spdk_pci_id *id_table;
	uint32_t drv_flags;

	spdk_pci_enum_cb cb_fn;
	void *cb_arg;
	TAILQ_ENTRY(spdk_pci_driver) tailq;
};

SPDK_STATIC_ASSERT(offsetof(struct spdk_pci_driver, driver_buf) == 0, "driver_buf must be first");
SPDK_STATIC_ASSERT(offsetof(struct spdk_pci_driver, driver) >= sizeof(struct rte_pci_driver),
		   "driver_buf not big enough");
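
/* Accessors around rte_pci_device, implemented elsewhere in env_dpdk. */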
const char *dpdk_pci_device_get_name(struct rte_pci_device *);
struct rte_devargs *dpdk_pci_device_get_devargs(struct rte_pci_device *);
void dpdk_pci_device_copy_identifiers(struct rte_pci_device *_dev, struct spdk_pci_device *dev);
int dpdk_pci_device_map_bar(struct rte_pci_device *dev, uint32_t bar,
			    void **mapped_addr, uint64_t *phys_addr, uint64_t *size);
int dpdk_pci_device_read_config(struct rte_pci_device *dev, void *value, uint32_t len,
				uint32_t offset);
int dpdk_pci_device_write_config(struct rte_pci_device *dev, void *value, uint32_t len,
				 uint32_t offset);

int pci_device_init(struct rte_pci_driver *driver, struct rte_pci_device *device);
int pci_device_fini(struct rte_pci_device *device);
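
/* Per-device bookkeeping for delayed hotplug initialization: allowed_at is the
 * tick count after which a newly scanned device may be un-blocked and probed
 * (see scan_pci_bus()).
 */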
struct env_devargs {
	struct rte_bus *bus;
	char name[128];
	uint64_t allowed_at;
	TAILQ_ENTRY(env_devargs) link;
};
static TAILQ_HEAD(, env_devargs) g_env_devargs = TAILQ_HEAD_INITIALIZER(g_env_devargs);

static struct env_devargs *
find_env_devargs(struct rte_bus *bus, const char *name)
{
	struct env_devargs *da;

	TAILQ_FOREACH(da, &g_env_devargs, link) {
		if (bus == da->bus && !strcmp(name, da->name)) {
			return da;
		}
	}

	return NULL;
}

static int
map_bar_rte(struct spdk_pci_device *device, uint32_t bar,
	    void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	return dpdk_pci_device_map_bar(device->dev_handle, bar, mapped_addr, phys_addr, size);
}
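
/* No per-BAR unmap is done on the SPDK side; the mapping is owned by DPDK and
 * released together with the rte_pci_device.
 */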
static int
unmap_bar_rte(struct spdk_pci_device *device, uint32_t bar, void *addr)
{
	return 0;
}

static int
cfg_read_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	return dpdk_pci_device_read_config(dev->dev_handle, value, len, offset);
}

static int
cfg_write_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	return dpdk_pci_device_write_config(dev->dev_handle, value, len, offset);
}
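
/* Hot-remove the device from DPDK, retrying on -ENOMSG (up to
 * DPDK_HOTPLUG_RETRY_COUNT times) since the multi-process IPC can
 * transiently misbehave.
 */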
static void
remove_rte_dev(struct rte_pci_device *rte_dev)
{
	char bdf[32];
	int i = 0, rc;

	snprintf(bdf, sizeof(bdf), "%s", dpdk_pci_device_get_name(rte_dev));
	do {
		rc = rte_eal_hotplug_remove("pci", bdf);
	} while (rc == -ENOMSG && ++i <= DPDK_HOTPLUG_RETRY_COUNT);
}
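
/* Executed on the DPDK alarm (interrupt) thread; scheduled via rte_eal_alarm_set(). */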
static void
detach_rte_cb(void *_dev)
{
	remove_rte_dev(_dev);
}

/* If it's a physical device, we need to deal with DPDK running in a different
 * process and we can't just unset one flag here. We also want to stop using
 * any device resources so that the device isn't "in use" by the userspace
 * driver once we detach it. This would allow attaching the device to a
 * different process, or to a kernel driver like nvme.
 */
static void
detach_rte(struct spdk_pci_device *dev)
{
	struct rte_pci_device *rte_dev = dev->dev_handle;
	int i;
	bool removed;

	if (!spdk_process_is_primary()) {
		remove_rte_dev(rte_dev);
		return;
	}

	pthread_mutex_lock(&g_pci_mutex);
	dev->internal.attached = false;
	/* prevent the hotremove notification from removing this device */
	dev->internal.pending_removal = true;
	pthread_mutex_unlock(&g_pci_mutex);

	rte_eal_alarm_set(1, detach_rte_cb, rte_dev);

	/* wait up to 2s for the cb to execute */
	for (i = 2000; i > 0; i--) {
		spdk_delay_us(1000);
		pthread_mutex_lock(&g_pci_mutex);
		removed = dev->internal.removed;
		pthread_mutex_unlock(&g_pci_mutex);

		if (removed) {
			break;
		}
	}

	/* besides checking the removed flag, we also need to wait
	 * for the dpdk detach function to unwind, as it's doing some
	 * operations even after calling our detach callback. Simply
	 * cancel the alarm - if it started executing already, this
	 * call will block and wait for it to finish.
	 */
	rte_eal_alarm_cancel(detach_rte_cb, rte_dev);

	/* the device could have been finally removed, so just check
	 * it again.
	 */
	pthread_mutex_lock(&g_pci_mutex);
	removed = dev->internal.removed;
	pthread_mutex_unlock(&g_pci_mutex);
	if (!removed) {
		SPDK_ERRLOG("Timeout waiting for DPDK to remove PCI device %s.\n",
			    dpdk_pci_device_get_name(rte_dev));
		/* If we reach this state, then the device couldn't be removed and most likely
		 * a subsequent hot add of a device in the same BDF will fail.
		 */
	}
}
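
/* Add a driver to g_pci_drivers; it is handed to DPDK later, in pci_env_init().
 * A minimal registration sketch (not taken from this file, names are
 * hypothetical; the id table must be terminated by a zeroed vendor_id):
 *
 *	static struct spdk_pci_id my_ids[] = {
 *		{ .class_id = 0xffffff, .vendor_id = 0x8086,
 *		  .device_id = SPDK_PCI_ANY_ID, .subvendor_id = SPDK_PCI_ANY_ID,
 *		  .subdevice_id = SPDK_PCI_ANY_ID },
 *		{ .vendor_id = 0 }
 *	};
 *	spdk_pci_driver_register("my_driver", my_ids, SPDK_PCI_DRIVER_NEED_MAPPING);
 */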
void
spdk_pci_driver_register(const char *name, struct spdk_pci_id *id_table, uint32_t flags)
{
	struct spdk_pci_driver *driver;

	driver = calloc(1, sizeof(*driver));
	if (!driver) {
		/* we can't do any better than bailing atm */
		return;
	}

	driver->name = name;
	driver->id_table = id_table;
	driver->drv_flags = flags;
	driver->driver = (struct rte_pci_driver *)driver->driver_buf;
	TAILQ_INSERT_TAIL(&g_pci_drivers, driver, tailq);
}

struct spdk_pci_driver *
spdk_pci_nvme_get_driver(void)
{
	return spdk_pci_get_driver("nvme");
}

struct spdk_pci_driver *
spdk_pci_get_driver(const char *name)
{
	struct spdk_pci_driver *driver;

	TAILQ_FOREACH(driver, &g_pci_drivers, tailq) {
		if (strcmp(driver->name, name) == 0) {
			return driver;
		}
	}

	return NULL;
}
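
/* Callback registered with rte_dev_event_callback_register(); runs on the DPDK
 * interrupt thread and handles hot-insert/hot-remove notifications for the
 * whole bus.
 */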
static void
pci_device_rte_dev_event(const char *device_name,
			 enum rte_dev_event_type event,
			 void *cb_arg)
{
	struct spdk_pci_device *dev;
	bool can_detach = false;

	switch (event) {
	default:
	case RTE_DEV_EVENT_ADD:
		/* Nothing to do here yet. */
		break;
	case RTE_DEV_EVENT_REMOVE:
		pthread_mutex_lock(&g_pci_mutex);
		TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
			struct rte_pci_device *rte_dev = dev->dev_handle;

			if (strcmp(dpdk_pci_device_get_name(rte_dev), device_name) == 0 &&
			    !dev->internal.pending_removal) {
				can_detach = !dev->internal.attached;
				/* prevent any further attaches */
				dev->internal.pending_removal = true;
				break;
			}
		}
		pthread_mutex_unlock(&g_pci_mutex);

		if (dev != NULL && can_detach) {
			/* If the device is not attached, we can remove it right away.
			 * Otherwise it will be removed at detach.
			 *
			 * Because the user's callback is invoked from the eal interrupt
			 * callback, the interrupt callback needs to finish before it can
			 * be unregistered when detaching the device. So finish the
			 * callback quickly and use a deferred removal to detach the
			 * device instead. This is a workaround; once device detaching is
			 * moved into the eal in the future, the deferred removal can be
			 * deleted.
			 */
			rte_eal_alarm_set(1, detach_rte_cb, dev->dev_handle);
		}
		break;
	}
}
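
/* Reconcile g_pci_devices on the caller's thread: free devices already marked
 * as removed and move devices hotplugged on the DPDK thread onto the main list,
 * updating the vtophys tracking either way.
 */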
static void
cleanup_pci_devices(void)
{
	struct spdk_pci_device *dev, *tmp;

	pthread_mutex_lock(&g_pci_mutex);
	/* cleanup removed devices */
	TAILQ_FOREACH_SAFE(dev, &g_pci_devices, internal.tailq, tmp) {
		if (!dev->internal.removed) {
			continue;
		}

		vtophys_pci_device_removed(dev->dev_handle);
		TAILQ_REMOVE(&g_pci_devices, dev, internal.tailq);
		free(dev);
	}

	/* add newly-attached devices */
	TAILQ_FOREACH_SAFE(dev, &g_pci_hotplugged_devices, internal.tailq, tmp) {
		TAILQ_REMOVE(&g_pci_hotplugged_devices, dev, internal.tailq);
		TAILQ_INSERT_TAIL(&g_pci_devices, dev, internal.tailq);
		vtophys_pci_device_added(dev->dev_handle);
	}
	pthread_mutex_unlock(&g_pci_mutex);
}

static int scan_pci_bus(bool delay_init);

/* translate spdk_pci_driver to an rte_pci_driver and register it to dpdk */
static int
register_rte_driver(struct spdk_pci_driver *driver)
{
	unsigned pci_id_count = 0;
	struct rte_pci_id *rte_id_table;
	char *rte_name;
	size_t rte_name_len;
	uint32_t rte_flags;

	assert(driver->id_table);
	while (driver->id_table[pci_id_count].vendor_id) {
		pci_id_count++;
	}
	assert(pci_id_count > 0);

	rte_id_table = calloc(pci_id_count + 1, sizeof(*rte_id_table));
	if (!rte_id_table) {
		return -ENOMEM;
	}

	while (pci_id_count > 0) {
		struct rte_pci_id *rte_id = &rte_id_table[pci_id_count - 1];
		const struct spdk_pci_id *spdk_id = &driver->id_table[pci_id_count - 1];

		rte_id->class_id = spdk_id->class_id;
		rte_id->vendor_id = spdk_id->vendor_id;
		rte_id->device_id = spdk_id->device_id;
		rte_id->subsystem_vendor_id = spdk_id->subvendor_id;
		rte_id->subsystem_device_id = spdk_id->subdevice_id;
		pci_id_count--;
	}

	assert(driver->name);
	rte_name_len = strlen(driver->name) + strlen("spdk_") + 1;
	rte_name = calloc(rte_name_len, 1);
	if (!rte_name) {
		free(rte_id_table);
		return -ENOMEM;
	}

	snprintf(rte_name, rte_name_len, "spdk_%s", driver->name);
	driver->driver->driver.name = rte_name;
	driver->driver->id_table = rte_id_table;

	rte_flags = 0;
	if (driver->drv_flags & SPDK_PCI_DRIVER_NEED_MAPPING) {
		rte_flags |= RTE_PCI_DRV_NEED_MAPPING;
	}
	if (driver->drv_flags & SPDK_PCI_DRIVER_WC_ACTIVATE) {
		rte_flags |= RTE_PCI_DRV_WC_ACTIVATE;
	}
	driver->driver->drv_flags = rte_flags;

	driver->driver->probe = pci_device_init;
	driver->driver->remove = pci_device_fini;

	rte_pci_register(driver->driver);
	return 0;
}
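
/* Common part of pci_env_init() and pci_env_reinit(): scan the bus without
 * delaying any devices and, in the primary process, register the hotremove
 * callback.
 */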
static inline void
_pci_env_init(void)
{
	/* We assume devices were present on the bus for more than 2 seconds
	 * before initializing SPDK and there's no need to wait more. We scan
	 * the bus, but we don't block any devices.
	 */
	scan_pci_bus(false);

	/* Register a single hotremove callback for all devices. */
	if (spdk_process_is_primary()) {
		rte_dev_event_callback_register(NULL, pci_device_rte_dev_event, NULL);
	}
}

void
pci_env_init(void)
{
	struct spdk_pci_driver *driver;

	TAILQ_FOREACH(driver, &g_pci_drivers, tailq) {
		register_rte_driver(driver);
	}

	_pci_env_init();
}

void
pci_env_reinit(void)
{
	/* There is no need to register pci drivers again, since they were
	 * already pre-registered in pci_env_init.
	 */

	_pci_env_init();
}

void
pci_env_fini(void)
{
	struct spdk_pci_device *dev;
	char bdf[32];

	cleanup_pci_devices();
	TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
		if (dev->internal.attached) {
			spdk_pci_addr_fmt(bdf, sizeof(bdf), &dev->addr);
			SPDK_ERRLOG("Device %s is still attached at shutdown!\n", bdf);
		}
	}

	if (spdk_process_is_primary()) {
		rte_dev_event_callback_unregister(NULL, pci_device_rte_dev_event, NULL);
	}
}
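
/* Probe callback wired into every registered rte_pci_driver (see
 * register_rte_driver()). Wraps the rte_pci_device in an spdk_pci_device,
 * optionally attaches it through the driver's enumerate callback, and queues
 * it for cleanup_pci_devices() to publish.
 */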
int
pci_device_init(struct rte_pci_driver *_drv,
		struct rte_pci_device *_dev)
{
	struct spdk_pci_driver *driver = (struct spdk_pci_driver *)_drv;
	struct spdk_pci_device *dev;
	int rc;

	dev = calloc(1, sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	dev->dev_handle = _dev;

	dpdk_pci_device_copy_identifiers(_dev, dev);
	dev->type = "pci";

	dev->map_bar = map_bar_rte;
	dev->unmap_bar = unmap_bar_rte;
	dev->cfg_read = cfg_read_rte;
	dev->cfg_write = cfg_write_rte;

	dev->internal.driver = driver;
	dev->internal.claim_fd = -1;

	if (driver->cb_fn != NULL) {
		rc = driver->cb_fn(driver->cb_arg, dev);
		if (rc != 0) {
			free(dev);
			return rc;
		}
		dev->internal.attached = true;
	}

	pthread_mutex_lock(&g_pci_mutex);
	TAILQ_INSERT_TAIL(&g_pci_hotplugged_devices, dev, internal.tailq);
	pthread_mutex_unlock(&g_pci_mutex);
	return 0;
}
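
/* Record the tick count after which the given device may be allowed again;
 * an entry is created on first use and reset to 0 when the device goes away.
 */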
static void
set_allowed_at(struct rte_devargs *rte_da, uint64_t tsc)
{
	struct env_devargs *env_da;

	env_da = find_env_devargs(rte_da->bus, rte_da->name);
	if (env_da == NULL) {
		env_da = calloc(1, sizeof(*env_da));
		if (env_da == NULL) {
			SPDK_ERRLOG("could not set_allowed_at for device %s\n", rte_da->name);
			return;
		}
		env_da->bus = rte_da->bus;
		spdk_strcpy_pad(env_da->name, rte_da->name, sizeof(env_da->name), 0);
		TAILQ_INSERT_TAIL(&g_env_devargs, env_da, link);
	}

	env_da->allowed_at = tsc;
}

static uint64_t
get_allowed_at(struct rte_devargs *rte_da)
{
	struct env_devargs *env_da;

	env_da = find_env_devargs(rte_da->bus, rte_da->name);
	if (env_da) {
		return env_da->allowed_at;
	} else {
		return 0;
	}
}
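
/* Remove callback wired into every registered rte_pci_driver. Refuses to
 * release a device that is still attached and otherwise marks it as removed
 * so cleanup_pci_devices() can free it.
 */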
int
pci_device_fini(struct rte_pci_device *_dev)
{
	struct spdk_pci_device *dev;

	pthread_mutex_lock(&g_pci_mutex);
	TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
		if (dev->dev_handle == _dev) {
			break;
		}
	}

	if (dev == NULL || dev->internal.attached) {
		/* The device might be still referenced somewhere in SPDK. */
		pthread_mutex_unlock(&g_pci_mutex);
		return -EBUSY;
	}

	/* remove our allowed_at option */
	if (dpdk_pci_device_get_devargs(_dev)) {
		set_allowed_at(dpdk_pci_device_get_devargs(_dev), 0);
	}

	/* It is possible that the removed flag was already set when there is a race
	 * between the remove notification for this process and another process
	 * that is also detaching from this same device (for example, when using the
	 * nvme driver in multi-process mode). So do not assert here. See
	 * #2456 for additional details.
	 */
	dev->internal.removed = true;
	pthread_mutex_unlock(&g_pci_mutex);
	return 0;
}
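
/* Detach a device on behalf of the user: release any claim, then hand the
 * device back to the provider ("pci" or e.g. "vmd") that created it.
 */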
void
spdk_pci_device_detach(struct spdk_pci_device *dev)
{
	struct spdk_pci_device_provider *provider;

	assert(dev->internal.attached);

	if (dev->internal.claim_fd >= 0) {
		spdk_pci_device_unclaim(dev);
	}

	TAILQ_FOREACH(provider, &g_pci_device_providers, tailq) {
		if (strcmp(dev->type, provider->name) == 0) {
			break;
		}
	}

	assert(provider != NULL);
	dev->internal.attached = false;
	provider->detach_cb(dev);

	cleanup_pci_devices();
}
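
/* Scan the PCI bus and decide which devices rte_bus_probe() may initialize.
 * With delay_init set, newly discovered devices are temporarily blocked and
 * only allowed again two seconds later (see issue #1275), giving a hot-plugged
 * NVMe device time to finish resetting in the kernel before we take it over.
 * Direct attach by BDF does not go through this path and is never delayed.
 */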
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
static int
|
|
|
|
scan_pci_bus(bool delay_init)
|
|
|
|
{
|
2022-09-15 13:56:06 +00:00
|
|
|
struct rte_dev_iterator it;
|
|
|
|
struct rte_device *rte_dev;
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
uint64_t now;
|
|
|
|
|
|
|
|
rte_bus_scan();
|
|
|
|
now = spdk_get_ticks();
|
|
|
|
|
2022-09-15 13:56:06 +00:00
|
|
|
if (!TAILQ_FIRST(&g_pci_drivers)) {
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-09-15 13:56:06 +00:00
|
|
|
RTE_DEV_FOREACH(rte_dev, "bus=pci", &it) {
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
struct rte_devargs *da;
|
|
|
|
|
2022-09-15 13:56:06 +00:00
|
|
|
da = rte_dev->devargs;
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
if (!da) {
|
|
|
|
char devargs_str[128];
|
|
|
|
|
2020-11-16 16:27:04 +00:00
|
|
|
/* the device was never blocked or allowed */
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
da = calloc(1, sizeof(*da));
|
|
|
|
if (!da) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2022-09-15 13:56:06 +00:00
|
|
|
snprintf(devargs_str, sizeof(devargs_str), "pci:%s", rte_dev->name);
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
if (rte_devargs_parse(da, devargs_str) != 0) {
|
|
|
|
free(da);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
rte_devargs_insert(&da);
|
2022-09-15 13:56:06 +00:00
|
|
|
rte_dev->devargs = da;
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
}
|
|
|
|
|
2021-04-20 19:17:53 +00:00
|
|
|
if (get_allowed_at(da)) {
|
|
|
|
uint64_t allowed_at = get_allowed_at(da);
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
|
|
|
|
/* this device was seen by spdk before... */
|
2020-11-16 16:27:04 +00:00
|
|
|
if (da->policy == RTE_DEV_BLOCKED && allowed_at <= now) {
|
|
|
|
da->policy = RTE_DEV_ALLOWED;
|
env_dpdk/pci: delay device initialization on hotplug
A workaround for kernel deadlocks surfaced in #1275.
DPDK basically offers two APIs for hotplugging all PCI devices:
rte_bus_scan() and rte_bus_probe(). Scan iterates through
/sys/bus/pci/devices/* and creates corresponding rte_pci_device-s,
then rte_bus_probe() tries to initialize each device with the
supporting driver.
Previously we did scan and probe together, one after another, now
we'll have an intermediate step. After scanning the bus, we'll
iterate through all rte_pci_device-s and temporarily blacklist any
newly detected devices. We'll use devargs->data field to a store
a timeout value (integer) after which the device can be un-blacklisted
and initialized. devargs->data is documented in DPDK as "Device
string storage" and it's a char*, but it's not referenced anywhere
in DPDK. rte_bus_probe() respects the blacklist and doesn't do
absolutely anything with blacklisted ones.
The timeout value is 2 seconds, which should be plenty enough
for an NVMe device to reset, leave the critical lock sections in
kernel, and let us initialize it safely.
Note that direct attach by BDF doesn't respect the blacklist,
so an NVMe attach RPC won't be delayed in any way, it will continue
to work as it always did. Only the automatic discovery & enumeration
is deferred.
Change-Id: I62b719271bd0755bc2882331ea33f69897b1e5e5
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1733
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
2020-04-07 17:21:54 +00:00
|
|
|
}
|
2022-09-15 13:56:06 +00:00
|
|
|
} else if ((rte_dev->bus->conf.scan_mode == RTE_BUS_SCAN_ALLOWLIST &&
|
2020-11-16 16:27:04 +00:00
|
|
|
			   da->policy == RTE_DEV_ALLOWED) || da->policy != RTE_DEV_BLOCKED) {
			/* override the policy only if not permanently blocked */

			if (delay_init) {
				da->policy = RTE_DEV_BLOCKED;
				set_allowed_at(da, now + 2 * spdk_get_ticks_hz());
			} else {
				da->policy = RTE_DEV_ALLOWED;
				set_allowed_at(da, now);
			}
		}
	}

	return 0;
}
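
/*
 * Background on the deferred initialization above (summarized from the change
 * that introduced it): after a bus rescan, any newly detected device is
 * temporarily marked RTE_DEV_BLOCKED and its devargs carry a timestamp two
 * seconds in the future. rte_bus_probe() leaves blocked devices alone, which
 * gives the kernel time to finish its reset/teardown path before SPDK
 * initializes the device. Only automatic discovery and enumeration is
 * deferred - a direct attach by BDF ignores this block and proceeds
 * immediately.
 */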

static int
pci_attach_rte(const struct spdk_pci_addr *addr)
{
	char bdf[32];
	int rc, i = 0;

	spdk_pci_addr_fmt(bdf, sizeof(bdf), addr);

	do {
		rc = rte_eal_hotplug_add("pci", bdf, "");
	} while (rc == -ENOMSG && ++i <= DPDK_HOTPLUG_RETRY_COUNT);

	if (i > 1 && rc == -EEXIST) {
		/* Even though the previous request timed out, the device
		 * was attached successfully.
		 */
		rc = 0;
	}

	return rc;
}

static struct spdk_pci_device_provider g_pci_rte_provider = {
	.name = "pci",
	.attach_cb = pci_attach_rte,
	.detach_cb = detach_rte,
};

SPDK_PCI_REGISTER_DEVICE_PROVIDER(pci, &g_pci_rte_provider);
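
/*
 * Illustrative sketch (not part of this file): another bus implementation can
 * plug into the attach/detach path the same way g_pci_rte_provider does. The
 * provider name and callbacks below are hypothetical.
 *
 *	static int
 *	my_bus_attach(const struct spdk_pci_addr *addr)
 *	{
 *		return -ENODEV; // report "not ours" so other providers get a chance
 *	}
 *
 *	static void
 *	my_bus_detach(struct spdk_pci_device *dev)
 *	{
 *	}
 *
 *	static struct spdk_pci_device_provider g_my_bus_provider = {
 *		.name = "my_bus",
 *		.attach_cb = my_bus_attach,
 *		.detach_cb = my_bus_detach,
 *	};
 *
 *	SPDK_PCI_REGISTER_DEVICE_PROVIDER(my_bus, &g_my_bus_provider);
 */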

int
spdk_pci_device_attach(struct spdk_pci_driver *driver,
		       spdk_pci_enum_cb enum_cb,
		       void *enum_ctx, struct spdk_pci_addr *pci_address)
{
	struct spdk_pci_device *dev;
	struct spdk_pci_device_provider *provider;
	struct rte_pci_device *rte_dev;
	struct rte_devargs *da;
	int rc;

	cleanup_pci_devices();

	TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
		if (spdk_pci_addr_compare(&dev->addr, pci_address) == 0) {
			break;
		}
	}

	if (dev != NULL && dev->internal.driver == driver) {
		pthread_mutex_lock(&g_pci_mutex);
		if (dev->internal.attached || dev->internal.pending_removal) {
			pthread_mutex_unlock(&g_pci_mutex);
			return -1;
		}

		rc = enum_cb(enum_ctx, dev);
		if (rc == 0) {
			dev->internal.attached = true;
		}
		pthread_mutex_unlock(&g_pci_mutex);
		return rc;
	}

	driver->cb_fn = enum_cb;
	driver->cb_arg = enum_ctx;

	rc = -ENODEV;
	TAILQ_FOREACH(provider, &g_pci_device_providers, tailq) {
		rc = provider->attach_cb(pci_address);
		if (rc == 0) {
			break;
		}
	}

	driver->cb_arg = NULL;
	driver->cb_fn = NULL;

	cleanup_pci_devices();

	if (rc != 0) {
		return -1;
	}

	/* Explicit attach ignores the allowlist, so if we blocked this
	 * device before, let's enable it now - just for clarity.
	 */
	TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
		if (spdk_pci_addr_compare(&dev->addr, pci_address) == 0) {
			break;
		}
	}
	assert(dev != NULL);

	rte_dev = dev->dev_handle;
	if (rte_dev != NULL) {
		da = dpdk_pci_device_get_devargs(rte_dev);
		if (da && get_allowed_at(da)) {
			set_allowed_at(da, spdk_get_ticks());
			da->policy = RTE_DEV_ALLOWED;
		}
	}

	return 0;
}
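
/*
 * Usage sketch (assumptions: a registered driver handle is available, e.g. via
 * spdk_pci_nvme_get_driver(), and attach_cb/cb_ctx are the caller's
 * spdk_pci_enum_cb and its context):
 *
 *	struct spdk_pci_addr addr;
 *
 *	if (spdk_pci_addr_parse(&addr, "0000:01:00.0") == 0 &&
 *	    spdk_pci_device_attach(spdk_pci_nvme_get_driver(), attach_cb, cb_ctx, &addr) == 0) {
 *		// attach_cb was invoked with the matching spdk_pci_device
 *	}
 */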

/* Note: You can call spdk_pci_enumerate from more than one thread
 * simultaneously safely, but you cannot call spdk_pci_enumerate
 * and rte_bus_probe() simultaneously.
 */
int
spdk_pci_enumerate(struct spdk_pci_driver *driver,
		   spdk_pci_enum_cb enum_cb,
		   void *enum_ctx)
{
	struct spdk_pci_device *dev;
	int rc;

	cleanup_pci_devices();

	pthread_mutex_lock(&g_pci_mutex);
	TAILQ_FOREACH(dev, &g_pci_devices, internal.tailq) {
		if (dev->internal.attached ||
		    dev->internal.driver != driver ||
		    dev->internal.pending_removal) {
			continue;
		}

		rc = enum_cb(enum_ctx, dev);
		if (rc == 0) {
			dev->internal.attached = true;
		} else if (rc < 0) {
			pthread_mutex_unlock(&g_pci_mutex);
			return -1;
		}
	}
	pthread_mutex_unlock(&g_pci_mutex);

	if (scan_pci_bus(true) != 0) {
		return -1;
	}

	driver->cb_fn = enum_cb;
	driver->cb_arg = enum_ctx;

	if (rte_bus_probe() != 0) {
		driver->cb_arg = NULL;
		driver->cb_fn = NULL;
		return -1;
	}

	driver->cb_arg = NULL;
	driver->cb_fn = NULL;

	cleanup_pci_devices();
	return 0;
}
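
/*
 * Usage sketch: enumerate all unattached devices bound to a driver (driver
 * handle assumed, e.g. spdk_pci_nvme_get_driver()). Per the handling of
 * enum_cb() above, the callback returns 0 to claim a device, a positive value
 * to skip it, or a negative value to abort enumeration.
 *
 *	static int
 *	enum_cb(void *ctx, struct spdk_pci_device *dev)
 *	{
 *		if (spdk_pci_device_get_vendor_id(dev) != 0x8086) {
 *			return 1; // skip, leave the device unattached
 *		}
 *		return 0; // claim the device
 *	}
 *
 *	spdk_pci_enumerate(spdk_pci_nvme_get_driver(), enum_cb, NULL);
 */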

void
spdk_pci_for_each_device(void *ctx, void (*fn)(void *ctx, struct spdk_pci_device *dev))
{
	struct spdk_pci_device *dev, *tmp;

	pthread_mutex_lock(&g_pci_mutex);
	TAILQ_FOREACH_SAFE(dev, &g_pci_devices, internal.tailq, tmp) {
		fn(ctx, dev);
	}
	pthread_mutex_unlock(&g_pci_mutex);
}
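
/*
 * Usage sketch: count the currently tracked devices with a simple callback.
 *
 *	static void
 *	count_dev(void *ctx, struct spdk_pci_device *dev)
 *	{
 *		(*(int *)ctx)++;
 *	}
 *
 *	int count = 0;
 *	spdk_pci_for_each_device(&count, count_dev);
 */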

int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	return dev->map_bar(dev, bar, mapped_addr, phys_addr, size);
}

int
spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
{
	return dev->unmap_bar(dev, bar, addr);
}
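
/*
 * Usage sketch: map BAR 0 of an attached device, use it, then unmap it.
 *
 *	void *bar0;
 *	uint64_t phys, size;
 *
 *	if (spdk_pci_device_map_bar(dev, 0, &bar0, &phys, &size) == 0) {
 *		// access device registers through bar0 ...
 *		spdk_pci_device_unmap_bar(dev, 0, bar0);
 *	}
 */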

int
spdk_pci_device_enable_interrupt(struct spdk_pci_device *dev)
{
	struct rte_pci_device *rte_dev = dev->dev_handle;
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	return rte_intr_enable(&rte_dev->intr_handle);
#else
	return rte_intr_enable(rte_dev->intr_handle);
#endif
}

int
spdk_pci_device_disable_interrupt(struct spdk_pci_device *dev)
{
	struct rte_pci_device *rte_dev = dev->dev_handle;
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	return rte_intr_disable(&rte_dev->intr_handle);
#else
	return rte_intr_disable(rte_dev->intr_handle);
#endif
}

int
spdk_pci_device_get_interrupt_efd(struct spdk_pci_device *dev)
{
	struct rte_pci_device *rte_dev = dev->dev_handle;
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	return rte_dev->intr_handle.fd;
#else
	return rte_intr_fd_get(rte_dev->intr_handle);
#endif
}
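
/*
 * Usage sketch: the interrupt file descriptor returned above can be polled
 * like any other fd, e.g. with epoll (minimal sketch; includes and error
 * handling omitted):
 *
 *	int efd = spdk_pci_device_get_interrupt_efd(dev);
 *	int epfd = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd };
 *
 *	spdk_pci_device_enable_interrupt(dev);
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);
 *	// epoll_wait() now wakes up when the device interrupt fires
 */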

uint32_t
spdk_pci_device_get_domain(struct spdk_pci_device *dev)
{
	return dev->addr.domain;
}

uint8_t
spdk_pci_device_get_bus(struct spdk_pci_device *dev)
{
	return dev->addr.bus;
}

uint8_t
spdk_pci_device_get_dev(struct spdk_pci_device *dev)
{
	return dev->addr.dev;
}

uint8_t
spdk_pci_device_get_func(struct spdk_pci_device *dev)
{
	return dev->addr.func;
}

uint16_t
spdk_pci_device_get_vendor_id(struct spdk_pci_device *dev)
{
	return dev->id.vendor_id;
}

uint16_t
spdk_pci_device_get_device_id(struct spdk_pci_device *dev)
{
	return dev->id.device_id;
}

uint16_t
spdk_pci_device_get_subvendor_id(struct spdk_pci_device *dev)
{
	return dev->id.subvendor_id;
}

uint16_t
spdk_pci_device_get_subdevice_id(struct spdk_pci_device *dev)
{
	return dev->id.subdevice_id;
}

struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *dev)
{
	return dev->id;
}

int
spdk_pci_device_get_socket_id(struct spdk_pci_device *dev)
{
	return dev->socket_id;
}

int
spdk_pci_device_cfg_read(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	return dev->cfg_read(dev, value, len, offset);
}

int
spdk_pci_device_cfg_write(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	return dev->cfg_write(dev, value, len, offset);
}

int
spdk_pci_device_cfg_read8(struct spdk_pci_device *dev, uint8_t *value, uint32_t offset)
{
	return spdk_pci_device_cfg_read(dev, value, 1, offset);
}

int
spdk_pci_device_cfg_write8(struct spdk_pci_device *dev, uint8_t value, uint32_t offset)
{
	return spdk_pci_device_cfg_write(dev, &value, 1, offset);
}

int
spdk_pci_device_cfg_read16(struct spdk_pci_device *dev, uint16_t *value, uint32_t offset)
{
	return spdk_pci_device_cfg_read(dev, value, 2, offset);
}

int
spdk_pci_device_cfg_write16(struct spdk_pci_device *dev, uint16_t value, uint32_t offset)
{
	return spdk_pci_device_cfg_write(dev, &value, 2, offset);
}

int
spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value, uint32_t offset)
{
	return spdk_pci_device_cfg_read(dev, value, 4, offset);
}

int
spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value, uint32_t offset)
{
	return spdk_pci_device_cfg_write(dev, &value, 4, offset);
}
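
/*
 * Usage sketch: read a couple of standard config-space registers through the
 * helpers above (offsets 0x00 and 0x02 are the PCI vendor and device IDs).
 *
 *	uint16_t vendor, device;
 *
 *	if (spdk_pci_device_cfg_read16(dev, &vendor, 0x00) == 0 &&
 *	    spdk_pci_device_cfg_read16(dev, &device, 0x02) == 0) {
 *		printf("%04x:%04x\n", vendor, device);
 *	}
 */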

int
spdk_pci_device_get_serial_number(struct spdk_pci_device *dev, char *sn, size_t len)
{
	int err;
	uint32_t pos, header = 0;
	uint32_t i, buf[2];

	if (len < 17) {
		return -1;
	}

	err = spdk_pci_device_cfg_read32(dev, &header, PCI_CFG_SIZE);
	if (err || !header) {
		return -1;
	}

	pos = PCI_CFG_SIZE;
	while (1) {
		if ((header & 0x0000ffff) == PCI_EXT_CAP_ID_SN) {
			if (pos) {
				/* skip the header */
				pos += 4;
				for (i = 0; i < 2; i++) {
					err = spdk_pci_device_cfg_read32(dev, &buf[i], pos + 4 * i);
					if (err) {
						return -1;
					}
				}
				snprintf(sn, len, "%08x%08x", buf[1], buf[0]);
				return 0;
			}
		}
		pos = (header >> 20) & 0xffc;
		/* 0 if no other items exist */
		if (pos < PCI_CFG_SIZE) {
			return -1;
		}
		err = spdk_pci_device_cfg_read32(dev, &header, pos);
		if (err) {
			return -1;
		}
	}
	return -1;
}

struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *dev)
{
	return dev->addr;
}

bool
spdk_pci_device_is_removed(struct spdk_pci_device *dev)
{
	return dev->internal.pending_removal;
}

int
spdk_pci_addr_compare(const struct spdk_pci_addr *a1, const struct spdk_pci_addr *a2)
{
	if (a1->domain > a2->domain) {
		return 1;
	} else if (a1->domain < a2->domain) {
		return -1;
	} else if (a1->bus > a2->bus) {
		return 1;
	} else if (a1->bus < a2->bus) {
		return -1;
	} else if (a1->dev > a2->dev) {
		return 1;
	} else if (a1->dev < a2->dev) {
		return -1;
	} else if (a1->func > a2->func) {
		return 1;
	} else if (a1->func < a2->func) {
		return -1;
	}

	return 0;
}

#ifdef __linux__
int
spdk_pci_device_claim(struct spdk_pci_device *dev)
{
	int dev_fd;
	char dev_name[64];
	int pid;
	void *dev_map;
	struct flock pcidev_lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,
	};

	snprintf(dev_name, sizeof(dev_name), "/var/tmp/spdk_pci_lock_%04x:%02x:%02x.%x",
		 dev->addr.domain, dev->addr.bus, dev->addr.dev, dev->addr.func);

	dev_fd = open(dev_name, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (dev_fd == -1) {
		SPDK_ERRLOG("could not open %s\n", dev_name);
		return -errno;
	}

	if (ftruncate(dev_fd, sizeof(int)) != 0) {
		SPDK_ERRLOG("could not truncate %s\n", dev_name);
		close(dev_fd);
		return -errno;
	}

	dev_map = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
		       MAP_SHARED, dev_fd, 0);
	if (dev_map == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap dev %s (%d)\n", dev_name, errno);
		close(dev_fd);
		return -errno;
	}

	if (fcntl(dev_fd, F_SETLK, &pcidev_lock) != 0) {
		pid = *(int *)dev_map;
		SPDK_ERRLOG("Cannot create lock on device %s, probably"
			    " process %d has claimed it\n", dev_name, pid);
		munmap(dev_map, sizeof(int));
		close(dev_fd);
		/* F_SETLK returns unspecified errnos, normalize them */
		return -EACCES;
	}

	*(int *)dev_map = (int)getpid();
	munmap(dev_map, sizeof(int));
	dev->internal.claim_fd = dev_fd;
	/* Keep dev_fd open to maintain the lock. */
	return 0;
}

void
spdk_pci_device_unclaim(struct spdk_pci_device *dev)
{
	char dev_name[64];

	snprintf(dev_name, sizeof(dev_name), "/var/tmp/spdk_pci_lock_%04x:%02x:%02x.%x",
		 dev->addr.domain, dev->addr.bus, dev->addr.dev, dev->addr.func);

	close(dev->internal.claim_fd);
	dev->internal.claim_fd = -1;
	unlink(dev_name);
}
#else /* !__linux__ */
int
spdk_pci_device_claim(struct spdk_pci_device *dev)
{
	/* TODO */
	return 0;
}

void
spdk_pci_device_unclaim(struct spdk_pci_device *dev)
{
	/* TODO */
}
#endif /* __linux__ */

int
spdk_pci_addr_parse(struct spdk_pci_addr *addr, const char *bdf)
{
	unsigned domain, bus, dev, func;

	if (addr == NULL || bdf == NULL) {
		return -EINVAL;
	}

	if ((sscanf(bdf, "%x:%x:%x.%x", &domain, &bus, &dev, &func) == 4) ||
	    (sscanf(bdf, "%x.%x.%x.%x", &domain, &bus, &dev, &func) == 4)) {
		/* Matched a full address - all variables are initialized */
	} else if (sscanf(bdf, "%x:%x:%x", &domain, &bus, &dev) == 3) {
		func = 0;
	} else if ((sscanf(bdf, "%x:%x.%x", &bus, &dev, &func) == 3) ||
		   (sscanf(bdf, "%x.%x.%x", &bus, &dev, &func) == 3)) {
		domain = 0;
	} else if ((sscanf(bdf, "%x:%x", &bus, &dev) == 2) ||
		   (sscanf(bdf, "%x.%x", &bus, &dev) == 2)) {
		domain = 0;
		func = 0;
	} else {
		return -EINVAL;
	}

	if (bus > 0xFF || dev > 0x1F || func > 7) {
		return -EINVAL;
	}

	addr->domain = domain;
	addr->bus = bus;
	addr->dev = dev;
	addr->func = func;

	return 0;
}

int
spdk_pci_addr_fmt(char *bdf, size_t sz, const struct spdk_pci_addr *addr)
{
	int rc;

	rc = snprintf(bdf, sz, "%04x:%02x:%02x.%x",
		      addr->domain, addr->bus,
		      addr->dev, addr->func);

	if (rc > 0 && (size_t)rc < sz) {
		return 0;
	}

	return -1;
}
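
/*
 * Usage sketch: parse a BDF string and format it back into canonical
 * DDDD:BB:DD.F form.
 *
 *	struct spdk_pci_addr addr;
 *	char bdf[32];
 *
 *	if (spdk_pci_addr_parse(&addr, "01:00.0") == 0 &&
 *	    spdk_pci_addr_fmt(bdf, sizeof(bdf), &addr) == 0) {
 *		// bdf now holds "0000:01:00.0"
 *	}
 */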

int
spdk_pci_hook_device(struct spdk_pci_driver *drv, struct spdk_pci_device *dev)
{
	int rc;

	assert(dev->map_bar != NULL);
	assert(dev->unmap_bar != NULL);
	assert(dev->cfg_read != NULL);
	assert(dev->cfg_write != NULL);
	dev->internal.driver = drv;

	if (drv->cb_fn != NULL) {
		rc = drv->cb_fn(drv->cb_arg, dev);
		if (rc != 0) {
			return -ECANCELED;
		}

		dev->internal.attached = true;
	}

	TAILQ_INSERT_TAIL(&g_pci_devices, dev, internal.tailq);

	return 0;
}

void
spdk_pci_unhook_device(struct spdk_pci_device *dev)
{
	assert(!dev->internal.attached);
	TAILQ_REMOVE(&g_pci_devices, dev, internal.tailq);
}

void
spdk_pci_register_device_provider(struct spdk_pci_device_provider *provider)
{
	TAILQ_INSERT_TAIL(&g_pci_device_providers, provider, tailq);
}

const char *
spdk_pci_device_get_type(const struct spdk_pci_device *dev)
{
	return dev->type;
}

int
spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
{
	struct rte_devargs *da;
	char devargs_str[128];

	da = calloc(1, sizeof(*da));
	if (da == NULL) {
		SPDK_ERRLOG("could not allocate rte_devargs\n");
		return -ENOMEM;
	}

	snprintf(devargs_str, sizeof(devargs_str), "pci:%04x:%02x:%02x.%x",
		 pci_addr->domain, pci_addr->bus, pci_addr->dev, pci_addr->func);
	if (rte_devargs_parse(da, devargs_str) != 0) {
		SPDK_ERRLOG("rte_devargs_parse() failed on '%s'\n", devargs_str);
		free(da);
		return -EINVAL;
	}
	da->policy = RTE_DEV_ALLOWED;
	/* Note: if a devargs already exists for this device address, it just gets
	 * overridden. So we do not need to check if the devargs already exists.
	 * DPDK will take care of memory management for the devargs structure after
	 * it has been inserted, so there's nothing SPDK needs to track.
	 */
	if (rte_devargs_insert(&da) != 0) {
		SPDK_ERRLOG("rte_devargs_insert() failed on '%s'\n", devargs_str);
		free(da);
		return -EINVAL;
	}

	return 0;
}
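
/*
 * Usage sketch (hypothetical scenario): mark a device as allowed, for example
 * one that was excluded from the initial scan, so that a subsequent
 * enumerate/attach can pick it up.
 *
 *	struct spdk_pci_addr addr;
 *
 *	if (spdk_pci_addr_parse(&addr, "0000:02:00.0") == 0 &&
 *	    spdk_pci_device_allow(&addr) == 0) {
 *		// the device is now on the DPDK allowed list
 *	}
 */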

uint64_t
dpdk_pci_device_vtophys(struct rte_pci_device *dev, uint64_t vaddr)
{
	struct rte_mem_resource *res;
	uint64_t paddr;
	unsigned r;

	for (r = 0; r < PCI_MAX_RESOURCE; r++) {
		res = &dev->mem_resource[r];
		if (res->phys_addr && vaddr >= (uint64_t)res->addr &&
		    vaddr < (uint64_t)res->addr + res->len) {
			paddr = res->phys_addr + (vaddr - (uint64_t)res->addr);
			return paddr;
		}
	}

	return SPDK_VTOPHYS_ERROR;
}

const char *
dpdk_pci_device_get_name(struct rte_pci_device *rte_dev)
{
	return rte_dev->name;
}

struct rte_devargs *
dpdk_pci_device_get_devargs(struct rte_pci_device *rte_dev)
{
	return rte_dev->device.devargs;
}

void
dpdk_pci_device_copy_identifiers(struct rte_pci_device *_dev, struct spdk_pci_device *dev)
{
	dev->addr.domain = _dev->addr.domain;
	dev->addr.bus = _dev->addr.bus;
	dev->addr.dev = _dev->addr.devid;
	dev->addr.func = _dev->addr.function;
	dev->id.class_id = _dev->id.class_id;
	dev->id.vendor_id = _dev->id.vendor_id;
	dev->id.device_id = _dev->id.device_id;
	dev->id.subvendor_id = _dev->id.subsystem_vendor_id;
	dev->id.subdevice_id = _dev->id.subsystem_device_id;
	dev->socket_id = _dev->device.numa_node;
}

int
dpdk_pci_device_map_bar(struct rte_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	*mapped_addr = dev->mem_resource[bar].addr;
	*phys_addr = (uint64_t)dev->mem_resource[bar].phys_addr;
	*size = (uint64_t)dev->mem_resource[bar].len;

	return 0;
}

int
dpdk_pci_device_read_config(struct rte_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	int rc;

	rc = rte_pci_read_config(dev, value, len, offset);

	return (rc > 0 && (uint32_t)rc == len) ? 0 : -1;
}

int
dpdk_pci_device_write_config(struct rte_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
	int rc;

	rc = rte_pci_write_config(dev, value, len, offset);

#ifdef __FreeBSD__
	/* DPDK returns 0 on success and -1 on failure */
	return rc;
#endif
	return (rc > 0 && (uint32_t)rc == len) ? 0 : -1;
}