module/idxd: add lock around idxd device selection
Without the lock, multiple threads can race and end up sharing a device when
the intention is to share only after a full round robin.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: I29b854ff837d56078bc033802d3df244728a29aa
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8187
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 3bbfbb5b0f
commit bb8102b1ff
@@ -65,6 +65,7 @@ struct idxd_device {
 static TAILQ_HEAD(, idxd_device) g_idxd_devices = TAILQ_HEAD_INITIALIZER(g_idxd_devices);
 static struct idxd_device *g_next_dev = NULL;
 static uint32_t g_num_devices = 0;
+static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct idxd_io_channel {
 	struct spdk_idxd_io_channel *chan;
@@ -82,6 +83,7 @@ static struct idxd_device *
 idxd_select_device(struct idxd_io_channel *chan)
 {
 	uint32_t count = 0;
+	struct idxd_device *dev;
 
 	/*
 	 * We allow channels to share underlying devices,
@@ -90,20 +92,23 @@ idxd_select_device(struct idxd_io_channel *chan)
 	 */
 	do {
 		/* select next device */
+		pthread_mutex_lock(&g_dev_lock);
 		g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
 		if (g_next_dev == NULL) {
 			g_next_dev = TAILQ_FIRST(&g_idxd_devices);
 		}
+		dev = g_next_dev;
+		pthread_mutex_unlock(&g_dev_lock);
 
 		/*
 		 * Now see if a channel is available on this one. We only
 		 * allow a specific number of channels to share a device
 		 * to limit outstanding IO for flow control purposes.
 		 */
-		chan->chan = spdk_idxd_get_channel(g_next_dev->idxd);
+		chan->chan = spdk_idxd_get_channel(dev->idxd);
 		if (chan->chan != NULL) {
 			chan->max_outstanding = spdk_idxd_chan_get_max_operations(chan->chan);
-			return g_next_dev;
+			return dev;
 		}
 	} while (count++ < g_num_devices);
 
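For readers outside the SPDK tree, below is a minimal, self-contained sketch of the selection pattern the patch establishes: the shared round-robin cursor (g_next_dev) is advanced and copied into a local pointer while holding g_dev_lock, and everything after the unlock uses only the local copy. The idxd_device struct, the device list setup, and the main() driver are simplified stand-ins for illustration, not SPDK's real code; only the lock/advance/copy/unlock pattern mirrors the diff above.

/*
 * Minimal model of the locked round-robin device selection in the patch above.
 * Types and setup are simplified stand-ins; only the locking pattern is real.
 * Example build (hypothetical file name): cc -pthread -o rr_select rr_select.c
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/queue.h>

struct idxd_device {
	int id;
	TAILQ_ENTRY(idxd_device) tailq;
};

static TAILQ_HEAD(, idxd_device) g_idxd_devices = TAILQ_HEAD_INITIALIZER(g_idxd_devices);
static struct idxd_device *g_next_dev = NULL;
static pthread_mutex_t g_dev_lock = PTHREAD_MUTEX_INITIALIZER;

static struct idxd_device *
select_next_device(void)
{
	struct idxd_device *dev;

	/* Advance the shared cursor and take a private copy while holding the lock. */
	pthread_mutex_lock(&g_dev_lock);
	g_next_dev = TAILQ_NEXT(g_next_dev, tailq);
	if (g_next_dev == NULL) {
		g_next_dev = TAILQ_FIRST(&g_idxd_devices);
	}
	dev = g_next_dev;
	pthread_mutex_unlock(&g_dev_lock);

	/* Work only with the local copy; g_next_dev may move again underneath us. */
	return dev;
}

int
main(void)
{
	struct idxd_device devs[3];
	int i;

	for (i = 0; i < 3; i++) {
		devs[i].id = i;
		TAILQ_INSERT_TAIL(&g_idxd_devices, &devs[i], tailq);
	}
	g_next_dev = TAILQ_FIRST(&g_idxd_devices);

	/* Each call returns the next device in round-robin order: 1, 2, 0, 1, 2. */
	for (i = 0; i < 5; i++) {
		printf("selected device %d\n", select_next_device()->id);
	}
	return 0;
}

Without the mutex, two threads could both read the same g_next_dev value, advance it to the same node, and target the same device before a full round robin has completed, which is exactly the race the commit message describes.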