bdev, copy: move all I/O paths to use I/O channels

bdev and copy modules no longer have check_io functions -
all polling is now done via pollers registered when
I/O channels are created.

Other default resources are also removed - for example,
a qpair is no longer allocated and assigned per bdev
exposed by the nvme driver - the qpairs are only allocated
via I/O channels.  The same principle applies to the
aio driver.
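
To make the per-channel model concrete, a channel setup helper for the
nvme case would look roughly like the sketch below.  Only
spdk_nvme_ctrlr_alloc_io_qpair(), blockdev_nvme_poll() and the qpair
member of nvme_io_channel appear in this commit; the helper name, the
poller member and the exact wiring are illustrative assumptions, not
the driver's literal code:

/* Sketch: per-channel state for the nvme bdev driver. */
struct nvme_io_channel {
	struct spdk_nvme_qpair	*qpair;  /* used by the read/write paths in this commit */
	struct spdk_poller	*poller; /* assumed: polls the qpair for completions */
};

/* Hypothetical helper showing what channel creation now has to do:
 * allocate a qpair for this channel and poll it from the owning core. */
static int
nvme_channel_setup(struct spdk_nvme_ctrlr *ctrlr, struct nvme_io_channel *nvme_ch)
{
	nvme_ch->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
	if (nvme_ch->qpair == NULL) {
		return -1;
	}
	/* Same poller mechanism the removed per-bdev code used, now per channel. */
	spdk_poller_register(&nvme_ch->poller, blockdev_nvme_poll, nvme_ch->qpair,
			rte_lcore_id(), NULL, 0);
	return 0;
}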

ioat channels are no longer allocated and assigned to
lcores - they are dynamically allocated and assigned
to I/O channels when needed.  If no ioat channel is
available for an I/O channel, the copy engine framework
will revert to using memcpy/memset instead.
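
For the copy engine, the fallback is decided when a copy I/O channel is
created: the channel is bound to whichever engine could provide one,
and spdk_copy_submit()/spdk_copy_submit_fill() (see the copy_engine.c
changes below) simply dispatch through that binding.  A rough sketch of
the binding step, assuming copy_io_channel carries an engine pointer
and the engine's own channel (the two fields the new submit paths use);
the helper name and control flow are illustrative:

/* Assumed layout, matching how copy_ch->engine and copy_ch->ch are used below. */
struct copy_io_channel {
	struct spdk_copy_engine	*engine; /* engine this channel is bound to */
	struct spdk_io_channel	*ch;     /* engine-specific channel (e.g. an ioat channel) */
};

static void
copy_channel_bind(struct copy_io_channel *copy_ch, uint32_t priority)
{
	/* Prefer the hardware (ioat) engine if it can still hand out a channel... */
	if (hw_copy_engine != NULL) {
		copy_ch->ch = hw_copy_engine->get_io_channel(priority);
		if (copy_ch->ch != NULL) {
			copy_ch->engine = hw_copy_engine;
			return;
		}
	}
	/* ...otherwise fall back to the memcpy/memset engine. */
	copy_ch->engine = mem_copy_engine;
	copy_ch->ch = mem_copy_engine->get_io_channel(priority);
}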

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I99435a75fe792a2b91ab08f25962dfd407d6402f
Author: Jim Harris <james.r.harris@intel.com>
Date:   2016-09-20 16:18:44 -07:00
Commit: 0babf8ce81
Parent: fa29c70938

15 changed files with 125 additions and 235 deletions


@@ -98,15 +98,6 @@ struct spdk_bdev {
 	/** generation value used by block device reset */
 	uint32_t gencnt;
-	/** Whether the poller is registered with the reactor */
-	bool is_running;
-	/** Which lcore the poller is running on */
-	uint32_t lcore;
-	/** Poller to submit IO and check completion */
-	struct spdk_poller *poller;
 	/** True if another blockdev or a LUN is using this device */
 	bool claimed;
@@ -134,13 +125,6 @@ struct spdk_bdev_fn_table {
 	/** Destroy the backend block device object */
 	int (*destruct)(struct spdk_bdev *bdev);
-	/**
-	 * Poll the backend for I/O waiting to be completed.
-	 *
-	 * Optional; if the bdev does not have any periodic work to do, this pointer can be NULL.
-	 */
-	int (*check_io)(struct spdk_bdev *bdev);
 	/** Process the IO. */
 	void (*submit_request)(struct spdk_bdev_io *);
@@ -193,6 +177,9 @@ struct spdk_bdev_io {
 	/** The block device that this I/O belongs to. */
 	struct spdk_bdev *bdev;
+	/** The I/O channel to submit this I/O on. */
+	struct spdk_io_channel *ch;
 	/** Enumerated value representing the I/O type. */
 	enum spdk_bdev_io_type type;
@@ -289,25 +276,24 @@ struct spdk_bdev *spdk_bdev_next(struct spdk_bdev *prev);
 bool spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type);
-struct spdk_bdev_io *spdk_bdev_read(struct spdk_bdev *bdev,
+struct spdk_bdev_io *spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
-struct spdk_bdev_io *spdk_bdev_write(struct spdk_bdev *bdev,
+struct spdk_bdev_io *spdk_bdev_write(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
-struct spdk_bdev_io *spdk_bdev_writev(struct spdk_bdev *bdev,
+struct spdk_bdev_io *spdk_bdev_writev(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t len,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
-struct spdk_bdev_io *spdk_bdev_unmap(struct spdk_bdev *bdev,
+struct spdk_bdev_io *spdk_bdev_unmap(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_scsi_unmap_bdesc *unmap_d,
		uint16_t bdesc_count,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
-struct spdk_bdev_io *spdk_bdev_flush(struct spdk_bdev *bdev,
+struct spdk_bdev_io *spdk_bdev_flush(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		uint64_t offset, uint64_t length,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
 int spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io);
-void spdk_bdev_do_work(void *ctx);
 int spdk_bdev_free_io(struct spdk_bdev_io *bdev_io);
 int spdk_bdev_reset(struct spdk_bdev *bdev, enum spdk_bdev_reset_type,
		spdk_bdev_io_completion_cb cb, void *cb_arg);
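
The practical effect of the prototype changes above: a caller fetches a
per-thread channel once and passes it on every submission.  A
hypothetical caller (everything except the spdk_bdev_* calls and
SPDK_IO_PRIORITY_DEFAULT is illustrative):

/* Hypothetical wrapper: the channel now rides along with every I/O
 * instead of being implied by the bdev itself. */
static int
submit_one_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (spdk_bdev_read(bdev, ch, buf, offset, nbytes, cb, cb_arg) == NULL) {
		return -1; /* submission failed */
	}
	return 0;
}

/* ...where the caller obtained the channel once per thread with
 *     ch = spdk_bdev_get_io_channel(bdev, SPDK_IO_PRIORITY_DEFAULT);
 * and releases it with spdk_put_io_channel(ch) when it stops issuing I/O,
 * as the bdevperf changes later in this commit do. */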


@@ -45,17 +45,18 @@
 typedef void (*copy_completion_cb)(void *ref, int status);
+struct spdk_io_channel;
 struct copy_task {
 	copy_completion_cb cb;
 	uint8_t offload_ctx[0];
 };
 struct spdk_copy_engine {
-	int64_t (*copy)(void *cb_arg, void *dst, void *src,
+	int64_t (*copy)(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src,
			uint64_t nbytes, copy_completion_cb cb);
-	int64_t (*fill)(void *cb_arg, void *dst, uint8_t fill,
+	int64_t (*fill)(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill,
			uint64_t nbytes, copy_completion_cb cb);
-	void (*check_io)(void);
 	struct spdk_io_channel *(*get_io_channel)(uint32_t priority);
 };
@@ -86,11 +87,10 @@ struct spdk_copy_module_if {
 void spdk_copy_engine_register(struct spdk_copy_engine *copy_engine);
 struct spdk_io_channel *spdk_copy_engine_get_io_channel(uint32_t priority);
-int64_t spdk_copy_submit(struct copy_task *copy_req, void *dst, void *src,
-		uint64_t nbytes, copy_completion_cb cb);
+int64_t spdk_copy_submit(struct copy_task *copy_req, struct spdk_io_channel *ch, void *dst,
+		void *src, uint64_t nbytes, copy_completion_cb cb);
-int64_t spdk_copy_submit_fill(struct copy_task *copy_req, void *dst, uint8_t fill,
-		uint64_t nbytes, copy_completion_cb cb);
+int64_t spdk_copy_submit_fill(struct copy_task *copy_req, struct spdk_io_channel *ch, void *dst,
+		uint8_t fill, uint64_t nbytes, copy_completion_cb cb);
-int spdk_copy_check_io(void);
 int spdk_copy_module_get_max_ctx_size(void);
 void spdk_copy_module_list_add(struct spdk_copy_module_if *copy_module);


@@ -101,6 +101,7 @@ struct spdk_scsi_task {
 	uint8_t function; /* task mgmt function */
 	uint8_t response; /* task mgmt response */
 	struct spdk_scsi_lun *lun;
+	struct spdk_io_channel *ch;
 	struct spdk_scsi_port *target_port;
 	struct spdk_scsi_port *initiator_port;
 	spdk_event_t cb_event;


@@ -82,8 +82,6 @@ blockdev_aio_close(struct file_disk *disk)
 {
 	int rc;
-	io_destroy(disk->ch.io_ctx);
 	if (disk->fd == -1) {
 		return 0;
 	}
@@ -100,10 +98,11 @@ blockdev_aio_close(struct file_disk *disk)
 }
 static int64_t
-blockdev_aio_read(struct file_disk *fdisk, struct blockdev_aio_task *aio_task,
-		void *buf, uint64_t nbytes, off_t offset)
+blockdev_aio_read(struct file_disk *fdisk, struct spdk_io_channel *ch,
+		struct blockdev_aio_task *aio_task, void *buf, uint64_t nbytes, off_t offset)
 {
 	struct iocb *iocb = &aio_task->iocb;
+	struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
 	int rc;
 	iocb->aio_fildes = fdisk->fd;
@@ -118,7 +117,7 @@ blockdev_aio_read(struct file_disk *fdisk, struct blockdev_aio_task *aio_task,
 	SPDK_TRACELOG(SPDK_TRACE_AIO, "read from %p of size %lu to off: %#lx\n",
			buf, nbytes, offset);
-	rc = io_submit(fdisk->ch.io_ctx, 1, &iocb);
+	rc = io_submit(aio_ch->io_ctx, 1, &iocb);
 	if (rc < 0) {
 		SPDK_ERRLOG("%s: io_submit returned %d\n", __func__, rc);
 		return -1;
@@ -128,10 +127,12 @@ blockdev_aio_read(struct file_disk *fdisk, struct blockdev_aio_task *aio_task,
 }
 static int64_t
-blockdev_aio_writev(struct file_disk *fdisk, struct blockdev_aio_task *aio_task,
+blockdev_aio_writev(struct file_disk *fdisk, struct spdk_io_channel *ch,
+		struct blockdev_aio_task *aio_task,
		struct iovec *iov, int iovcnt, size_t len, off_t offset)
 {
 	struct iocb *iocb = &aio_task->iocb;
+	struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
 	int rc;
 	iocb->aio_fildes = fdisk->fd;
@@ -146,7 +147,7 @@ blockdev_aio_writev(struct file_disk *fdisk, struct blockdev_aio_task *aio_task,
 	SPDK_TRACELOG(SPDK_TRACE_AIO, "write %d iovs size %lu from off: %#lx\n",
			iovcnt, len, offset);
-	rc = io_submit(fdisk->ch.io_ctx, 1, &iocb);
+	rc = io_submit(aio_ch->io_ctx, 1, &iocb);
 	if (rc < 0) {
 		SPDK_ERRLOG("%s: io_submit returned %d\n", __func__, rc);
 		return -1;
@@ -232,15 +233,6 @@ blockdev_aio_poll(void *arg)
 	}
 }
-static int
-blockdev_aio_check_io(struct spdk_bdev *bdev)
-{
-	struct file_disk *fdisk = (struct file_disk *)bdev;
-	blockdev_aio_poll(&fdisk->ch);
-	return 0;
-}
 static int
 blockdev_aio_reset(struct file_disk *fdisk, struct blockdev_aio_task *aio_task)
 {
@@ -254,6 +246,7 @@ static void blockdev_aio_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
 	int ret = 0;
 	ret = blockdev_aio_read((struct file_disk *)bdev_io->ctx,
+			bdev_io->ch,
			(struct blockdev_aio_task *)bdev_io->driver_ctx,
			bdev_io->u.read.buf,
			bdev_io->u.read.nbytes,
@@ -273,6 +266,7 @@ static int _blockdev_aio_submit_request(struct spdk_bdev_io *bdev_io)
 	case SPDK_BDEV_IO_TYPE_WRITE:
 		return blockdev_aio_writev((struct file_disk *)bdev_io->ctx,
+				bdev_io->ch,
				(struct blockdev_aio_task *)bdev_io->driver_ctx,
				bdev_io->u.write.iovs,
				bdev_io->u.write.iovcnt,
@@ -347,7 +341,6 @@ blockdev_aio_get_io_channel(struct spdk_bdev *bdev, uint32_t priority)
 static const struct spdk_bdev_fn_table aio_fn_table = {
 	.destruct = blockdev_aio_destruct,
-	.check_io = blockdev_aio_check_io,
 	.submit_request = blockdev_aio_submit_request,
 	.io_type_supported = blockdev_aio_io_type_supported,
 	.get_io_channel = blockdev_aio_get_io_channel,
@@ -357,8 +350,6 @@ static void aio_free_disk(struct file_disk *fdisk)
 {
 	if (fdisk == NULL)
 		return;
-	if (fdisk->ch.events != NULL)
-		free(fdisk->ch.events);
 	free(fdisk);
 }
@@ -393,10 +384,6 @@ create_aio_disk(char *fname)
 	fdisk->disk.ctxt = fdisk;
 	fdisk->disk.fn_table = &aio_fn_table;
-	if (blockdev_aio_initialize_io_channel(&fdisk->ch) != 0) {
-		goto error_return;
-	}
 	g_blockdev_count++;
 	spdk_io_device_register(&fdisk->disk, blockdev_aio_create_cb, blockdev_aio_destroy_cb,


@@ -62,8 +62,6 @@ struct file_disk {
 	char disk_name[SPDK_BDEV_MAX_NAME_LENGTH];
 	uint64_t size;
-	struct blockdev_aio_io_channel ch;
 	/**
 	 * For storing I/O that were completed synchronously, and will be
 	 * completed during next check_io call.


@@ -406,12 +406,9 @@ spdk_bdev_cleanup_pending_rbuf_io(struct spdk_bdev *bdev)
 }
 static void
-__submit_request(spdk_event_t event)
+__submit_request(struct spdk_bdev *bdev, struct spdk_bdev_io *bdev_io, spdk_event_t cb_event)
 {
-	struct spdk_bdev *bdev = spdk_event_get_arg1(event);
-	struct spdk_bdev_io *bdev_io = spdk_event_get_arg2(event);
-	bdev_io->cb_event = spdk_event_get_next(event);
+	bdev_io->cb_event = cb_event;
 	if (bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING) {
 		if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
@@ -441,32 +438,11 @@
 	}
 }
-void
-spdk_bdev_do_work(void *ctx)
-{
-	struct spdk_bdev *bdev = ctx;
-	bdev->fn_table->check_io(bdev->ctxt);
-}
 int
 spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
 {
 	struct spdk_bdev *bdev = bdev_io->bdev;
-	struct spdk_event *event, *cb_event = NULL;
-	uint32_t lcore = bdev->lcore;
+	struct spdk_event *cb_event = NULL;
-	/* start the poller when first IO comes */
-	if (!bdev->is_running) {
-		bdev->is_running = true;
-		if (lcore == 0) {
-			lcore = rte_lcore_id();
-		}
-		bdev->lcore = lcore;
-		if (bdev->fn_table->check_io) {
-			spdk_poller_register(&bdev->poller, spdk_bdev_do_work, bdev, lcore, NULL, 0);
-		}
-	}
 	if (bdev_io->status == SPDK_BDEV_IO_STATUS_PENDING) {
 		cb_event = spdk_event_allocate(rte_lcore_id(), bdev_io->cb,
@@ -474,11 +450,7 @@ spdk_bdev_io_submit(struct spdk_bdev_io *bdev_io)
 		RTE_VERIFY(cb_event != NULL);
 	}
-	event = spdk_event_allocate(lcore, __submit_request, bdev, bdev_io, cb_event);
-	RTE_VERIFY(event != NULL);
-	spdk_event_call(event);
+	__submit_request(bdev, bdev_io, cb_event);
 	return 0;
 }
@@ -542,7 +514,7 @@ spdk_bdev_get_io_channel(struct spdk_bdev *bdev, uint32_t priority)
 }
 struct spdk_bdev_io *
-spdk_bdev_read(struct spdk_bdev *bdev,
+spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
@@ -571,6 +543,7 @@ spdk_bdev_read(struct spdk_bdev *bdev,
 		return NULL;
 	}
+	bdev_io->ch = ch;
 	bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
 	bdev_io->u.read.buf = buf;
 	bdev_io->u.read.nbytes = nbytes;
@@ -587,7 +560,7 @@ spdk_bdev_read(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_write(struct spdk_bdev *bdev,
+spdk_bdev_write(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
@@ -616,6 +589,7 @@ spdk_bdev_write(struct spdk_bdev *bdev,
 		return NULL;
 	}
+	bdev_io->ch = ch;
 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
 	bdev_io->u.write.iov.iov_base = buf;
 	bdev_io->u.write.iov.iov_len = nbytes;
@@ -635,7 +609,7 @@ spdk_bdev_write(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_writev(struct spdk_bdev *bdev,
+spdk_bdev_writev(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t len,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
@@ -665,6 +639,7 @@ spdk_bdev_writev(struct spdk_bdev *bdev,
 		return NULL;
 	}
+	bdev_io->ch = ch;
 	bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
 	bdev_io->u.write.iovs = iov;
 	bdev_io->u.write.iovcnt = iovcnt;
@@ -682,7 +657,7 @@ spdk_bdev_writev(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_unmap(struct spdk_bdev *bdev,
+spdk_bdev_unmap(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_scsi_unmap_bdesc *unmap_d,
		uint16_t bdesc_count,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
@@ -696,6 +671,7 @@ spdk_bdev_unmap(struct spdk_bdev *bdev,
 		return NULL;
 	}
+	bdev_io->ch = ch;
 	bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
 	bdev_io->u.unmap.unmap_bdesc = unmap_d;
 	bdev_io->u.unmap.bdesc_count = bdesc_count;
@@ -711,7 +687,7 @@ spdk_bdev_unmap(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_flush(struct spdk_bdev *bdev,
+spdk_bdev_flush(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		uint64_t offset, uint64_t length,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
@@ -724,6 +700,7 @@ spdk_bdev_flush(struct spdk_bdev *bdev,
 		return NULL;
 	}
+	bdev_io->ch = ch;
 	bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
 	bdev_io->u.flush.offset = offset;
 	bdev_io->u.flush.length = length;
@@ -823,8 +800,6 @@ spdk_bdev_register(struct spdk_bdev *bdev)
 {
 	/* initialize the reset generation value to zero */
 	bdev->gencnt = 0;
-	bdev->is_running = false;
-	bdev->poller = NULL;
 	SPDK_TRACELOG(SPDK_TRACE_DEBUG, "Inserting bdev %s into list\n", bdev->name);
 	TAILQ_INSERT_TAIL(&spdk_bdev_list, bdev, link);
@@ -842,13 +817,6 @@ spdk_bdev_unregister(struct spdk_bdev *bdev)
 	if (rc < 0) {
 		SPDK_ERRLOG("destruct failed\n");
 	}
-	if (bdev->is_running) {
-		if (bdev->poller) {
-			spdk_poller_unregister(&bdev->poller, NULL);
-		}
-		bdev->is_running = false;
-	}
 }
 void


@@ -121,18 +121,20 @@ blockdev_malloc_destruct(struct spdk_bdev *bdev)
 }
 static int64_t
-blockdev_malloc_read(struct malloc_disk *mdisk, struct copy_task *copy_req,
+blockdev_malloc_read(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
+		struct copy_task *copy_req,
		void *buf, uint64_t nbytes, off_t offset)
 {
 	SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx to %p\n",
			nbytes, offset, buf);
-	return spdk_copy_submit(copy_req, buf, mdisk->malloc_buf + offset,
+	return spdk_copy_submit(copy_req, ch, buf, mdisk->malloc_buf + offset,
			nbytes, malloc_done);
 }
 static int64_t
-blockdev_malloc_writev(struct malloc_disk *mdisk, struct copy_task *copy_req,
+blockdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
+		struct copy_task *copy_req,
		struct iovec *iov, int iovcnt, size_t len, off_t offset)
 {
 	if ((iovcnt != 1) || (iov->iov_len != len))
@@ -141,12 +143,13 @@ blockdev_malloc_writev(struct malloc_disk *mdisk, struct copy_task *copy_req,
 	SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx from %p\n",
			iov->iov_len, offset, iov->iov_base);
-	return spdk_copy_submit(copy_req, mdisk->malloc_buf + offset,
+	return spdk_copy_submit(copy_req, ch, mdisk->malloc_buf + offset,
			iov->iov_base, len, malloc_done);
 }
 static int
 blockdev_malloc_unmap(struct malloc_disk *mdisk,
+		struct spdk_io_channel *ch,
		struct copy_task *copy_req,
		struct spdk_scsi_unmap_bdesc *unmap_d,
		uint16_t bdesc_count)
@@ -171,13 +174,7 @@ blockdev_malloc_unmap(struct malloc_disk *mdisk,
 		return -1;
 	}
-	return spdk_copy_submit_fill(copy_req, mdisk->malloc_buf + offset, 0, byte_count, malloc_done);
-}
-static int
-blockdev_malloc_check_io(struct spdk_bdev *bdev)
-{
-	return spdk_copy_check_io();
+	return spdk_copy_submit_fill(copy_req, ch, mdisk->malloc_buf + offset, 0, byte_count, malloc_done);
 }
 static int64_t
@@ -210,6 +207,7 @@ static int _blockdev_malloc_submit_request(struct spdk_bdev_io *bdev_io)
 	}
 	return blockdev_malloc_read((struct malloc_disk *)bdev_io->ctx,
+			bdev_io->ch,
			(struct copy_task *)bdev_io->driver_ctx,
			bdev_io->u.read.buf,
			bdev_io->u.read.nbytes,
@@ -217,6 +215,7 @@ static int _blockdev_malloc_submit_request(struct spdk_bdev_io *bdev_io)
 	case SPDK_BDEV_IO_TYPE_WRITE:
 		return blockdev_malloc_writev((struct malloc_disk *)bdev_io->ctx,
+				bdev_io->ch,
				(struct copy_task *)bdev_io->driver_ctx,
				bdev_io->u.write.iovs,
				bdev_io->u.write.iovcnt,
@@ -235,6 +234,7 @@ static int _blockdev_malloc_submit_request(struct spdk_bdev_io *bdev_io)
 	case SPDK_BDEV_IO_TYPE_UNMAP:
 		return blockdev_malloc_unmap((struct malloc_disk *)bdev_io->ctx,
+				bdev_io->ch,
				(struct copy_task *)bdev_io->driver_ctx,
				bdev_io->u.unmap.unmap_bdesc,
				bdev_io->u.unmap.bdesc_count);
@@ -275,7 +275,6 @@ blockdev_malloc_get_io_channel(struct spdk_bdev *bdev, uint32_t priority)
 static const struct spdk_bdev_fn_table malloc_fn_table = {
 	.destruct = blockdev_malloc_destruct,
-	.check_io = blockdev_malloc_check_io,
 	.submit_request = blockdev_malloc_submit_request,
 	.io_type_supported = blockdev_malloc_io_type_supported,
 	.get_io_channel = blockdev_malloc_get_io_channel,


@@ -79,7 +79,6 @@ struct nvme_blockdev {
 	struct spdk_bdev disk;
 	struct spdk_nvme_ctrlr *ctrlr;
 	struct spdk_nvme_ns *ns;
-	struct spdk_nvme_qpair *qpair;
 	uint64_t lba_start;
 	uint64_t lba_end;
 	uint64_t blocklen;
@@ -139,15 +138,17 @@ SPDK_BDEV_MODULE_REGISTER(nvme_library_init, NULL, blockdev_nvme_get_spdk_runnin
		nvme_get_ctx_size)
 static int64_t
-blockdev_nvme_read(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
+blockdev_nvme_read(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
+		struct nvme_blockio *bio,
		void *buf, uint64_t nbytes, off_t offset)
 {
+	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
 	int64_t rc;
 	SPDK_TRACELOG(SPDK_TRACE_NVME, "read %lu bytes with offset %#lx to %p\n",
			nbytes, offset, buf);
-	rc = nvme_queue_cmd(nbdev, nbdev->qpair, bio, BDEV_DISK_READ, buf, nbytes, offset);
+	rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_READ, buf, nbytes, offset);
 	if (rc < 0)
 		return -1;
@@ -155,9 +156,11 @@ blockdev_nvme_read(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
 }
 static int64_t
-blockdev_nvme_writev(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
+blockdev_nvme_writev(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
+		struct nvme_blockio *bio,
		struct iovec *iov, int iovcnt, size_t len, off_t offset)
 {
+	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
 	int64_t rc;
 	if ((iovcnt != 1) || (iov->iov_len != len))
@@ -166,7 +169,7 @@ blockdev_nvme_writev(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
 	SPDK_TRACELOG(SPDK_TRACE_NVME, "write %lu bytes with offset %#lx from %p\n",
			iov->iov_len, offset, iov->iov_base);
-	rc = nvme_queue_cmd(nbdev, nbdev->qpair, bio, BDEV_DISK_WRITE, (void *)iov->iov_base,
+	rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_WRITE, (void *)iov->iov_base,
			iov->iov_len, offset);
 	if (rc < 0)
 		return -1;
@@ -182,16 +185,6 @@ blockdev_nvme_poll(void *arg)
 	spdk_nvme_qpair_process_completions(qpair, 0);
 }
-static int
-blockdev_nvme_check_io(struct spdk_bdev *bdev)
-{
-	struct nvme_blockdev *nbdev = (struct nvme_blockdev *)bdev;
-	blockdev_nvme_poll(nbdev->qpair);
-	return 0;
-}
 static int
 blockdev_nvme_destruct(struct spdk_bdev *bdev)
 {
@@ -224,7 +217,8 @@ blockdev_nvme_reset(struct nvme_blockdev *nbdev, struct nvme_blockio *bio)
 }
 static int
-blockdev_nvme_unmap(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
+blockdev_nvme_unmap(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
+		struct nvme_blockio *bio,
		struct spdk_scsi_unmap_bdesc *umap_d,
		uint16_t bdesc_count);
@@ -233,6 +227,7 @@ static void blockdev_nvme_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
 	int ret;
 	ret = blockdev_nvme_read((struct nvme_blockdev *)bdev_io->ctx,
+			bdev_io->ch,
			(struct nvme_blockio *)bdev_io->driver_ctx,
			bdev_io->u.read.buf,
			bdev_io->u.read.nbytes,
@@ -252,6 +247,7 @@ static int _blockdev_nvme_submit_request(struct spdk_bdev_io *bdev_io)
 	case SPDK_BDEV_IO_TYPE_WRITE:
 		return blockdev_nvme_writev((struct nvme_blockdev *)bdev_io->ctx,
+				bdev_io->ch,
				(struct nvme_blockio *)bdev_io->driver_ctx,
				bdev_io->u.write.iovs,
				bdev_io->u.write.iovcnt,
@@ -260,6 +256,7 @@ static int _blockdev_nvme_submit_request(struct spdk_bdev_io *bdev_io)
 	case SPDK_BDEV_IO_TYPE_UNMAP:
 		return blockdev_nvme_unmap((struct nvme_blockdev *)bdev_io->ctx,
+				bdev_io->ch,
				(struct nvme_blockio *)bdev_io->driver_ctx,
				bdev_io->u.unmap.unmap_bdesc,
				bdev_io->u.unmap.bdesc_count);
@@ -345,7 +342,6 @@ blockdev_nvme_get_io_channel(struct spdk_bdev *bdev, uint32_t priority)
 static const struct spdk_bdev_fn_table nvmelib_fn_table = {
 	.destruct = blockdev_nvme_destruct,
-	.check_io = blockdev_nvme_check_io,
 	.submit_request = blockdev_nvme_submit_request,
 	.io_type_supported = blockdev_nvme_io_type_supported,
 	.get_io_channel = blockdev_nvme_get_io_channel,
@@ -607,13 +603,6 @@ nvme_ctrlr_initialize_blockdevs(struct spdk_nvme_ctrlr *ctrlr, int bdev_per_ns,
 		snprintf(bdev->disk.product_name, SPDK_BDEV_MAX_PRODUCT_NAME_LENGTH,
				"NVMe disk");
-		bdev->qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, 0);
-		if (!bdev->qpair) {
-			SPDK_ERRLOG("Could not allocate I/O queue pair for %s\n",
-					bdev->disk.name);
-			continue;
-		}
 		if (cdata->oncs.dsm) {
 			/*
 			 * Enable the thin provisioning
@@ -686,10 +675,12 @@ nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
 }
 static int
-blockdev_nvme_unmap(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
+blockdev_nvme_unmap(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
+		struct nvme_blockio *bio,
		struct spdk_scsi_unmap_bdesc *unmap_d,
		uint16_t bdesc_count)
 {
+	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
 	int rc = 0, i;
 	for (i = 0; i < bdesc_count; i++) {
@@ -699,7 +690,7 @@ blockdev_nvme_unmap(struct nvme_blockdev *nbdev, struct nvme_blockio *bio,
 		unmap_d++;
 	}
-	rc = spdk_nvme_ns_cmd_deallocate(nbdev->ns, nbdev->qpair, bio->dsm_range, bdesc_count,
+	rc = spdk_nvme_ns_cmd_deallocate(nbdev->ns, nvme_ch->qpair, bio->dsm_range, bdesc_count,
			queued_done, bio);
 	if (rc != 0)


@@ -70,21 +70,6 @@ spdk_memcpy_register(struct spdk_copy_engine *copy_engine)
 	mem_copy_engine = copy_engine;
 }
-static int
-spdk_has_copy_engine(void)
-{
-	return (hw_copy_engine == NULL) ? 0 : 1;
-}
-int
-spdk_copy_check_io(void)
-{
-	if (spdk_has_copy_engine())
-		hw_copy_engine->check_io();
-	return 0;
-}
 static void
 copy_engine_done(void *ref, int status)
 {
@@ -94,41 +79,32 @@ copy_engine_done(void *ref, int status)
 }
 int64_t
-spdk_copy_submit(struct copy_task *copy_req, void *dst, void *src,
-		uint64_t nbytes, copy_completion_cb cb)
+spdk_copy_submit(struct copy_task *copy_req, struct spdk_io_channel *ch,
+		void *dst, void *src, uint64_t nbytes, copy_completion_cb cb)
 {
 	struct copy_task *req = copy_req;
+	struct copy_io_channel *copy_ch = spdk_io_channel_get_ctx(ch);
 	req->cb = cb;
-	if (spdk_has_copy_engine())
-		return hw_copy_engine->copy(req->offload_ctx, dst, src, nbytes,
-				copy_engine_done);
-	return mem_copy_engine->copy(req->offload_ctx, dst, src, nbytes,
+	return copy_ch->engine->copy(req->offload_ctx, copy_ch->ch, dst, src, nbytes,
			copy_engine_done);
 }
 int64_t
-spdk_copy_submit_fill(struct copy_task *copy_req, void *dst, uint8_t fill,
-		uint64_t nbytes, copy_completion_cb cb)
+spdk_copy_submit_fill(struct copy_task *copy_req, struct spdk_io_channel *ch,
+		void *dst, uint8_t fill, uint64_t nbytes, copy_completion_cb cb)
 {
 	struct copy_task *req = copy_req;
+	struct copy_io_channel *copy_ch = spdk_io_channel_get_ctx(ch);
 	req->cb = cb;
-	if (hw_copy_engine && hw_copy_engine->fill) {
-		return hw_copy_engine->fill(req->offload_ctx, dst, fill, nbytes,
-				copy_engine_done);
-	}
-	return mem_copy_engine->fill(req->offload_ctx, dst, fill, nbytes,
+	return copy_ch->engine->fill(req->offload_ctx, copy_ch->ch, dst, fill, nbytes,
			copy_engine_done);
 }
 /* memcpy default copy engine */
 static int64_t
-mem_copy_submit(void *cb_arg, void *dst, void *src, uint64_t nbytes,
+mem_copy_submit(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
		copy_completion_cb cb)
 {
 	struct copy_task *copy_req;
@@ -141,7 +117,7 @@ mem_copy_submit(void *cb_arg, void *dst, void *src, uint64_t nbytes,
 }
 static int64_t
-mem_copy_fill(void *cb_arg, void *dst, uint8_t fill, uint64_t nbytes,
+mem_copy_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill, uint64_t nbytes,
		copy_completion_cb cb)
 {
 	struct copy_task *copy_req;


@@ -61,8 +61,6 @@ struct ioat_device {
 static TAILQ_HEAD(, ioat_device) g_devices = TAILQ_HEAD_INITIALIZER(g_devices);
 static int g_unbindfromkernel = 0;
-static int g_ioat_channel_count = 0;
-static struct spdk_ioat_chan *g_ioat_chan[RTE_MAX_LCORE];
 static pthread_mutex_t g_ioat_mutex = PTHREAD_MUTEX_INITIALIZER;
 struct ioat_whitelist {
@@ -164,32 +162,32 @@ ioat_done(void *cb_arg)
 }
 static int64_t
-ioat_copy_submit(void *cb_arg, void *dst, void *src, uint64_t nbytes,
+ioat_copy_submit(void *cb_arg, struct spdk_io_channel *ch, void *dst, void *src, uint64_t nbytes,
		copy_completion_cb cb)
 {
 	struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
-	struct spdk_ioat_chan *chan = g_ioat_chan[rte_lcore_id()];
+	struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
-	RTE_VERIFY(chan != NULL);
+	RTE_VERIFY(ioat_ch->ioat_ch != NULL);
 	ioat_task->cb = cb;
-	return spdk_ioat_submit_copy(chan, ioat_task, ioat_done, dst, src, nbytes);
+	return spdk_ioat_submit_copy(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, src, nbytes);
 }
 static int64_t
-ioat_copy_submit_fill(void *cb_arg, void *dst, uint8_t fill, uint64_t nbytes,
-		copy_completion_cb cb)
+ioat_copy_submit_fill(void *cb_arg, struct spdk_io_channel *ch, void *dst, uint8_t fill,
+		uint64_t nbytes, copy_completion_cb cb)
 {
 	struct ioat_task *ioat_task = (struct ioat_task *)cb_arg;
-	struct spdk_ioat_chan *chan = g_ioat_chan[rte_lcore_id()];
+	struct ioat_io_channel *ioat_ch = spdk_io_channel_get_ctx(ch);
 	uint64_t fill64 = 0x0101010101010101ULL * fill;
-	RTE_VERIFY(chan != NULL);
+	RTE_VERIFY(ioat_ch->ioat_ch != NULL);
 	ioat_task->cb = cb;
-	return spdk_ioat_submit_fill(chan, ioat_task, ioat_done, dst, fill64, nbytes);
+	return spdk_ioat_submit_fill(ioat_ch->ioat_ch, ioat_task, ioat_done, dst, fill64, nbytes);
 }
 static void
@@ -200,21 +198,11 @@ ioat_poll(void *arg)
 	spdk_ioat_process_events(chan);
 }
-static void
-ioat_check_io(void)
-{
-	struct spdk_ioat_chan *chan = g_ioat_chan[rte_lcore_id()];
-	RTE_VERIFY(chan != NULL);
-	ioat_poll(chan);
-}
 static struct spdk_io_channel *ioat_get_io_channel(uint32_t priority);
 static struct spdk_copy_engine ioat_copy_engine = {
 	.copy = ioat_copy_submit,
 	.fill = ioat_copy_submit_fill,
-	.check_io = ioat_check_io,
 	.get_io_channel = ioat_get_io_channel,
 };
@@ -315,7 +303,6 @@ attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_ioat_chan *
 	dev->ioat = ioat;
 	TAILQ_INSERT_TAIL(&g_devices, dev, tailq);
-	g_ioat_channel_count++;
 }
 static int
@@ -325,7 +312,6 @@ copy_engine_ioat_init(void)
 	const char *val, *pci_bdf;
 	int i;
 	struct ioat_probe_ctx probe_ctx = {};
-	int lcore;
 	if (sp != NULL) {
 		val = spdk_conf_section_get_val(sp, "Disable");
@@ -360,21 +346,6 @@ copy_engine_ioat_init(void)
 		return -1;
 	}
-	/* We only handle the case where we have enough channels */
-	if (g_ioat_channel_count < spdk_app_get_core_count()) {
-		SPDK_ERRLOG("Not enough IOAT channels for all cores\n");
-		copy_engine_ioat_exit();
-		return 0;
-	}
-	/* Assign channels to lcores in the active core mask */
-	/* we use u64 as CPU core mask */
-	for (lcore = 0; lcore < RTE_MAX_LCORE && lcore < 64; lcore++) {
-		if ((spdk_app_get_core_mask() & (1ULL << lcore))) {
-			g_ioat_chan[lcore] = ioat_allocate_device()->ioat;
-		}
-	}
 	SPDK_NOTICELOG("Ioat Copy Engine Offload Enabled\n");
 	spdk_copy_engine_register(&ioat_copy_engine);
 	spdk_io_device_register(&ioat_copy_engine, ioat_create_cb, ioat_destroy_cb,


@@ -381,7 +381,8 @@ nvmf_virtual_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
 }
 static int
-nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+		struct spdk_nvmf_request *req)
 {
 	uint64_t lba_address;
 	uint64_t blockcnt;
@@ -413,14 +414,14 @@ nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
 	if (cmd->opc == SPDK_NVME_OPC_READ) {
 		spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
-		if (spdk_bdev_read(bdev, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
+		if (spdk_bdev_read(bdev, ch, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
				req) == NULL) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}
 	} else {
 		spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
-		if (spdk_bdev_write(bdev, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
+		if (spdk_bdev_write(bdev, ch, req->data, offset, req->length, nvmf_virtual_ctrlr_complete_cmd,
				req) == NULL) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@@ -431,14 +432,15 @@ nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
 }
 static int
-nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+		struct spdk_nvmf_request *req)
 {
 	uint64_t nbytes;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
 	nbytes = bdev->blockcnt * bdev->blocklen;
-	if (spdk_bdev_flush(bdev, 0, nbytes, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
+	if (spdk_bdev_flush(bdev, ch, 0, nbytes, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 	}
@@ -446,7 +448,8 @@ nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *r
 }
 static int
-nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+		struct spdk_nvmf_request *req)
 {
 	int i;
 	uint32_t attribute;
@@ -477,7 +480,7 @@ nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req
		to_be64(&unmap[i].lba, dsm_range[i].starting_lba);
		to_be32(&unmap[i].block_count, dsm_range[i].length);
 	}
-	if (spdk_bdev_unmap(bdev, unmap, nr, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
+	if (spdk_bdev_unmap(bdev, ch, unmap, nr, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
		free(unmap);
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
@@ -496,6 +499,7 @@ nvmf_virtual_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 {
 	uint32_t nsid;
 	struct spdk_bdev *bdev;
+	struct spdk_io_channel *ch;
 	struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;
 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
@@ -511,14 +515,15 @@ nvmf_virtual_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 	}
 	bdev = subsystem->dev.virtual.ns_list[nsid - 1];
+	ch = subsystem->dev.virtual.ch[nsid - 1];
 	switch (cmd->opc) {
 	case SPDK_NVME_OPC_READ:
 	case SPDK_NVME_OPC_WRITE:
-		return nvmf_virtual_ctrlr_rw_cmd(bdev, req);
+		return nvmf_virtual_ctrlr_rw_cmd(bdev, ch, req);
 	case SPDK_NVME_OPC_FLUSH:
-		return nvmf_virtual_ctrlr_flush_cmd(bdev, req);
+		return nvmf_virtual_ctrlr_flush_cmd(bdev, ch, req);
 	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
-		return nvmf_virtual_ctrlr_dsm_cmd(bdev, req);
+		return nvmf_virtual_ctrlr_dsm_cmd(bdev, ch, req);
 	default:
		SPDK_ERRLOG("Unsupported IO command opc: %x\n", cmd->opc);
		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;


@@ -226,6 +226,7 @@ spdk_scsi_lun_execute_tasks(struct spdk_scsi_lun *lun)
 	TAILQ_FOREACH_SAFE(task, &lun->pending_tasks, scsi_link, task_tmp) {
 		task->status = SPDK_SCSI_STATUS_GOOD;
+		task->ch = lun->io_channel;
 		spdk_trace_record(TRACE_SCSI_TASK_START, lun->dev->id, task->length, (uintptr_t)task, 0);
 		rc = spdk_bdev_scsi_execute(lun->bdev, task);


@@ -1334,7 +1334,7 @@ spdk_bdev_scsi_read(struct spdk_bdev *bdev,
 		return -1;
 	}
-	task->blockdev_io = spdk_bdev_read(bdev, task->rbuf, offset, nbytes,
+	task->blockdev_io = spdk_bdev_read(bdev, task->ch, task->rbuf, offset, nbytes,
			spdk_bdev_scsi_task_complete, task);
 	if (!task->blockdev_io) {
 		SPDK_ERRLOG("spdk_bdev_read() failed\n");
@@ -1384,7 +1384,7 @@ spdk_bdev_scsi_write(struct spdk_bdev *bdev,
 	}
 	offset += task->offset;
-	task->blockdev_io = spdk_bdev_writev(bdev, &task->iov,
+	task->blockdev_io = spdk_bdev_writev(bdev, task->ch, &task->iov,
			1, offset, task->length,
			spdk_bdev_scsi_task_complete,
			task);
@@ -1431,7 +1431,7 @@ spdk_bdev_scsi_sync(struct spdk_bdev *bdev, struct spdk_scsi_task *task,
 		return SPDK_SCSI_TASK_COMPLETE;
 	}
-	task->blockdev_io = spdk_bdev_flush(bdev, offset, nbytes,
+	task->blockdev_io = spdk_bdev_flush(bdev, task->ch, offset, nbytes,
			spdk_bdev_scsi_task_complete, task);
 	if (!task->blockdev_io) {
@@ -1503,7 +1503,7 @@ spdk_bdev_scsi_unmap(struct spdk_bdev *bdev,
 		return SPDK_SCSI_TASK_COMPLETE;
 	}
-	task->blockdev_io = spdk_bdev_unmap(bdev, (struct spdk_scsi_unmap_bdesc *)&data[8],
+	task->blockdev_io = spdk_bdev_unmap(bdev, task->ch, (struct spdk_scsi_unmap_bdesc *)&data[8],
			bdesc_count, spdk_bdev_scsi_task_complete,
			task);


@@ -52,6 +52,7 @@
 #include "spdk/copy_engine.h"
 #include "spdk/endian.h"
 #include "spdk/log.h"
+#include "spdk/io_channel.h"
 struct bdevperf_task {
 	struct iovec iov;
@@ -80,6 +81,7 @@ static void bdevperf_submit_single(struct io_target *target);
 struct io_target {
 	struct spdk_bdev *bdev;
+	struct spdk_io_channel *ch;
 	struct io_target *next;
 	unsigned lcore;
 	int io_completed;
@@ -212,6 +214,7 @@ bdevperf_complete(spdk_event_t event)
 	if (!target->is_draining) {
 		bdevperf_submit_single(target);
 	} else if (target->current_queue_depth == 0) {
+		spdk_put_io_channel(target->ch);
 		complete = spdk_event_allocate(rte_get_master_lcore(), end_run, NULL, NULL, NULL);
 		spdk_event_call(complete);
 	}
@@ -230,7 +233,7 @@ bdevperf_unmap_complete(spdk_event_t event)
 	memset(task->buf, 0, g_io_size);
 	/* Read the data back in */
-	spdk_bdev_read(target->bdev, NULL,
+	spdk_bdev_read(target->bdev, target->ch, NULL,
			from_be64(&bdev_io->u.unmap.unmap_bdesc->lba) * target->bdev->blocklen,
			from_be32(&bdev_io->u.unmap.unmap_bdesc->block_count) * target->bdev->blocklen,
			bdevperf_complete, task);
@@ -260,11 +263,11 @@ bdevperf_verify_write_complete(spdk_event_t event)
		to_be64(&bdesc->lba, bdev_io->u.write.offset / target->bdev->blocklen);
		to_be32(&bdesc->block_count, bdev_io->u.write.len / target->bdev->blocklen);
-		spdk_bdev_unmap(target->bdev, bdesc, 1, bdevperf_unmap_complete,
+		spdk_bdev_unmap(target->bdev, target->ch, bdesc, 1, bdevperf_unmap_complete,
				task);
 	} else {
 		/* Read the data back in */
-		spdk_bdev_read(target->bdev, NULL,
+		spdk_bdev_read(target->bdev, target->ch, NULL,
				bdev_io->u.write.offset,
				bdev_io->u.write.len,
				bdevperf_complete, task);
@@ -287,11 +290,13 @@ static void
 bdevperf_submit_single(struct io_target *target)
 {
 	struct spdk_bdev *bdev;
+	struct spdk_io_channel *ch;
 	struct bdevperf_task *task = NULL;
 	uint64_t offset_in_ios;
 	void *rbuf;
 	bdev = target->bdev;
+	ch = target->ch;
 	if (rte_mempool_get(task_pool, (void **)&task) != 0 || task == NULL) {
 		printf("Task pool allocation failed\n");
@@ -313,17 +318,17 @@ bdevperf_submit_single(struct io_target *target)
		memset(task->buf, rand_r(&seed) % 256, g_io_size);
		task->iov.iov_base = task->buf;
		task->iov.iov_len = g_io_size;
-		spdk_bdev_writev(bdev, &task->iov, 1, offset_in_ios * g_io_size, g_io_size,
+		spdk_bdev_writev(bdev, ch, &task->iov, 1, offset_in_ios * g_io_size, g_io_size,
				bdevperf_verify_write_complete, task);
 	} else if ((g_rw_percentage == 100) ||
			(g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
		rbuf = g_zcopy ? NULL : task->buf;
-		spdk_bdev_read(bdev, rbuf, offset_in_ios * g_io_size, g_io_size,
+		spdk_bdev_read(bdev, ch, rbuf, offset_in_ios * g_io_size, g_io_size,
				bdevperf_complete, task);
 	} else {
		task->iov.iov_base = task->buf;
		task->iov.iov_len = g_io_size;
-		spdk_bdev_writev(bdev, &task->iov, 1, offset_in_ios * g_io_size, g_io_size,
+		spdk_bdev_writev(bdev, ch, &task->iov, 1, offset_in_ios * g_io_size, g_io_size,
				bdevperf_complete, task);
 	}
@@ -392,6 +397,8 @@ bdevperf_submit_on_core(spdk_event_t event)
 	/* Submit initial I/O for each block device. Each time one
	 * completes, another will be submitted. */
 	while (target != NULL) {
+		target->ch = spdk_bdev_get_io_channel(target->bdev, SPDK_IO_PRIORITY_DEFAULT);
 		/* Start a timer to stop this I/O chain when the run is over */
 		rte_timer_reset(&target->run_timer, rte_get_timer_hz() * g_time_in_sec, SINGLE,
				target->lcore, end_target, target);


@@ -132,7 +132,7 @@ spdk_scsi_task_build_sense_data(struct spdk_scsi_task *task, int sk, int asc, in
 }
 struct spdk_bdev_io *
-spdk_bdev_read(struct spdk_bdev *bdev,
+spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		void *buf, uint64_t offset, uint64_t nbytes,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {
@@ -140,7 +140,7 @@ spdk_bdev_read(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_writev(struct spdk_bdev *bdev,
+spdk_bdev_writev(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t len,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
@@ -149,7 +149,7 @@ spdk_bdev_writev(struct spdk_bdev *bdev,
 }
 struct spdk_bdev_io *
-spdk_bdev_unmap(struct spdk_bdev *bdev,
+spdk_bdev_unmap(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_scsi_unmap_bdesc *unmap_d,
		uint16_t bdesc_count,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
@@ -165,7 +165,7 @@ spdk_bdev_reset(struct spdk_bdev *bdev, enum spdk_bdev_reset_type reset_type,
 }
 struct spdk_bdev_io *
-spdk_bdev_flush(struct spdk_bdev *bdev,
+spdk_bdev_flush(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		uint64_t offset, uint64_t length,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
 {