bdev: enable IO vector operations

This patch enables vector operations for the bdev drivers aio, malloc and
nvme.
The rbd driver still handles only one vector.

Change-Id: Ie2c1f6853bfd54ebd8039df9a0305854ca3297b9
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
This commit is contained in:
Pawel Wodkowski 2016-10-04 16:39:27 +02:00
parent f1fcdeb341
commit 113f8e23a7
8 changed files with 273 additions and 105 deletions

View File

@ -189,11 +189,17 @@ struct spdk_bdev_io {
/** The unaligned rbuf originally allocated. */
void *buf_unaligned;
/** For single buffer cases, pointer to the aligned data buffer. */
void *buf;
/** For basic read case, use our own iovec element. */
struct iovec iov;
/** For single buffer cases, size of the data buffer. */
uint64_t nbytes;
/** For SG buffer cases, array of iovecs to transfer. */
struct iovec *iovs;
/** For SG buffer cases, number of iovecs in iovec array. */
int iovcnt;
/** For SG buffer cases, total size of data to be transferred. */
size_t len;
/** Starting offset (in bytes) of the blockdev for this I/O. */
uint64_t offset;
@ -279,6 +285,11 @@ bool spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type
struct spdk_bdev_io *spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
void *buf, uint64_t offset, uint64_t nbytes,
spdk_bdev_io_completion_cb cb, void *cb_arg);
struct spdk_bdev_io *
spdk_bdev_readv(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt,
uint64_t offset, uint64_t nbytes,
spdk_bdev_io_completion_cb cb, void *cb_arg);
struct spdk_bdev_io *spdk_bdev_write(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
void *buf, uint64_t offset, uint64_t nbytes,
spdk_bdev_io_completion_cb cb, void *cb_arg);

View File

@ -98,24 +98,20 @@ blockdev_aio_close(struct file_disk *disk)
}
static int64_t
blockdev_aio_read(struct file_disk *fdisk, struct spdk_io_channel *ch,
struct blockdev_aio_task *aio_task, void *buf, uint64_t nbytes, uint64_t offset)
blockdev_aio_readv(struct file_disk *fdisk, struct spdk_io_channel *ch,
struct blockdev_aio_task *aio_task,
struct iovec *iov, int iovcnt, uint64_t nbytes, uint64_t offset)
{
struct iocb *iocb = &aio_task->iocb;
struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
int rc;
iocb->aio_fildes = fdisk->fd;
iocb->aio_reqprio = 0;
iocb->aio_lio_opcode = IO_CMD_PREAD;
iocb->u.c.buf = buf;
iocb->u.c.nbytes = nbytes;
iocb->u.c.offset = offset;
io_prep_preadv(iocb, fdisk->fd, iov, iovcnt, offset);
iocb->data = aio_task;
aio_task->len = nbytes;
SPDK_TRACELOG(SPDK_TRACE_AIO, "read from %p of size %lu to off: %#lx\n",
buf, nbytes, offset);
SPDK_TRACELOG(SPDK_TRACE_AIO, "read %d iovs size %lu to off: %#lx\n",
iovcnt, nbytes, offset);
rc = io_submit(aio_ch->io_ctx, 1, &iocb);
if (rc < 0) {
@ -135,12 +131,7 @@ blockdev_aio_writev(struct file_disk *fdisk, struct spdk_io_channel *ch,
struct blockdev_aio_io_channel *aio_ch = spdk_io_channel_get_ctx(ch);
int rc;
iocb->aio_fildes = fdisk->fd;
iocb->aio_lio_opcode = IO_CMD_PWRITEV;
iocb->aio_reqprio = 0;
iocb->u.v.vec = iov;
iocb->u.v.nr = iovcnt;
iocb->u.v.offset = offset;
io_prep_pwritev(iocb, fdisk->fd, iov, iovcnt, offset);
iocb->data = aio_task;
aio_task->len = len;
@ -245,12 +236,13 @@ static void blockdev_aio_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
{
int ret = 0;
ret = blockdev_aio_read((struct file_disk *)bdev_io->ctx,
bdev_io->ch,
(struct blockdev_aio_task *)bdev_io->driver_ctx,
bdev_io->u.read.buf,
bdev_io->u.read.nbytes,
bdev_io->u.read.offset);
ret = blockdev_aio_readv((struct file_disk *)bdev_io->ctx,
bdev_io->ch,
(struct blockdev_aio_task *)bdev_io->driver_ctx,
bdev_io->u.read.iovs,
bdev_io->u.read.iovcnt,
bdev_io->u.read.len,
bdev_io->u.read.offset);
if (ret < 0) {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);

View File

@ -112,8 +112,11 @@ spdk_bdev_io_set_rbuf(struct spdk_bdev_io *bdev_io, void *buf)
{
assert(bdev_io->get_rbuf_cb != NULL);
assert(buf != NULL);
assert(bdev_io->u.read.iovs != NULL);
bdev_io->u.read.buf_unaligned = buf;
bdev_io->u.read.buf = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
bdev_io->u.read.iovs[0].iov_base = (void *)((unsigned long)((char *)buf + 512) & ~511UL);
bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
bdev_io->u.read.put_rbuf = true;
bdev_io->get_rbuf_cb(bdev_io);
}
@ -127,7 +130,9 @@ spdk_bdev_io_put_rbuf(struct spdk_bdev_io *bdev_io)
need_rbuf_tailq_t *tailq;
uint64_t length;
length = bdev_io->u.read.nbytes;
assert(bdev_io->u.read.iovcnt == 1);
length = bdev_io->u.read.len;
buf = bdev_io->u.read.buf_unaligned;
if (length <= SPDK_BDEV_SMALL_RBUF_MAX_SIZE) {
@ -360,7 +365,7 @@ spdk_bdev_put_io(struct spdk_bdev_io *bdev_io)
static void
_spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io)
{
uint64_t len = bdev_io->u.read.nbytes;
uint64_t len = bdev_io->u.read.len;
struct rte_mempool *pool;
need_rbuf_tailq_t *tailq;
int rc;
@ -543,9 +548,62 @@ spdk_bdev_read(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
bdev_io->ch = ch;
bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
bdev_io->u.read.buf = buf;
bdev_io->u.read.nbytes = nbytes;
bdev_io->u.read.iov.iov_base = buf;
bdev_io->u.read.iov.iov_len = nbytes;
bdev_io->u.read.iovs = &bdev_io->u.read.iov;
bdev_io->u.read.iovcnt = 1;
bdev_io->u.read.len = nbytes;
bdev_io->u.read.offset = offset;
bdev_io->u.read.put_rbuf = false;
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
rc = spdk_bdev_io_submit(bdev_io);
if (rc < 0) {
spdk_bdev_put_io(bdev_io);
return NULL;
}
return bdev_io;
}
struct spdk_bdev_io *
spdk_bdev_readv(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
struct iovec *iov, int iovcnt,
uint64_t offset, uint64_t nbytes,
spdk_bdev_io_completion_cb cb, void *cb_arg)
{
struct spdk_bdev_io *bdev_io;
int rc;
/* Return failure if nbytes is not a multiple of bdev->blocklen */
if (nbytes % bdev->blocklen) {
return NULL;
}
/* Return failure if offset + nbytes is less than offset; indicates there
* has been an overflow and hence the offset has been wrapped around */
if ((offset + nbytes) < offset) {
return NULL;
}
/* Return failure if offset + nbytes exceeds the size of the blockdev */
if ((offset + nbytes) > (bdev->blockcnt * bdev->blocklen)) {
return NULL;
}
bdev_io = spdk_bdev_get_io();
if (!bdev_io) {
SPDK_ERRLOG("spdk_bdev_io memory allocation failed duing read\n");
return NULL;
}
bdev_io->ch = ch;
bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
bdev_io->u.read.iovs = iov;
bdev_io->u.read.iovcnt = iovcnt;
bdev_io->u.read.len = nbytes;
bdev_io->u.read.offset = offset;
bdev_io->u.read.put_rbuf = false;
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
rc = spdk_bdev_io_submit(bdev_io);
@ -832,8 +890,9 @@ void
spdk_bdev_io_get_rbuf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_rbuf_cb cb)
{
assert(cb != NULL);
assert(bdev_io->u.read.iovs != NULL);
if (bdev_io->u.read.buf == NULL) {
if (bdev_io->u.read.iovs[0].iov_base == NULL) {
bdev_io->get_rbuf_cb = cb;
_spdk_bdev_io_get_rbuf(bdev_io);
} else {

View File

@ -135,24 +135,53 @@ blockdev_malloc_destruct(struct spdk_bdev *bdev)
return 0;
}
static void
blockdev_malloc_read(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
struct malloc_task *task,
void *buf, uint64_t nbytes, uint64_t offset)
static int
blockdev_malloc_check_iov_len(struct iovec *iovs, int iovcnt, size_t nbytes)
{
int64_t rc;
int i;
SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx to %p\n",
nbytes, offset, buf);
for (i = 0; i < iovcnt; i++) {
if (nbytes < iovs[i].iov_len)
return 0;
nbytes -= iovs[i].iov_len;
}
return nbytes != 0;
}
static void
blockdev_malloc_readv(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
struct malloc_task *task,
struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
int64_t res = 0;
void *src = mdisk->malloc_buf + offset;
int i;
if (blockdev_malloc_check_iov_len(iov, iovcnt, len)) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
SPDK_BDEV_IO_STATUS_FAILED);
return;
}
SPDK_TRACELOG(SPDK_TRACE_MALLOC, "read %lu bytes from offset %#lx\n",
len, offset);
task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
task->num_outstanding = 1;
task->num_outstanding = iovcnt;
rc = spdk_copy_submit(__copy_task_from_malloc_task(task), ch, buf,
mdisk->malloc_buf + offset, nbytes, malloc_done);
for (i = 0; i < iovcnt; i++) {
res = spdk_copy_submit(__copy_task_from_malloc_task(task),
ch, iov[i].iov_base,
src, iov[i].iov_len, malloc_done);
if (rc != (int64_t)nbytes) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), SPDK_BDEV_IO_STATUS_FAILED);
if (res != (int64_t)iov[i].iov_len) {
malloc_done(__copy_task_from_malloc_task(task), -1);
}
src += iov[i].iov_len;
len -= iov[i].iov_len;
}
}
@ -161,24 +190,33 @@ blockdev_malloc_writev(struct malloc_disk *mdisk, struct spdk_io_channel *ch,
struct malloc_task *task,
struct iovec *iov, int iovcnt, size_t len, uint64_t offset)
{
int64_t rc;
int64_t res = 0;
void *dst = mdisk->malloc_buf + offset;
int i;
if ((iovcnt != 1) || (iov->iov_len != len)) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), SPDK_BDEV_IO_STATUS_FAILED);
if (blockdev_malloc_check_iov_len(iov, iovcnt, len)) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task),
SPDK_BDEV_IO_STATUS_FAILED);
return;
}
SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx from %p\n",
iov->iov_len, offset, iov->iov_base);
SPDK_TRACELOG(SPDK_TRACE_MALLOC, "wrote %lu bytes to offset %#lx\n",
len, offset);
task->status = SPDK_BDEV_IO_STATUS_SUCCESS;
task->num_outstanding = 1;
task->num_outstanding = iovcnt;
rc = spdk_copy_submit(__copy_task_from_malloc_task(task), ch, mdisk->malloc_buf + offset,
iov->iov_base, len, malloc_done);
for (i = 0; i < iovcnt; i++) {
res = spdk_copy_submit(__copy_task_from_malloc_task(task),
ch, dst, iov[i].iov_base,
iov[i].iov_len, malloc_done);
if (rc != (int64_t)len) {
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(task), SPDK_BDEV_IO_STATUS_FAILED);
if (res != (int64_t)iov[i].iov_len) {
malloc_done(__copy_task_from_malloc_task(task), -1);
}
dst += iov[i].iov_len;
len -= iov[i].iov_len;
}
}
@ -237,20 +275,25 @@ static int _blockdev_malloc_submit_request(struct spdk_bdev_io *bdev_io)
{
switch (bdev_io->type) {
case SPDK_BDEV_IO_TYPE_READ:
if (bdev_io->u.read.buf == NULL) {
bdev_io->u.read.buf = ((struct malloc_disk *)bdev_io->ctx)->malloc_buf +
bdev_io->u.read.offset;
if (bdev_io->u.read.iovs[0].iov_base == NULL) {
assert(bdev_io->u.read.iovcnt == 1);
bdev_io->u.read.iovs[0].iov_base =
((struct malloc_disk *)bdev_io->ctx)->malloc_buf +
bdev_io->u.read.offset;
bdev_io->u.read.iovs[0].iov_len = bdev_io->u.read.len;
bdev_io->u.read.put_rbuf = false;
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bdev_io->driver_ctx),
SPDK_BDEV_IO_STATUS_SUCCESS);
return 0;
}
blockdev_malloc_read((struct malloc_disk *)bdev_io->ctx,
bdev_io->ch,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.read.buf,
bdev_io->u.read.nbytes,
bdev_io->u.read.offset);
blockdev_malloc_readv((struct malloc_disk *)bdev_io->ctx,
bdev_io->ch,
(struct malloc_task *)bdev_io->driver_ctx,
bdev_io->u.read.iovs,
bdev_io->u.read.iovcnt,
bdev_io->u.read.len,
bdev_io->u.read.offset);
return 0;
case SPDK_BDEV_IO_TYPE_WRITE:

View File

@ -84,7 +84,17 @@ struct nvme_io_channel {
#define NVME_DEFAULT_MAX_UNMAP_BDESC_COUNT 1
struct nvme_blockio {
int reserved;
/** array of iovecs to transfer. */
struct iovec *iovs;
/** Number of iovecs in iovs array. */
int iovcnt;
/** Current iovec position. */
int iovpos;
/** Offset in current iovec. */
uint32_t iov_offset;
};
enum data_direction {
@ -107,9 +117,10 @@ static void nvme_ctrlr_initialize_blockdevs(struct spdk_nvme_ctrlr *ctrlr,
int bdev_per_ns, int ctrlr_id);
static int nvme_library_init(void);
static void nvme_library_fini(void);
int nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
struct nvme_blockio *bio,
int direction, void *buf, uint64_t nbytes, uint64_t offset);
static int nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
struct nvme_blockio *bio,
int direction, struct iovec *iov, int iovcnt, uint64_t nbytes,
uint64_t offset);
static int
nvme_get_ctx_size(void)
@ -121,17 +132,18 @@ SPDK_BDEV_MODULE_REGISTER(nvme_library_init, NULL, blockdev_nvme_get_spdk_runnin
nvme_get_ctx_size)
static int64_t
blockdev_nvme_read(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
struct nvme_blockio *bio,
void *buf, uint64_t nbytes, uint64_t offset)
blockdev_nvme_readv(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
struct nvme_blockio *bio,
struct iovec *iov, int iovcnt, uint64_t nbytes, uint64_t offset)
{
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
int64_t rc;
SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "read %lu bytes with offset %#lx to %p\n",
nbytes, offset, buf);
SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "read %lu bytes with offset %#lx\n",
nbytes, offset);
rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_READ, buf, nbytes, offset);
rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_READ,
iov, iovcnt, nbytes, offset);
if (rc < 0)
return -1;
@ -146,18 +158,15 @@ blockdev_nvme_writev(struct nvme_blockdev *nbdev, struct spdk_io_channel *ch,
struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
int64_t rc;
if ((iovcnt != 1) || (iov->iov_len != len))
return -1;
SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "write %lu bytes with offset %#lx\n",
len, offset);
SPDK_TRACELOG(SPDK_TRACE_BDEV_NVME, "write %lu bytes with offset %#lx from %p\n",
iov->iov_len, offset, iov->iov_base);
rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_WRITE, (void *)iov->iov_base,
iov->iov_len, offset);
rc = nvme_queue_cmd(nbdev, nvme_ch->qpair, bio, BDEV_DISK_WRITE,
iov, iovcnt, len, offset);
if (rc < 0)
return -1;
return iov->iov_len;
return len;
}
static void
@ -209,12 +218,13 @@ static void blockdev_nvme_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
{
int ret;
ret = blockdev_nvme_read((struct nvme_blockdev *)bdev_io->ctx,
bdev_io->ch,
(struct nvme_blockio *)bdev_io->driver_ctx,
bdev_io->u.read.buf,
bdev_io->u.read.nbytes,
bdev_io->u.read.offset);
ret = blockdev_nvme_readv((struct nvme_blockdev *)bdev_io->ctx,
bdev_io->ch,
(struct nvme_blockio *)bdev_io->driver_ctx,
bdev_io->u.read.iovs,
bdev_io->u.read.iovcnt,
bdev_io->u.read.len,
bdev_io->u.read.offset);
if (ret < 0) {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
@ -604,10 +614,51 @@ queued_done(void *ref, const struct spdk_nvme_cpl *cpl)
spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), status);
}
/*
 * SGL "reset" callback passed to spdk_nvme_ns_cmd_readv()/writev() (see the
 * nvme_queue_cmd hunk below). Repositions the per-I/O iovec cursor so that
 * the next queued_next_sge() call resumes sgl_offset bytes into the
 * scatter-gather list described by bio->iovs/bio->iovcnt.
 */
static void
queued_reset_sgl(void *ref, uint32_t sgl_offset)
{
struct nvme_blockio *bio = ref;
struct iovec *iov;
/* Skip whole iovecs that lie entirely before sgl_offset; whatever remains
 * in iov_offset is the byte offset into the iovec the loop stopped at
 * (consumed by the first queued_next_sge() call after this reset). */
bio->iov_offset = sgl_offset;
for (bio->iovpos = 0; bio->iovpos < bio->iovcnt; bio->iovpos++) {
iov = &bio->iovs[bio->iovpos];
if (bio->iov_offset < iov->iov_len)
break;
bio->iov_offset -= iov->iov_len;
}
}
/*
 * SGL "next segment" callback passed to spdk_nvme_ns_cmd_readv()/writev().
 * Emits the current iovec as one SGE (address + length) and advances the
 * cursor set up by queued_reset_sgl(). Always returns 0; assumes the NVMe
 * layer never asks for more SGEs than bio->iovcnt (asserted below).
 */
static int
queued_next_sge(void *ref, uint64_t *address, uint32_t *length)
{
struct nvme_blockio *bio = ref;
struct iovec *iov;
assert(bio->iovpos < bio->iovcnt);
iov = &bio->iovs[bio->iovpos];
bio->iovpos++;
/* spdk_vtophys() translates the buffer's virtual address for the device;
 * presumably each iovec fits one physically contiguous SGE — the iovec is
 * not split here, so that must be guaranteed by the caller (verify). */
*address = spdk_vtophys(iov->iov_base);
*length = iov->iov_len;
/* A nonzero iov_offset left by queued_reset_sgl() applies only to this
 * first segment after a reset: trim it off and clear it so subsequent
 * SGEs cover their whole iovec. */
if (bio->iov_offset) {
assert(bio->iov_offset <= iov->iov_len);
*address += bio->iov_offset;
*length -= bio->iov_offset;
bio->iov_offset = 0;
}
return 0;
}
int
nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
struct nvme_blockio *bio,
int direction, void *buf, uint64_t nbytes, uint64_t offset)
int direction, struct iovec *iov, int iovcnt, uint64_t nbytes,
uint64_t offset)
{
uint32_t ss = spdk_nvme_ns_get_sector_size(bdev->ns);
uint32_t lba_count;
@ -623,12 +674,19 @@ nvme_queue_cmd(struct nvme_blockdev *bdev, struct spdk_nvme_qpair *qpair,
lba_count = nbytes / ss;
bio->iovs = iov;
bio->iovcnt = iovcnt;
bio->iovpos = 0;
bio->iov_offset = 0;
if (direction == BDEV_DISK_READ) {
rc = spdk_nvme_ns_cmd_read(bdev->ns, qpair, buf, next_lba,
lba_count, queued_done, bio, 0);
rc = spdk_nvme_ns_cmd_readv(bdev->ns, qpair, next_lba,
lba_count, queued_done, bio, 0,
queued_reset_sgl, queued_next_sge);
} else {
rc = spdk_nvme_ns_cmd_write(bdev->ns, qpair, buf, next_lba,
lba_count, queued_done, bio, 0);
rc = spdk_nvme_ns_cmd_writev(bdev->ns, qpair, next_lba,
lba_count, queued_done, bio, 0,
queued_reset_sgl, queued_next_sge);
}
if (rc != 0) {

View File

@ -226,17 +226,20 @@ SPDK_BDEV_MODULE_REGISTER(blockdev_rbd_library_init, blockdev_rbd_library_fini,
blockdev_rbd_get_ctx_size)
static int64_t
blockdev_rbd_read(struct blockdev_rbd *disk, struct spdk_io_channel *ch,
struct blockdev_rbd_io *cmd, void *buf, size_t nbytes,
uint64_t offset)
blockdev_rbd_readv(struct blockdev_rbd *disk, struct spdk_io_channel *ch,
struct blockdev_rbd_io *cmd, struct iovec *iov,
int iovcnt, size_t len, uint64_t offset)
{
struct blockdev_rbd_io_channel *rbdio_ch = spdk_io_channel_get_ctx(ch);
if (iovcnt != 1 || iov->iov_len != len)
return -1;
cmd->ch = rbdio_ch;
cmd->direction = BLOCKDEV_RBD_READ;
cmd->len = nbytes;
cmd->len = len;
return blockdev_rbd_start_aio(rbdio_ch->image, cmd, buf, offset, nbytes);
return blockdev_rbd_start_aio(rbdio_ch->image, cmd, iov->iov_base, offset, len);
}
static int64_t
@ -277,12 +280,13 @@ static void blockdev_rbd_get_rbuf_cb(struct spdk_bdev_io *bdev_io)
{
int ret;
ret = blockdev_rbd_read(bdev_io->ctx,
bdev_io->ch,
(struct blockdev_rbd_io *)bdev_io->driver_ctx,
bdev_io->u.read.buf,
bdev_io->u.read.nbytes,
bdev_io->u.read.offset);
ret = blockdev_rbd_readv(bdev_io->ctx,
bdev_io->ch,
(struct blockdev_rbd_io *)bdev_io->driver_ctx,
bdev_io->u.read.iovs,
bdev_io->u.read.iovcnt,
bdev_io->u.read.len,
bdev_io->u.read.offset);
if (ret != 0) {
spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);

View File

@ -1302,7 +1302,7 @@ spdk_bdev_scsi_task_complete(spdk_event_t event)
}
if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
task->rbuf = bdev_io->u.read.buf;
task->rbuf = bdev_io->u.read.iovs[0].iov_base;
}
spdk_scsi_lun_complete_task(task->lun, task);

View File

@ -193,7 +193,8 @@ bdevperf_complete(spdk_event_t event)
target->is_draining = true;
g_run_failed = true;
} else if (g_verify || g_reset || g_unmap) {
if (memcmp(task->buf, bdev_io->u.read.buf, g_io_size) != 0) {
assert(bdev_io->u.read.iovcnt == 1);
if (memcmp(task->buf, bdev_io->u.read.iov.iov_base, g_io_size) != 0) {
printf("Buffer mismatch! Disk Offset: %lu\n", bdev_io->u.read.offset);
target->is_draining = true;
g_run_failed = true;