blob: Add readv/writev_ext functions

These functions accept an optional spdk_blob_ext_io_opts
structure. If this structure is provided by the user,
the readv_ext/writev_ext ops of the base dev will be
used in the data path.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: I370dd43f8c56f5752f7a52d0780bcfe3e3ae2d9e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11371
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Alexey Marchuk 2022-01-31 13:16:21 +03:00 committed by Tomasz Zawadzki
parent 8b25bfce7a
commit a236084542
9 changed files with 265 additions and 66 deletions
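
For orientation, here is a minimal caller-side sketch of the new API. It assumes a blob and an I/O channel are already open; the helper and callback names, the buffer handling, and the NULL memory-domain values are illustrative only and not part of this commit.

#include <assert.h>

#include "spdk/blob.h"

/* Illustrative completion callback; name and body are not from the commit. */
static void
write_complete_cb(void *cb_arg, int bserrno)
{
	assert(bserrno == 0);
}

/* buf_len must equal n_io_units multiplied by the blobstore io unit size. */
static void
blob_write_with_ext_opts(struct spdk_blob *blob, struct spdk_io_channel *channel,
			 void *buf, uint64_t buf_len, uint64_t n_io_units)
{
	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
	struct spdk_blob_ext_io_opts ext_opts = {
		.size = sizeof(ext_opts),
		.memory_domain = NULL,      /* e.g. an RDMA memory domain; NULL for plain host memory */
		.memory_domain_ctx = NULL,
		.user_ctx = NULL,
	};

	/* Because ext_opts is non-NULL, the request is routed to the base
	 * dev's writev_ext op rather than its plain writev op. */
	spdk_blob_io_writev_ext(blob, channel, &iov, 1, 0, n_io_units,
				write_complete_cb, NULL, &ext_opts);
}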

View File

@ -73,6 +73,11 @@ New parameters, `ctrlr_loss_timeout_sec`, `reconnect_delay_sec`, and `fast_io_fa
added to the RPC `bdev_nvme_set_options`. They can be overridden if they are given by the RPC
`bdev_nvme_attach_controller`.
### blobstore
New functions `spdk_blob_io_writev_ext` and `spdk_blob_io_readv_ext` are added. The new functions accept
an `spdk_blob_ext_io_opts` structure with extended IO request options.
### event
Added `msg_mempool_size` parameter to `spdk_reactors_init` and `spdk_thread_lib_init_ext`.

View File

@ -809,6 +809,44 @@ void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg);
/**
* Write the data described by 'iov' to 'length' io_units beginning at 'offset' io_units
* into the blob. Accepts extended IO request options.
*
* \param blob Blob to write.
* \param channel I/O channel used to submit requests.
* \param iov The pointer points to an array of iovec structures.
* \param iovcnt The number of buffers.
* \param offset Offset is in io units from the beginning of the blob.
* \param length Size of data in io units.
* \param cb_fn Called when the operation is complete.
* \param cb_arg Argument passed to function cb_fn.
* \param io_opts Optional extended IO request options.
*/
void spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg,
struct spdk_blob_ext_io_opts *io_opts);
/**
* Read 'length' io_units starting at 'offset' io_units from the blob into the memory
* described by 'iov'. Accepts extended IO request options.
*
* \param blob Blob to read.
* \param channel I/O channel used to submit requests.
* \param iov The pointer points to an array of iovec structures.
* \param iovcnt The number of buffers.
* \param offset Offset is in io units from the beginning of the blob.
* \param length Size of data in io units.
* \param cb_fn Called when the operation is complete.
* \param cb_arg Argument passed to function cb_fn.
* \param io_opts Optional extended IO request options.
*/
void spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg,
struct spdk_blob_ext_io_opts *io_opts);
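
Since io_opts is optional, a caller can route everything through the _ext entry points; below is a minimal sketch of that pattern (the wrapper name is hypothetical, and blob/channel/iov are assumed to come from elsewhere). With a NULL opts pointer the request takes the same path as plain spdk_blob_io_readv(), which is exactly how the non-ext wrappers in blob.c below behave.

static void
blob_readv_maybe_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
		     struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
		     spdk_blob_op_complete cb_fn, void *cb_arg,
		     struct spdk_blob_ext_io_opts *opts)
{
	/* opts may be NULL; NULL means "no extended options" and the request
	 * falls back to the same code path as spdk_blob_io_readv(). */
	spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length,
			       cb_fn, cb_arg, opts);
}
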
/**
* Unmap 'length' io_units beginning at 'offset' io_units on the blob as unused. Unmapped
* io_units may allow the underlying storage media to behave more efficiently.

View File

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -55,6 +56,17 @@ blob_bs_dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
assert(false);
}
static void
blob_bs_dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count,
struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *ext_opts)
{
cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, -EPERM);
assert(false);
}
static void
blob_bs_dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
uint64_t lba, uint64_t lba_count,
@ -102,6 +114,18 @@ blob_bs_dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
blob_bs_dev_read_cpl, cb_args);
}
static inline void
blob_bs_dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt,
uint64_t lba, uint32_t lba_count, struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *ext_opts)
{
struct spdk_blob_bs_dev *b = (struct spdk_blob_bs_dev *)dev;
spdk_blob_io_readv_ext(b->blob, channel, iov, iovcnt, lba, lba_count,
blob_bs_dev_read_cpl, cb_args, ext_opts);
}
static void
blob_bs_dev_destroy_cpl(void *cb_arg, int bserrno)
{
@ -140,8 +164,10 @@ bs_create_blob_bs_dev(struct spdk_blob *blob)
b->bs_dev.destroy = blob_bs_dev_destroy;
b->bs_dev.write = blob_bs_dev_write;
b->bs_dev.writev = blob_bs_dev_writev;
b->bs_dev.writev_ext = blob_bs_dev_writev_ext;
b->bs_dev.read = blob_bs_dev_read;
b->bs_dev.readv = blob_bs_dev_readv;
b->bs_dev.readv_ext = blob_bs_dev_readv_ext;
b->bs_dev.write_zeroes = blob_bs_dev_write_zeroes;
b->bs_dev.unmap = blob_bs_dev_unmap;
b->blob = blob;
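
The hunk above wires the ext ops for the blob-backed bs_dev: reads are forwarded to spdk_blob_io_readv_ext, while writes remain unsupported, matching the existing writev stub. For other base dev implementations, a backend with no use for the extended options can simply fall back to its plain path. A hypothetical sketch (the function name is made up; the pattern mirrors the unit-test stub later in this change):

static void
my_bs_dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
		    struct iovec *iov, int iovcnt,
		    uint64_t lba, uint32_t lba_count,
		    struct spdk_bs_dev_cb_args *cb_args,
		    struct spdk_blob_ext_io_opts *ext_opts)
{
	/* This backend ignores ext_opts and services the request exactly
	 * like its plain readv op. */
	dev->readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

/* Registered next to the plain op when the bs_dev is set up, e.g.:
 *     dev->readv_ext = my_bs_dev_readv_ext;
 */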

View File

@ -2841,6 +2841,7 @@ struct rw_iov_ctx {
uint64_t io_unit_offset;
uint64_t io_units_remaining;
uint64_t io_units_done;
struct spdk_blob_ext_io_opts *ext_io_opts;
struct iovec iov[0];
};
@ -2913,18 +2914,19 @@ rw_iov_split_next(void *cb_arg, int bserrno)
iov = &ctx->iov[0];
if (ctx->read) {
spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, rw_iov_split_next, ctx);
spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
} else {
spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, rw_iov_split_next, ctx);
spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
}
}
static void
blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
struct iovec *iov, int iovcnt,
uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read,
struct spdk_blob_ext_io_opts *ext_io_opts)
{
struct spdk_bs_cpl cpl;
@ -2997,6 +2999,8 @@ blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_chan
return;
}
seq->ext_io_opts = ext_io_opts;
if (is_allocated) {
bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
} else {
@ -3013,6 +3017,8 @@ blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_chan
return;
}
seq->ext_io_opts = ext_io_opts;
bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
} else {
/* Queue this operation and allocate the cluster */
@ -3025,6 +3031,8 @@ blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_chan
return;
}
op->ext_io_opts = ext_io_opts;
bs_allocate_and_copy_cluster(blob, _channel, offset, op);
}
}
@ -3047,6 +3055,7 @@ blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_chan
ctx->io_unit_offset = offset;
ctx->io_units_remaining = length;
ctx->io_units_done = 0;
ctx->ext_io_opts = ext_io_opts;
rw_iov_split_next(ctx, 0);
}
@ -7576,14 +7585,32 @@ void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg)
{
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
}
void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg)
{
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
}
void
spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
io_opts);
}
void
spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
io_opts);
}
struct spdk_bs_iter_ctx {

View File

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -121,6 +122,7 @@ bs_sequence_start(struct spdk_io_channel *_channel,
set->cb_args.cb_fn = bs_sequence_completion;
set->cb_args.cb_arg = set;
set->cb_args.channel = channel->dev_channel;
set->ext_io_opts = NULL;
return (spdk_bs_sequence_t *)set;
}
@ -191,8 +193,14 @@ bs_sequence_readv_bs_dev(spdk_bs_sequence_t *seq, struct spdk_bs_dev *bs_dev,
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
bs_dev->readv(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
&set->cb_args);
if (set->ext_io_opts) {
assert(bs_dev->readv_ext);
bs_dev->readv_ext(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
&set->cb_args, set->ext_io_opts);
} else {
bs_dev->readv(bs_dev, spdk_io_channel_from_ctx(channel), iov, iovcnt, lba, lba_count,
&set->cb_args);
}
}
void
@ -207,8 +215,13 @@ bs_sequence_readv_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args);
if (set->ext_io_opts) {
assert(channel->dev->readv_ext);
channel->dev->readv_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args, set->ext_io_opts);
} else {
channel->dev->readv(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count, &set->cb_args);
}
}
void
@ -225,8 +238,14 @@ bs_sequence_writev_dev(spdk_bs_sequence_t *seq, struct iovec *iov, int iovcnt,
set->u.sequence.cb_fn = cb_fn;
set->u.sequence.cb_arg = cb_arg;
channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args);
if (set->ext_io_opts) {
assert(channel->dev->writev_ext);
channel->dev->writev_ext(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args, set->ext_io_opts);
} else {
channel->dev->writev(channel->dev, channel->dev_channel, iov, iovcnt, lba, lba_count,
&set->cb_args);
}
}
void
@ -438,6 +457,7 @@ bs_user_op_alloc(struct spdk_io_channel *_channel, struct spdk_bs_cpl *cpl,
set->cpl = *cpl;
set->channel = channel;
set->ext_io_opts = NULL;
args = &set->u.user_op;
@ -480,14 +500,16 @@ bs_user_op_execute(spdk_bs_user_op_t *op)
set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
break;
case SPDK_BLOB_READV:
spdk_blob_io_readv(args->blob, ch, args->payload, args->iovcnt,
args->offset, args->length,
set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
spdk_blob_io_readv_ext(args->blob, ch, args->payload, args->iovcnt,
args->offset, args->length,
set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
set->ext_io_opts);
break;
case SPDK_BLOB_WRITEV:
spdk_blob_io_writev(args->blob, ch, args->payload, args->iovcnt,
args->offset, args->length,
set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg);
spdk_blob_io_writev_ext(args->blob, ch, args->payload, args->iovcnt,
args->offset, args->length,
set->cpl.u.blob_basic.cb_fn, set->cpl.u.blob_basic.cb_arg,
set->ext_io_opts);
break;
}
TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);

View File

@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -139,7 +140,8 @@ struct spdk_bs_request_set {
void *payload; /* cast to iov for readv/writev */
} user_op;
} u;
/* Pointer to ext_io_opts passed by the user */
struct spdk_blob_ext_io_opts *ext_io_opts;
TAILQ_ENTRY(spdk_bs_request_set) link;
};

View File

@ -46,6 +46,8 @@
spdk_blob_io_read;
spdk_blob_io_writev;
spdk_blob_io_readv;
spdk_blob_io_readv_ext;
spdk_blob_io_writev_ext;
spdk_blob_io_unmap;
spdk_blob_io_write_zeroes;
spdk_bs_iter_first;

View File

@ -6252,15 +6252,42 @@ test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
}
static inline void
test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
if (io_opts) {
g_dev_writev_ext_called = false;
memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
io_opts);
} else {
spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
}
poll_threads();
CU_ASSERT(g_bserrno == 0);
if (io_opts) {
CU_ASSERT(g_dev_writev_ext_called);
CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
}
}
static void
test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
bool ext_api)
{
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
uint8_t *cluster0, *cluster1;
struct iovec iov[4];
struct spdk_blob_ext_io_opts ext_opts = {
.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
.memory_domain_ctx = (void *)0xf00df00d,
.size = sizeof(struct spdk_blob_ext_io_opts),
.user_ctx = (void *)123,
};
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
@ -6269,9 +6296,9 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
/* Try to perform I/O with io unit = 512 */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 1 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
/* If thin provisioned is set cluster should be allocated now */
SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
@ -6286,9 +6313,9 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
/* Verify write with offset on first page */
iov[0].iov_base = payload_ff;
iov[0].iov_len = 1 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
@ -6328,8 +6355,9 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
/* Verify write across multiple pages */
iov[0].iov_base = payload_aa;
iov[0].iov_len = 8 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
poll_threads();
test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
@ -6343,8 +6371,9 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
iov[0].iov_base = payload_ff;
iov[0].iov_len = 8 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
poll_threads();
test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
@ -6366,8 +6395,9 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
iov[0].iov_base = payload_ff;
iov[0].iov_len = 2 * 512;
spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
poll_threads();
test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
@ -6387,14 +6417,41 @@ test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_c
CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
}
static inline void
test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
{
if (io_opts) {
g_dev_readv_ext_called = false;
memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
} else {
spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
}
poll_threads();
CU_ASSERT(g_bserrno == 0);
if (io_opts) {
CU_ASSERT(g_dev_readv_ext_called);
CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
}
}
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
bool ext_api)
{
uint8_t payload_read[64 * 512];
uint8_t payload_ff[64 * 512];
uint8_t payload_aa[64 * 512];
uint8_t payload_00[64 * 512];
struct iovec iov[4];
struct spdk_blob_ext_io_opts ext_opts = {
.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
.memory_domain_ctx = (void *)0xf00df00d,
.size = sizeof(struct spdk_blob_ext_io_opts),
.user_ctx = (void *)123,
};
memset(payload_ff, 0xFF, sizeof(payload_ff));
memset(payload_aa, 0xAA, sizeof(payload_aa));
@ -6407,10 +6464,9 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 1 * 512;
spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
@ -6422,9 +6478,8 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
memset(payload_read, 0x00, sizeof(payload_read));
iov[0].iov_base = payload_read;
iov[0].iov_len = 4 * 512;
spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
@ -6441,9 +6496,8 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
iov[0].iov_len = 4 * 512;
iov[1].iov_base = payload_read + 4 * 512;
iov[1].iov_len = 4 * 512;
spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
@ -6461,9 +6515,9 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
iov[2].iov_len = 2 * 512;
iov[3].iov_base = payload_read + 6 * 512;
iov[3].iov_len = 2 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
@ -6477,9 +6531,9 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
iov[0].iov_len = 1 * 512;
iov[1].iov_base = payload_read + 1 * 512;
iov[1].iov_len = 3 * 512;
spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
@ -6498,9 +6552,10 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
iov[2].iov_len = 4 * 512;
iov[3].iov_base = payload_read + 7 * 512;
iov[3].iov_len = 25 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
@ -6518,9 +6573,9 @@ test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_ch
iov[2].iov_len = 16 * 512;
iov[3].iov_base = payload_read + 25 * 512;
iov[3].iov_len = 39 * 512;
spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
poll_threads();
CU_ASSERT(g_bserrno == 0);
test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
ext_api ? &ext_opts : NULL);
CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
@ -6579,8 +6634,12 @@ blob_io_unit(void)
test_io_read(dev, blob, channel);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel);
test_iov_read(dev, blob, channel);
test_iov_write(dev, blob, channel, false);
test_iov_read(dev, blob, channel, false);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel, true);
test_iov_read(dev, blob, channel, true);
test_io_unmap(dev, blob, channel);
@ -6601,11 +6660,14 @@ blob_io_unit(void)
test_io_write(dev, blob, channel);
test_io_read(dev, blob, channel);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel);
test_iov_read(dev, blob, channel);
test_iov_write(dev, blob, channel, false);
test_iov_read(dev, blob, channel, false);
test_io_zeroes(dev, blob, channel);
test_iov_write(dev, blob, channel, true);
test_iov_read(dev, blob, channel, true);
/* Create snapshot */
@ -6637,9 +6699,13 @@ blob_io_unit(void)
test_io_read(dev, snapshot, channel);
test_io_read(dev, clone, channel);
test_iov_read(dev, blob, channel);
test_iov_read(dev, snapshot, channel);
test_iov_read(dev, clone, channel);
test_iov_read(dev, blob, channel, false);
test_iov_read(dev, snapshot, channel, false);
test_iov_read(dev, clone, channel, false);
test_iov_read(dev, blob, channel, true);
test_iov_read(dev, snapshot, channel, true);
test_iov_read(dev, clone, channel, true);
/* Inflate clone */
@ -6652,8 +6718,12 @@ blob_io_unit(void)
test_io_unmap(dev, clone, channel);
test_iov_write(dev, clone, channel);
test_iov_read(dev, clone, channel);
test_iov_write(dev, clone, channel, false);
test_iov_read(dev, clone, channel, false);
test_io_unmap(dev, clone, channel);
test_iov_write(dev, clone, channel, true);
test_iov_read(dev, clone, channel, true);
spdk_blob_close(blob, blob_op_complete, NULL);
spdk_blob_close(snapshot, blob_op_complete, NULL);

View File

@ -41,6 +41,9 @@
uint8_t *g_dev_buffer;
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
bool g_dev_writev_ext_called;
bool g_dev_readv_ext_called;
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;
struct spdk_power_failure_counters {
uint64_t general_counter;
@ -258,6 +261,8 @@ dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *io_opts)
{
g_dev_readv_ext_called = true;
g_blob_ext_io_opts = *io_opts;
dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}
@ -307,6 +312,8 @@ dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
struct spdk_bs_dev_cb_args *cb_args,
struct spdk_blob_ext_io_opts *io_opts)
{
g_dev_writev_ext_called = true;
g_blob_ext_io_opts = *io_opts;
dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}