bdev: Copy command fallback supports split to make copy size unlimited

The generic bdev layer has a fallback mechanism for the copy command
used when the backend bdev module does not support it. However, its max
size is limited. To remove the limitation, the fallback supports split by
using the unified split logic rather than following the write zeroes
command.

bdev_copy_should_split() and bdev_copy_split() use spdk_bdev_get_max_copy()
rather than referring to bdev->max_copy, so that the fallback case is included.

Then, spdk_bdev_copy_blocks() does the following.

If the copy size is large and should be split, use the generic split
logic regardless of whether copy is supported or not.
If copy is supported, send the copy request; if copy is not
supported, emulate it using regular read and write requests.

Add unit test case to verify this addition.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Iaf51db56bb4b95f99a0ea7a0237d8fa8ae039a54
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17073
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Shuhei Matsumoto 2023-05-02 10:35:53 +09:00 committed by Jim Harris
parent bf8f5afa44
commit 0c1df53e7a
2 changed files with 55 additions and 3 deletions

View File

@ -9739,10 +9739,15 @@ spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
return 0;
}
/* If the bdev backing device support copy directly, pass to it to process.
* Else do general processing from bdev layer.
/* If the copy size is large and should be split, use the generic split logic
* regardless of whether SPDK_BDEV_IO_TYPE_COPY is supported or not.
*
* Then, send the copy request if SPDK_BDEV_IO_TYPE_COPY is supported or
* emulate it using regular read and write requests otherwise.
*/
if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY) ||
bdev_io->internal.split) {
bdev_io_submit(bdev_io);
return 0;
}

View File

@ -6282,6 +6282,53 @@ bdev_copy_split_test(void)
}
CU_ASSERT(g_io_done == true);
/* Case 4: Same test scenario as the case 2 but the configuration is different.
* Copy is not supported.
*/
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
num_children = 2;
max_copy_blocks = spdk_bdev_get_max_copy(bdev);
num_blocks = max_copy_blocks * num_children;
src_offset = bdev->blockcnt - num_blocks;
offset = 0;
g_io_done = false;
for (i = 0; i < num_children; i++) {
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
max_copy_blocks, 0);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
src_offset += max_copy_blocks;
}
for (i = 0; i < num_children; i++) {
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
max_copy_blocks, 0);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
offset += max_copy_blocks;
}
src_offset = bdev->blockcnt - num_blocks;
offset = 0;
rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
CU_ASSERT_EQUAL(rc, 0);
CU_ASSERT(g_io_done == false);
while (num_children > 0) {
num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
/* One copy request is split into one read and one write requests. */
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
stub_complete_io(num_outstanding);
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
stub_complete_io(num_outstanding);
num_children -= num_outstanding;
}
CU_ASSERT(g_io_done == true);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
spdk_put_io_channel(ioch);
spdk_bdev_close(desc);
free_bdev(bdev);