bdev: Add default copy command support in bdev

Add default copy command support in the bdev layer for backing devices that
do not support the copy command. When the backing device lacks native copy
support, the bdev layer emulates the copy with a read followed by a write
through an internally allocated buffer.

Signed-off-by: Rui Chang <rui.chang@arm.com>
Change-Id: I5632e25544e95ac0c53ff91c4cd135dac53323ae
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16638
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Rui Chang 2023-01-19 10:54:22 +08:00 committed by Tomasz Zawadzki
parent e44d631724
commit 8613654074
6 changed files with 150 additions and 41 deletions
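
With this change, spdk_bdev_copy_blocks() is accepted regardless of whether the
backing device implements the copy command natively; unsupported devices fall
back to a read/write emulation inside the bdev layer. A minimal caller-side
sketch (copy_done and issue_copy are illustrative names, not part of this
change):

#include "spdk/bdev.h"

/* Illustrative completion callback: release the I/O once the copy finishes. */
static void
copy_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	spdk_bdev_free_io(bdev_io);
	/* propagate success/failure to the caller's context as needed */
}

/* Copy num_blocks blocks from src to dst on the same bdev. After this change
 * the call no longer fails with -ENOTSUP when the device lacks native copy. */
static int
issue_copy(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	   uint64_t dst, uint64_t src, uint64_t num_blocks)
{
	return spdk_bdev_copy_blocks(desc, ch, dst, src, num_blocks, copy_done, NULL);
}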


@@ -497,7 +497,10 @@ struct spdk_bdev {
/* Maximum write zeroes in unit of logical block */
uint32_t max_write_zeroes;
/* Maximum copy size in unit of logical block */
/**
* Maximum copy size in unit of logical block
* Should be set explicitly when the backing device supports the copy command
*/
uint32_t max_copy;
/**


@@ -26,6 +26,7 @@
#include "bdev_internal.h"
#include "spdk_internal/trace_defs.h"
#include "spdk_internal/assert.h"
#ifdef SPDK_CONFIG_VTUNE
#include "ittnotify.h"
@@ -4309,7 +4310,22 @@ spdk_bdev_is_dif_check_enabled(const struct spdk_bdev *bdev,
uint32_t
spdk_bdev_get_max_copy(const struct spdk_bdev *bdev)
{
return bdev->max_copy;
uint64_t aligned_length;
uint64_t max_copy_blocks;
uint64_t temp_max_copy_blocks;
struct spdk_iobuf_opts opts;
if (spdk_bdev_io_type_supported((struct spdk_bdev *)bdev, SPDK_BDEV_IO_TYPE_COPY)) {
return bdev->max_copy;
} else {
spdk_iobuf_get_opts(&opts);
aligned_length = opts.large_bufsize - spdk_bdev_get_buf_align(bdev);
temp_max_copy_blocks = spdk_bdev_is_md_separate(bdev) ?
aligned_length / (bdev->blocklen + bdev->md_len) :
aligned_length / bdev->blocklen;
max_copy_blocks = 1 << spdk_u64log2(temp_max_copy_blocks);
return max_copy_blocks;
}
}
uint64_t
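
When the backing device does not support the copy command, the limit reported
above is derived from the iobuf pool: the largest buffer size minus the bdev's
buffer alignment, converted to blocks (counting separate metadata when present)
and rounded down to a power of two. A worked example under assumed values (the
real numbers come from spdk_iobuf_get_opts() and the bdev's geometry):

/* Illustration only: assumes a 64 KiB large iobuf buffer, 8-byte buffer
 * alignment, 512-byte blocks and no separate metadata. */
uint64_t aligned_length = 65536 - 8;                /* 65528 usable bytes */
uint64_t blocks = aligned_length / 512;             /* 127 whole blocks   */
uint64_t max_copy = 1ULL << spdk_u64log2(blocks);   /* rounded down to 64 */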
@@ -9031,6 +9047,88 @@ spdk_bdev_for_each_channel(struct spdk_bdev *bdev, spdk_bdev_for_each_channel_ms
iter, bdev_each_channel_cpl);
}
static void
bdev_copy_do_write_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *parent_io = cb_arg;
/* Check return status of write */
parent_io->internal.status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
parent_io->internal.cb(parent_io, success, parent_io->internal.caller_ctx);
spdk_bdev_free_io(bdev_io);
}
static void
bdev_copy_do_write(void *_bdev_io)
{
struct spdk_bdev_io *bdev_io = _bdev_io;
int rc;
/* Write blocks */
rc = spdk_bdev_write_blocks_with_md(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs[0].iov_base,
bdev_io->u.bdev.md_buf, bdev_io->u.bdev.offset_blocks,
bdev_io->u.bdev.num_blocks, bdev_copy_do_write_complete, bdev_io);
if (rc == -ENOMEM) {
bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_write);
} else if (rc != 0) {
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
}
}
static void
bdev_copy_do_read_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *parent_io = cb_arg;
/* Check return status of read */
if (!success) {
parent_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
parent_io->internal.cb(parent_io, false, parent_io->internal.caller_ctx);
spdk_bdev_free_io(bdev_io);
return;
}
spdk_bdev_free_io(bdev_io);
/* Do write */
bdev_copy_do_write(parent_io);
}
static void
bdev_copy_do_read(void *_bdev_io)
{
struct spdk_bdev_io *bdev_io = _bdev_io;
int rc;
/* Read blocks */
rc = spdk_bdev_read_blocks_with_md(bdev_io->internal.desc,
spdk_io_channel_from_ctx(bdev_io->internal.ch), bdev_io->u.bdev.iovs[0].iov_base,
bdev_io->u.bdev.md_buf, bdev_io->u.bdev.copy.src_offset_blocks,
bdev_io->u.bdev.num_blocks, bdev_copy_do_read_complete, bdev_io);
if (rc == -ENOMEM) {
bdev_queue_io_wait_with_cb(bdev_io, bdev_copy_do_read);
} else if (rc != 0) {
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
}
}
static void
bdev_copy_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
if (!success) {
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
return;
}
bdev_copy_do_read(bdev_io);
}
int
spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
uint64_t dst_offset_blocks, uint64_t src_offset_blocks, uint64_t num_blocks,
@@ -9044,11 +9142,6 @@ spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
return -EBADF;
}
if (spdk_unlikely(!bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY))) {
SPDK_DEBUGLOG(bdev, "Copy IO type is not supported\n");
return -ENOTSUP;
}
if (num_blocks == 0) {
SPDK_ERRLOG("Can't copy 0 blocks\n");
return -EINVAL;
@@ -9076,9 +9169,28 @@ spdk_bdev_copy_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
bdev_io->u.bdev.num_blocks = num_blocks;
bdev_io->u.bdev.memory_domain = NULL;
bdev_io->u.bdev.memory_domain_ctx = NULL;
bdev_io->u.bdev.iovs = NULL;
bdev_io->u.bdev.iovcnt = 0;
bdev_io->u.bdev.md_buf = NULL;
bdev_io_init(bdev_io, bdev, cb_arg, cb);
bdev_io_submit(bdev_io);
if (dst_offset_blocks == src_offset_blocks) {
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
return 0;
}
/* If the backing device supports the copy command, pass the request to it directly.
 * Otherwise, emulate the copy in the bdev layer.
 */
if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
bdev_io_submit(bdev_io);
return 0;
}
spdk_bdev_io_get_buf(bdev_io, bdev_copy_get_buf_cb, num_blocks * spdk_bdev_get_block_size(bdev));
return 0;
}
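
The emulated path above stages the data through one internally allocated buffer
(read into it, then write it out), requeueing via the io_wait mechanism on
-ENOMEM. A caller that wants to move a region larger than the limit reported by
spdk_bdev_get_max_copy() can split it into chunks, as in this sketch
(split_copy is illustrative, reuses the copy_done callback shown earlier, and
omits completion ordering and error recovery):

/* Illustrative only: submit a large copy as a series of chunks no larger
 * than the bdev's reported max_copy. Requires spdk/bdev.h and spdk/util.h. */
static int
split_copy(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	   uint64_t dst, uint64_t src, uint64_t num_blocks)
{
	struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
	uint64_t max = spdk_bdev_get_max_copy(bdev);
	int rc = 0;

	while (num_blocks > 0 && rc == 0) {
		/* max == 0 means the copy size is unlimited */
		uint64_t chunk = (max == 0) ? num_blocks : spdk_min(num_blocks, max);

		rc = spdk_bdev_copy_blocks(desc, ch, dst, src, chunk, copy_done, NULL);
		dst += chunk;
		src += chunk;
		num_blocks -= chunk;
	}
	return rc;
}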


@@ -295,6 +295,7 @@ nvmf_ctrlr_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_su
cdata->oncs.compare = 1;
cdata->oncs.reservations = 1;
cdata->fuses.compare_and_write = 1;
cdata->oncs.copy = 1;
cdata->sgls.supported = 1;
cdata->sgls.keyed_sgl = 1;
cdata->sgls.sgl_offset = 1;
@@ -2764,7 +2765,7 @@ spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_c
cdata->oncs.dsm = nvmf_ctrlr_dsm_supported(ctrlr);
cdata->oncs.write_zeroes = nvmf_ctrlr_write_zeroes_supported(ctrlr);
cdata->oncs.reservations = ctrlr->cdata.oncs.reservations;
cdata->oncs.copy = nvmf_ctrlr_copy_supported(ctrlr);
cdata->oncs.copy = ctrlr->cdata.oncs.copy;
cdata->ocfs.copy_format0 = cdata->oncs.copy;
if (subsystem->flags.ana_reporting) {
/* Asymmetric Namespace Access Reporting is supported. */


@@ -181,19 +181,17 @@ nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *n
SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
max_copy = spdk_bdev_get_max_copy(bdev);
if (max_copy == 0 || max_copy > UINT16_MAX) {
/* Zero means copy size is unlimited */
nsdata->mcl = UINT16_MAX;
nsdata->mssrl = UINT16_MAX;
} else {
nsdata->mcl = max_copy;
nsdata->mssrl = max_copy;
}
/* For now we support just one source range for copy command */
nsdata->msrc = 0;
/* For now we support just one source range */
nsdata->msrc = 0;
max_copy = spdk_bdev_get_max_copy(bdev);
if (max_copy == 0 || max_copy > UINT16_MAX) {
/* Zero means copy size is unlimited */
nsdata->mcl = UINT16_MAX;
nsdata->mssrl = UINT16_MAX;
} else {
nsdata->mcl = max_copy;
nsdata->mssrl = max_copy;
}
}
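
With every bdev now reporting a copy limit, the Identify Namespace data always
advertises the copy parameters: MSSRL and MCL are set from the bdev's maximum
copy size, and MSRC is 0's based, so a value of 0 means one source range per
command. A host-side reading of these fields, with illustrative variable names
and nsdata assumed to be the returned struct spdk_nvme_ns_data:

/* Illustrative only: what the values above mean to a host. */
uint16_t ranges_allowed = nsdata->msrc + 1;                     /* one source range   */
uint32_t blocks_allowed = spdk_min(nsdata->mssrl, nsdata->mcl); /* per-command length */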
@@ -696,13 +694,6 @@ nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_COPY)) {
SPDK_NOTICELOG("Copy command not supported by bdev\n");
response->status.sct = SPDK_NVME_SCT_GENERIC;
response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}
/*
* We support only one source range, and rely on this with the xfer
* below.


@@ -1150,12 +1150,6 @@ bdev_io_types_test(void)
ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);
/* COPY is not supported */
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
rc = spdk_bdev_copy_blocks(desc, io_ch, 128, 0, 128, io_done, NULL);
CU_ASSERT(rc == -ENOTSUP);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
@@ -6051,22 +6045,32 @@ bdev_copy(void)
/* First test that if the bdev supports copy, the request won't be split */
bdev->md_len = 0;
bdev->blocklen = 4096;
num_blocks = 512;
bdev->blocklen = 512;
num_blocks = 128;
src_offset = bdev->blockcnt - num_blocks;
expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
CU_ASSERT_EQUAL(rc, 0);
num_completed = stub_complete_io(1);
CU_ASSERT_EQUAL(num_completed, 1);
/* Check that if copy is not supported it'll fail */
/* Check that if copy is not supported it'll still work */
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
CU_ASSERT_EQUAL(rc, -ENOTSUP);
CU_ASSERT_EQUAL(rc, 0);
num_completed = stub_complete_io(1);
CU_ASSERT_EQUAL(num_completed, 1);
num_completed = stub_complete_io(1);
CU_ASSERT_EQUAL(num_completed, 1);
ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
spdk_put_io_channel(ioch);


@@ -743,9 +743,7 @@ test_nvmf_bdev_ctrlr_cmd(void)
memset(&rsp, 0, sizeof(rsp));
rc = nvmf_bdev_ctrlr_copy_cmd(&bdev, NULL, &ch, &req);
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
MOCK_SET(spdk_bdev_io_type_supported, true);