bdev: pull/push data if bdev doesn't support memory domains

If a bdev doesn't support any memory domain, allocate an internal
bounce buffer: pull data from the memory domain before submitting
a write operation, and push data back to the memory domain once a
read operation completes.

Update the test tool with a simple implementation of the pull/push
functions.

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: Ie9b94463e6a818bcd606fbb898fb0d6e0b5d5027
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10069
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Alexey Marchuk 2021-10-20 09:35:28 +03:00, committed by Tomasz Zawadzki
parent 18c6e89d9a
commit 1299439f3d
7 changed files with 403 additions and 32 deletions
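
For orientation before the diff, here is a minimal caller-side sketch of the path this commit adds. The helper names io_done and read_via_domain are illustrative only, and the memory domain is assumed to have been created earlier (e.g. with spdk_memory_domain_create()); spdk_bdev_readv_blocks_ext() and the spdk_bdev_ext_io_opts fields are the ones exercised by this patch.

    #include "spdk/bdev.h"
    #include "spdk/dma.h"

    static void
    io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    {
        /* For a read, data has already been pushed back into the caller's
         * memory domain by the time this callback runs. */
        spdk_bdev_free_io(bdev_io);
    }

    /* 'domain' and 'domain_ctx' are assumed to be owned by the caller;
     * 'iov' may describe buffers that live in that domain's address space. */
    static int
    read_via_domain(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                    struct iovec *iov, struct spdk_memory_domain *domain,
                    void *domain_ctx)
    {
        struct spdk_bdev_ext_io_opts opts = {
            .size = sizeof(opts),
            .memory_domain = domain,
            .memory_domain_ctx = domain_ctx,
        };

        /* offset 0, 1 block; illustrative values */
        return spdk_bdev_readv_blocks_ext(desc, ch, iov, 1, 0, 1,
                                          io_done, NULL, &opts);
    }

On submission, the bdev layer sees opts->memory_domain set while desc->memory_domains_supported is false, copies ext_opts, clears the domain pointers in the copy, and bounces the data through an internal buffer: pull before a write, push after a read.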

View File

@@ -46,6 +46,7 @@
 #include "spdk/notify.h"
 #include "spdk/util.h"
 #include "spdk/trace.h"
+#include "spdk/dma.h"
 #include "spdk/bdev_module.h"
 #include "spdk/log.h"
@@ -322,6 +323,7 @@ struct spdk_bdev_desc {
     } callback;
     bool closed;
     bool write;
+    bool memory_domains_supported;
     pthread_mutex_t mutex;
     uint32_t refs;
     TAILQ_HEAD(, media_event_entry) pending_media_events;
@@ -752,6 +754,12 @@ spdk_bdev_next_leaf(struct spdk_bdev *prev)
     return bdev;
 }

+static inline bool
+bdev_io_use_memory_domain(struct spdk_bdev_io *bdev_io)
+{
+    return bdev_io->internal.ext_opts && bdev_io->internal.ext_opts->memory_domain;
+}
+
 void
 spdk_bdev_io_set_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len)
 {
@@ -869,6 +877,8 @@ _bdev_io_pull_buffer_cpl(void *ctx, int rc)
 static void
 _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
 {
+    int rc = 0;
+
     /* save original md_buf */
     bdev_io->internal.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
     bdev_io->internal.orig_md_iov.iov_len = len;
@@ -878,11 +888,26 @@ _bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
     bdev_io->u.bdev.md_buf = md_buf;

     if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
-        memcpy(md_buf, bdev_io->internal.orig_md_iov.iov_base, bdev_io->internal.orig_md_iov.iov_len);
+        if (bdev_io_use_memory_domain(bdev_io)) {
+            rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain,
+                                              bdev_io->internal.ext_opts->memory_domain_ctx,
+                                              &bdev_io->internal.orig_md_iov, 1,
+                                              &bdev_io->internal.bounce_md_iov, 1,
+                                              bdev_io->internal.data_transfer_cpl,
+                                              bdev_io);
+            if (rc == 0) {
+                /* Continue to submit IO in completion callback */
+                return;
+            }
+            SPDK_ERRLOG("Failed to pull data from memory domain %s, rc %d\n",
+                        spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain), rc);
+        } else {
+            memcpy(md_buf, bdev_io->internal.orig_md_iov.iov_base, bdev_io->internal.orig_md_iov.iov_len);
+        }
     }

     assert(bdev_io->internal.data_transfer_cpl);
-    bdev_io->internal.data_transfer_cpl(bdev_io, 0);
+    bdev_io->internal.data_transfer_cpl(bdev_io, rc);
 }

 static void
@@ -928,6 +953,8 @@ static void
 _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
                               bdev_copy_bounce_buffer_cpl cpl_cb)
 {
+    int rc = 0;
+
     bdev_io->internal.data_transfer_cpl = cpl_cb;

     /* save original iovec */
     bdev_io->internal.orig_iovs = bdev_io->u.bdev.iovs;
@@ -940,10 +967,26 @@ _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t len,
     bdev_io->u.bdev.iovs[0].iov_len = len;

     /* if this is write path, copy data from original buffer to bounce buffer */
     if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
-        _copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt);
+        if (bdev_io_use_memory_domain(bdev_io)) {
+            rc = spdk_memory_domain_pull_data(bdev_io->internal.ext_opts->memory_domain,
+                                              bdev_io->internal.ext_opts->memory_domain_ctx,
+                                              bdev_io->internal.orig_iovs,
+                                              (uint32_t) bdev_io->internal.orig_iovcnt,
+                                              bdev_io->u.bdev.iovs, 1,
+                                              _bdev_io_pull_bounce_data_buf_done,
+                                              bdev_io);
+            if (rc == 0) {
+                /* Continue to submit IO in completion callback */
+                return;
+            }
+            SPDK_ERRLOG("Failed to pull data from memory domain %s\n",
+                        spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
+        } else {
+            _copy_iovs_to_buf(buf, len, bdev_io->internal.orig_iovs, bdev_io->internal.orig_iovcnt);
+        }
     }

-    _bdev_io_pull_bounce_data_buf_done(bdev_io, 0);
+    _bdev_io_pull_bounce_data_buf_done(bdev_io, rc);
 }

 static void
@@ -1122,19 +1165,38 @@ _bdev_io_complete_push_bounce_done(void *ctx, int rc)
 static inline void
 _bdev_io_push_bounce_md_buffer(struct spdk_bdev_io *bdev_io)
 {
+    int rc = 0;
+
     /* do the same for metadata buffer */
     if (spdk_unlikely(bdev_io->internal.orig_md_iov.iov_base != NULL)) {
         assert(spdk_bdev_is_md_separate(bdev_io->bdev));

         if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
             bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
-            memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
-                   bdev_io->internal.orig_md_iov.iov_len);
+            if (bdev_io_use_memory_domain(bdev_io)) {
+                /* If memory domain is used then we need to call async push function */
+                rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain,
+                                                  bdev_io->internal.ext_opts->memory_domain_ctx,
+                                                  &bdev_io->internal.orig_md_iov,
+                                                  (uint32_t)bdev_io->internal.orig_iovcnt,
+                                                  &bdev_io->internal.bounce_md_iov, 1,
+                                                  bdev_io->internal.data_transfer_cpl,
+                                                  bdev_io);
+                if (rc == 0) {
+                    /* Continue IO completion in async callback */
+                    return;
+                }
+                SPDK_ERRLOG("Failed to push md to memory domain %s\n",
+                            spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
+            } else {
+                memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
+                       bdev_io->internal.orig_md_iov.iov_len);
+            }
         }
     }

     assert(bdev_io->internal.data_transfer_cpl);
-    bdev_io->internal.data_transfer_cpl(bdev_io, 0);
+    bdev_io->internal.data_transfer_cpl(bdev_io, rc);
 }

 static void
@@ -1162,18 +1224,37 @@ _bdev_io_push_bounce_data_buffer_done(void *ctx, int rc)
 static inline void
 _bdev_io_push_bounce_data_buffer(struct spdk_bdev_io *bdev_io, bdev_copy_bounce_buffer_cpl cpl_cb)
 {
+    int rc = 0;
+
     bdev_io->internal.data_transfer_cpl = cpl_cb;

     /* if this is read path, copy data from bounce buffer to original buffer */
     if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ &&
         bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
-        _copy_buf_to_iovs(bdev_io->internal.orig_iovs,
-                          bdev_io->internal.orig_iovcnt,
-                          bdev_io->internal.bounce_iov.iov_base,
-                          bdev_io->internal.bounce_iov.iov_len);
+        if (bdev_io_use_memory_domain(bdev_io)) {
+            /* If memory domain is used then we need to call async push function */
+            rc = spdk_memory_domain_push_data(bdev_io->internal.ext_opts->memory_domain,
+                                              bdev_io->internal.ext_opts->memory_domain_ctx,
+                                              bdev_io->internal.orig_iovs,
+                                              (uint32_t)bdev_io->internal.orig_iovcnt,
+                                              &bdev_io->internal.bounce_iov, 1,
+                                              _bdev_io_push_bounce_data_buffer_done,
+                                              bdev_io);
+            if (rc == 0) {
+                /* Continue IO completion in async callback */
+                return;
+            }
+            SPDK_ERRLOG("Failed to push data to memory domain %s\n",
+                        spdk_memory_domain_get_dma_device_id(bdev_io->internal.ext_opts->memory_domain));
+        } else {
+            _copy_buf_to_iovs(bdev_io->internal.orig_iovs,
+                              bdev_io->internal.orig_iovcnt,
+                              bdev_io->internal.bounce_iov.iov_base,
+                              bdev_io->internal.bounce_iov.iov_len);
+        }
     }

-    _bdev_io_push_bounce_data_buffer_done(bdev_io, 0);
+    _bdev_io_push_bounce_data_buffer_done(bdev_io, rc);
 }

 static void
@@ -1239,6 +1320,28 @@ spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
     bdev_io_get_buf(bdev_io, len);
 }

+static void
+_bdev_memory_domain_get_io_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
+                              bool success)
+{
+    if (!success) {
+        SPDK_ERRLOG("Failed to get data buffer, completing IO\n");
+        bdev_io_complete(bdev_io);
+    } else {
+        bdev_io_submit(bdev_io);
+    }
+}
+
+static void
+_bdev_memory_domain_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
+                               uint64_t len)
+{
+    assert(cb != NULL);
+    bdev_io->internal.get_buf_cb = cb;
+
+    bdev_io_get_buf(bdev_io, len);
+}
+
 void
 spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
 {
@@ -3989,6 +4092,33 @@ _bdev_io_check_md_buf(const struct iovec *iovs, const void *md_buf)
     return _is_buf_allocated(iovs) == (md_buf != NULL);
 }

+static inline void
+_bdev_io_copy_ext_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
+{
+    struct spdk_bdev_ext_io_opts *opts_copy = &bdev_io->internal.ext_opts_copy;
+
+    memcpy(opts_copy, opts, opts->size);
+    bdev_io->internal.ext_opts_copy.metadata = bdev_io->u.bdev.md_buf;
+    /* Save pointer to the copied ext_opts which will be used by bdev modules */
+    bdev_io->u.bdev.ext_opts = opts_copy;
+}
+
+static inline void
+_bdev_io_ext_use_bounce_buffer(struct spdk_bdev_io *bdev_io)
+{
+    /* bdev doesn't support memory domains, thereby buffers in this IO request can't
+     * be accessed directly. It is needed to allocate buffers before issuing IO operation.
+     * For write operation we need to pull buffers from memory domain before submitting IO.
+     * Once read operation completes, we need to use memory_domain push functionality to
+     * update data in original memory domain IO buffer.
+     * This IO request will go through a regular IO flow, so clear memory domains pointers in
+     * the copied ext_opts */
+    bdev_io->internal.ext_opts_copy.memory_domain = NULL;
+    bdev_io->internal.ext_opts_copy.memory_domain_ctx = NULL;
+    _bdev_memory_domain_io_get_buf(bdev_io, _bdev_memory_domain_get_io_cb,
+                                   bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
+}
+
 static int
 bdev_read_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
                          void *md_buf, uint64_t offset_blocks, uint64_t num_blocks,
@@ -4093,7 +4223,6 @@ bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
     struct spdk_bdev_io *bdev_io;
     struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
-    struct spdk_bdev_ext_io_opts *opts_copy;

     if (!bdev_io_valid_blocks(bdev, offset_blocks, num_blocks)) {
         return -EINVAL;
@@ -4116,13 +4245,17 @@ bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io->internal.ext_opts = opts;
     bdev_io->u.bdev.ext_opts = opts;

-    if (opts && copy_opts) {
+    if (opts) {
+        bool use_pull_push = opts->memory_domain && !desc->memory_domains_supported;
+
         assert(opts->size <= sizeof(*opts));
-        opts_copy = &bdev_io->internal.ext_opts_copy;
-        memcpy(opts_copy, opts, opts->size);
-        bdev_io->internal.ext_opts_copy.metadata = md_buf;
-        bdev_io->internal.ext_opts = opts_copy;
-        bdev_io->u.bdev.ext_opts = opts_copy;
+        if (copy_opts || use_pull_push) {
+            _bdev_io_copy_ext_opts(bdev_io, opts);
+            if (use_pull_push) {
+                _bdev_io_ext_use_bounce_buffer(bdev_io);
+                return 0;
+            }
+        }
     }

     bdev_io_submit(bdev_io);
@@ -4170,6 +4303,10 @@ spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
         if (spdk_unlikely(!opts->size || opts->size > sizeof(struct spdk_bdev_ext_io_opts))) {
             return -EINVAL;
         }
+        if (spdk_unlikely(opts->memory_domain && !(iov && iov[0].iov_base))) {
+            /* When memory domain is used, the user must provide data buffers */
+            return -EINVAL;
+        }
         md = opts->metadata;
     }
@@ -4279,7 +4416,6 @@ bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(desc);
     struct spdk_bdev_io *bdev_io;
     struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
-    struct spdk_bdev_ext_io_opts *opts_copy;

     if (!desc->write) {
         return -EBADF;
@@ -4306,13 +4442,17 @@ bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
     bdev_io->internal.ext_opts = opts;
     bdev_io->u.bdev.ext_opts = opts;

-    if (opts && copy_opts) {
+    if (opts) {
+        bool use_pull_push = opts->memory_domain && !desc->memory_domains_supported;
+
         assert(opts->size <= sizeof(*opts));
-        opts_copy = &bdev_io->internal.ext_opts_copy;
-        memcpy(opts_copy, opts, opts->size);
-        bdev_io->internal.ext_opts_copy.metadata = md_buf;
-        bdev_io->internal.ext_opts = opts_copy;
-        bdev_io->u.bdev.ext_opts = opts_copy;
+        if (copy_opts || use_pull_push) {
+            _bdev_io_copy_ext_opts(bdev_io, opts);
+            if (use_pull_push) {
+                _bdev_io_ext_use_bounce_buffer(bdev_io);
+                return 0;
+            }
+        }
     }

     bdev_io_submit(bdev_io);
@@ -4377,6 +4517,10 @@ spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
         if (spdk_unlikely(!opts->size || opts->size > sizeof(struct spdk_bdev_ext_io_opts))) {
             return -EINVAL;
         }
+        if (spdk_unlikely(opts->memory_domain && !(iov && iov[0].iov_base))) {
+            /* When memory domain is used, the user must provide data buffers */
+            return -EINVAL;
+        }
         md = opts->metadata;
     }
@@ -6282,6 +6426,7 @@ spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
     TAILQ_INIT(&desc->pending_media_events);
     TAILQ_INIT(&desc->free_media_events);

+    desc->memory_domains_supported = spdk_bdev_get_memory_domains(bdev, NULL, 0) > 0;
     desc->callback.event_fn = event_cb;
     desc->callback.ctx = event_ctx;
     pthread_mutex_init(&desc->mutex, NULL);

View File

@@ -83,7 +83,7 @@ DEPDIRS-net := log util $(JSON_LIBS)
 DEPDIRS-notify := log util $(JSON_LIBS)
 DEPDIRS-trace := log util $(JSON_LIBS)

-DEPDIRS-bdev := log util thread $(JSON_LIBS) notify trace
+DEPDIRS-bdev := log util thread $(JSON_LIBS) notify trace dma
 DEPDIRS-blobfs := log thread blob trace
 DEPDIRS-event := log util thread $(JSON_LIBS) trace init
 DEPDIRS-init := jsonrpc json log rpc thread util

View File

@@ -81,6 +81,11 @@ struct dma_test_task {
     TAILQ_ENTRY(dma_test_task) link;
 };

+struct dma_test_data_cpl_ctx {
+    spdk_memory_domain_data_cpl_cb data_cpl;
+    void *data_cpl_arg;
+};
+
 TAILQ_HEAD(, dma_test_task) g_tasks = TAILQ_HEAD_INITIALIZER(g_tasks);

 /* User's input */
@@ -274,6 +279,55 @@ dma_test_task_is_read(struct dma_test_task *task)
     return false;
 }

+static void
+dma_test_data_cpl(void *ctx)
+{
+    struct dma_test_data_cpl_ctx *cpl_ctx = ctx;
+
+    cpl_ctx->data_cpl(cpl_ctx->data_cpl_arg, 0);
+    free(cpl_ctx);
+}
+
+static int
+dma_test_copy_memory(struct dma_test_req *req, struct iovec *dst_iov, uint32_t dst_iovcnt,
+                     struct iovec *src_iov, uint32_t src_iovcnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    struct dma_test_data_cpl_ctx *cpl_ctx;
+
+    cpl_ctx = calloc(1, sizeof(*cpl_ctx));
+    if (!cpl_ctx) {
+        return -ENOMEM;
+    }
+
+    cpl_ctx->data_cpl = cpl_cb;
+    cpl_ctx->data_cpl_arg = cpl_cb_arg;
+
+    spdk_iovcpy(src_iov, src_iovcnt, dst_iov, dst_iovcnt);
+    spdk_thread_send_msg(req->task->thread, dma_test_data_cpl, cpl_ctx);
+
+    return 0;
+}
+
+static int dma_test_push_memory_cb(struct spdk_memory_domain *dst_domain,
+                                   void *dst_domain_ctx,
+                                   struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
+                                   spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    struct dma_test_req *req = dst_domain_ctx;
+
+    return dma_test_copy_memory(req, dst_iov, dst_iovcnt, src_iov, src_iovcnt, cpl_cb, cpl_cb_arg);
+}
+
+static int dma_test_pull_memory_cb(struct spdk_memory_domain *src_domain,
+                                   void *src_domain_ctx,
+                                   struct iovec *src_iov, uint32_t src_iovcnt, struct iovec *dst_iov, uint32_t dst_iovcnt,
+                                   spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    struct dma_test_req *req = src_domain_ctx;
+
+    return dma_test_copy_memory(req, dst_iov, dst_iovcnt, src_iov, src_iovcnt, cpl_cb, cpl_cb_arg);
+}
+
 static int
 dma_test_translate_memory_cb(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
                              struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
@@ -666,6 +720,8 @@ dma_test_start(void *arg)
         return;
     }
     spdk_memory_domain_set_translation(g_domain, dma_test_translate_memory_cb);
+    spdk_memory_domain_set_pull(g_domain, dma_test_pull_memory_cb);
+    spdk_memory_domain_set_push(g_domain, dma_test_push_memory_cb);

     SPDK_ENV_FOREACH_CORE(i) {
         rc = allocate_task(i, g_bdev_name);
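
For completeness, a sketch of how these callbacks plug into a caller-created domain. Only the set_translation/set_pull/set_push calls appear in the diff above; spdk_memory_domain_create() and its arguments below are assumptions based on the spdk/dma.h API, and dma_test_create_domain is a hypothetical helper name:

    /* Hypothetical helper: create the test memory domain and register the
     * translation/pull/push callbacks defined in this file. */
    static int
    dma_test_create_domain(void)
    {
        /* Assumption: the device type and id string are illustrative */
        int rc = spdk_memory_domain_create(&g_domain, SPDK_DMA_DEVICE_TYPE_DMA,
                                           NULL, "dma_test");

        if (rc != 0) {
            return rc;
        }

        spdk_memory_domain_set_translation(g_domain, dma_test_translate_memory_cb);
        spdk_memory_domain_set_pull(g_domain, dma_test_pull_memory_cb);
        spdk_memory_domain_set_push(g_domain, dma_test_push_memory_cb);

        return 0;
    }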

View File

@@ -14,6 +14,50 @@ MALLOC_BLOCK_SIZE=512
 subsystem="0"
 rpc_py="$rootdir/scripts/rpc.py"

+function gen_malloc_json() {
+	jq . <<- JSON
+		{
+		  "subsystems": [
+		    {
+		      "subsystem": "bdev",
+		      "config": [
+		        {
+		          "method": "bdev_nvme_set_options",
+		          "params": {
+		            "action_on_timeout": "none",
+		            "timeout_us": 0,
+		            "retry_count": 4,
+		            "arbitration_burst": 0,
+		            "low_priority_weight": 0,
+		            "medium_priority_weight": 0,
+		            "high_priority_weight": 0,
+		            "nvme_adminq_poll_period_us": 10000,
+		            "keep_alive_timeout_ms": 10000,
+		            "nvme_ioq_poll_period_us": 0,
+		            "io_queue_requests": 0,
+		            "delay_cmd_submit": true
+		          }
+		        },
+		        {
+		          "method": "bdev_malloc_create",
+		          "params": {
+		            "name": "Malloc0",
+		            "num_blocks": 131072,
+		            "block_size": 512,
+		            "uuid": "e1c24cb1-dd44-4be6-8d67-de92a332013f",
+		            "optimal_io_boundary": 2
+		          }
+		        },
+		        {
+		          "method": "bdev_wait_for_examine"
+		        }
+		      ]
+		    }
+		  ]
+		}
+	JSON
+}
+
 nvmftestinit
 nvmfappstart -m 0x3
@@ -27,6 +71,9 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode$subsystem -t $TEST
 # test_dma doesn't use RPC, but we change the sock path since nvmf target is already using the default RPC sock
 "$rootdir/test/dma/test_dma/test_dma" -q 16 -o 4096 -w randrw -M 70 -t 5 -m 0xc --json <(gen_nvmf_target_json $subsystem) -b "Nvme${subsystem}n1" -f -r /var/tmp/dma.sock

+# test data pull/push with split against local malloc
+"$rootdir/test/dma/test_dma/test_dma" -q 16 -o 4096 -w randrw -M 70 -t 5 -m 0xc --json <(gen_malloc_json) -b "Malloc0" -r /var/tmp/dma.sock
+
 trap - SIGINT SIGTERM EXIT

 nvmftestfini

View File

@ -45,6 +45,37 @@
DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
"test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
(struct spdk_memory_domain *domain), 0);
static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
g_memory_domain_pull_data_called = true;
HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
cpl_cb(cpl_cb_arg, 0);
return 0;
}
DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
g_memory_domain_push_data_called = true;
HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
cpl_cb(cpl_cb_arg, 0);
return 0;
}
int g_status; int g_status;
int g_count; int g_count;
@@ -238,8 +269,8 @@ stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
     if (expected_io->md_buf != NULL) {
         CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
-        if (bdev_io->internal.ext_opts) {
-            CU_ASSERT(expected_io->md_buf == bdev_io->internal.ext_opts->metadata);
+        if (bdev_io->u.bdev.ext_opts) {
+            CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
         }
     }
@@ -4900,7 +4931,8 @@ bdev_writev_readv_ext(void)
     struct spdk_bdev *bdev;
     struct spdk_bdev_desc *desc = NULL;
     struct spdk_io_channel *io_ch;
-    struct iovec iov = { .iov_base = (void *)0xbaaddead, .iov_len = 0x1000 };
+    char io_buf[512];
+    struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
     struct ut_expected_io *expected_io;
     struct spdk_bdev_ext_io_opts ext_io_opts = {
         .metadata = (void *)0xFF000000,
@@ -4921,6 +4953,7 @@ bdev_writev_readv_ext(void)
     io_ch = spdk_bdev_get_io_channel(desc);
     CU_ASSERT(io_ch != NULL);

+    /* Test 1, Simple test */
     g_io_done = false;
     expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
     expected_io->md_buf = ext_io_opts.metadata;
@@ -4951,7 +4984,7 @@ bdev_writev_readv_ext(void)
     stub_complete_io(1);
     CU_ASSERT(g_io_done == true);

-    /* Test invalid ext_opts size */
+    /* Test 2, invalid ext_opts size */
     ext_io_opts.size = 0;
     rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
     CU_ASSERT(rc != 0);
@@ -4964,7 +4997,7 @@ bdev_writev_readv_ext(void)
     rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
     CU_ASSERT(rc != 0);

-    /* Check that IO request with ext_opts and metadata is split correctly
+    /* Test 3, Check that IO request with ext_opts and metadata is split correctly
      * Offset 14, length 8, payload 0xF000
      * Child - Offset 14, length 2, payload 0xF000
      * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
@@ -5079,6 +5112,40 @@ bdev_writev_readv_ext(void)
     CU_ASSERT(g_io_done == true);
     CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

+    /* Test 4, Verify data pull/push
+     * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */
+    ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;
+
+    g_io_done = false;
+    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
+    ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
+    expected_io->ext_io_opts = &ext_io_opts;
+    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+    rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
+    CU_ASSERT(rc == 0);
+    CU_ASSERT(g_io_done == false);
+    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+    stub_complete_io(1);
+    CU_ASSERT(g_memory_domain_push_data_called == true);
+    CU_ASSERT(g_io_done == true);
+
+    g_io_done = false;
+    expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
+    ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
+    expected_io->ext_io_opts = &ext_io_opts;
+    TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+    rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
+    CU_ASSERT(rc == 0);
+    CU_ASSERT(g_memory_domain_pull_data_called == true);
+    CU_ASSERT(g_io_done == false);
+    CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+    stub_complete_io(1);
+    CU_ASSERT(g_io_done == true);
+
     spdk_put_io_channel(io_ch);
     spdk_bdev_close(desc);
     free_bdev(bdev);

View File

@@ -48,6 +48,34 @@ DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
                                          int *asc, int *ascq));
+DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
+            "test_domain");
+DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
+            (struct spdk_memory_domain *domain), 0);
+
+DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
+int
+spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
+                             struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
+                             spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
+    cpl_cb(cpl_cb_arg, 0);
+    return 0;
+}
+
+DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
+int
+spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+                             struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
+                             spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
+    cpl_cb(cpl_cb_arg, 0);
+    return 0;
+}
+
 struct ut_bdev {
     struct spdk_bdev bdev;

View File

@@ -45,6 +45,34 @@
 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);

+DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
+            "test_domain");
+DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
+            (struct spdk_memory_domain *domain), 0);
+
+DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
+int
+spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
+                             struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
+                             spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
+    cpl_cb(cpl_cb_arg, 0);
+    return 0;
+}
+
+DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
+int
+spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+                             struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
+                             spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
+{
+    HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
+    cpl_cb(cpl_cb_arg, 0);
+    return 0;
+}
+
 static void
 _part_cleanup(struct spdk_bdev_part *part)