bdev/crypto: use accel appends for encryption

Similarly to the read path, the write path now also uses the
spdk_accel_append* interface to perform the encrypt operation.
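
As a quick orientation (not part of the patch itself), the sketch below shows
the shape of that flow: an encrypt step is appended to an accel sequence and
the sequence is then attached to the base-bdev write through the ext I/O
options.  Function and variable names here are illustrative; the calls
themselves (spdk_accel_append_encrypt(), spdk_bdev_writev_blocks_ext()) are
the ones used in the diff below.

/* Illustrative sketch only -- encrypt_and_write() is hypothetical, the APIs
 * match those used by the patch. */
#include "spdk/accel.h"
#include "spdk/bdev.h"

static int
encrypt_and_write(struct spdk_io_channel *accel_ch, struct spdk_accel_crypto_key *key,
                  struct spdk_bdev_desc *desc, struct spdk_io_channel *bdev_ch,
                  struct iovec *src_iovs, uint32_t src_iovcnt, struct iovec *dst_iov,
                  uint64_t offset_blocks, uint64_t num_blocks, uint32_t block_size,
                  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
    struct spdk_accel_sequence *seq = NULL;
    struct spdk_bdev_ext_io_opts opts = { .size = sizeof(opts) };
    int rc;

    /* Record the encrypt step in the sequence; nothing is executed yet.  The
     * block offset doubles as the IV, as in the crypto bdev.  The per-step
     * callback is omitted here (the patch uses it to release its aux buffer). */
    rc = spdk_accel_append_encrypt(&seq, accel_ch, key, dst_iov, 1, NULL, NULL,
                                   src_iovs, src_iovcnt, NULL, NULL,
                                   offset_blocks, block_size, 0, NULL, NULL);
    if (rc != 0) {
        return rc;
    }

    /* Hand the sequence to the base bdev; it is executed as part of the write. */
    opts.accel_sequence = seq;

    return spdk_bdev_writev_blocks_ext(desc, bdev_ch, dst_iov, 1, offset_blocks,
                                       num_blocks, cb, cb_arg, &opts);
}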

Additionally, this patch changes the way the aux buffer is allocated:
spdk_bdev_io_get_aux_buf() was replaced with spdk_accel_get_buf().  This
ensures that the actual data buffer is only allocated if it's actually
needed.
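
For context, a minimal sketch (again with illustrative names) of the new
aux-buffer lifetime: the buffer and, optionally, a memory domain describing it
come from spdk_accel_get_buf(), and are returned with spdk_accel_put_buf()
once the encrypt step that filled it has completed, mirroring
crypto_encrypt_cb() in the diff below.

#include "spdk/accel.h"

/* Illustrative helper context; the crypto bdev keeps the equivalent fields in
 * its per-IO structure (aux_buf_raw, aux_domain, aux_domain_ctx). */
struct aux_buf_ctx {
    struct spdk_io_channel *accel_ch;
    void *buf;
    struct spdk_memory_domain *domain;
    void *domain_ctx;
};

/* Acquire an accel-owned buffer of 'len' bytes.  Real memory is only backed
 * when something actually needs to touch it, which is the point of switching
 * away from spdk_bdev_io_get_aux_buf(). */
static int
aux_buf_acquire(struct aux_buf_ctx *ctx, struct spdk_io_channel *accel_ch, uint64_t len)
{
    ctx->accel_ch = accel_ch;

    return spdk_accel_get_buf(accel_ch, len, &ctx->buf, &ctx->domain, &ctx->domain_ctx);
}

/* Step callback for the appended encrypt operation: give the buffer back to
 * the accel framework once the step has completed. */
static void
aux_buf_release_cb(void *cb_arg)
{
    struct aux_buf_ctx *ctx = cb_arg;

    spdk_accel_put_buf(ctx->accel_ch, ctx->buf, ctx->domain, ctx->domain_ctx);
}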

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I3cd1d4f5753a95709d7b81de23d9227102a74261
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17022
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
Author:    Konrad Sztyber <konrad.sztyber@intel.com>
Date:      2023-02-22 13:21:25 +01:00
Committer: Ben Walker
Parent:    5d860c18d4
Commit:    85cf8d273f
2 changed files with 103 additions and 176 deletions

@@ -62,6 +62,8 @@ struct crypto_bdev_io {
     uint64_t aux_offset_blocks; /* block offset on media */
     void *aux_buf_raw;          /* raw buffer that the bdev layer gave us for write buffer */
     struct iovec aux_buf_iov;   /* iov representing aligned contig write buffer */
+    struct spdk_memory_domain *aux_domain;  /* memory domain of the aux buf */
+    void *aux_domain_ctx;       /* memory domain ctx of the aux buf */
     struct spdk_accel_sequence *seq;        /* sequence of accel operations */
 
     /* for bdev_io_wait */
@@ -77,73 +79,56 @@ static void vbdev_crypto_examine(struct spdk_bdev *bdev);
 static int vbdev_crypto_claim(const char *bdev_name);
 static void vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);
 
-/* Following an encrypt or decrypt we need to then either write the encrypted data or finish
- * the read on decrypted data. Do that here.
- */
 static void
-_crypto_operation_complete(void *ref, int status)
+crypto_write(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
 {
-    struct spdk_bdev_io *bdev_io = ref;
     struct vbdev_crypto *crypto_bdev = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_crypto,
                                        crypto_bdev);
     struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
-    struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
-    int rc = 0;
+    struct spdk_bdev_ext_io_opts opts = {};
+    int rc;
 
-    if (status || crypto_ch->reset_iter) {
-        /* If we're completing this with an outstanding reset we need to fail it */
-        rc = -EINVAL;
-    }
-
-    TAILQ_REMOVE(&crypto_ch->in_accel_fw, bdev_io, module_link);
-
-    if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
-        if (!rc) {
-            /* Write the encrypted data. */
-            rc = spdk_bdev_writev_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
-                                         &crypto_io->aux_buf_iov, 1, crypto_io->aux_offset_blocks,
-                                         crypto_io->aux_num_blocks, _complete_internal_write,
-                                         bdev_io);
-            if (rc == -ENOMEM) {
-                vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_ENCRYPT_DONE);
-                goto check_reset;
-            }
+    opts.size = sizeof(opts);
+    opts.accel_sequence = crypto_io->seq;
+    opts.memory_domain = crypto_io->aux_domain;
+    opts.memory_domain_ctx = crypto_io->aux_domain_ctx;
+
+    /* Write the encrypted data. */
+    rc = spdk_bdev_writev_blocks_ext(crypto_bdev->base_desc, crypto_ch->base_ch,
+                                     &crypto_io->aux_buf_iov, 1, crypto_io->aux_offset_blocks,
+                                     crypto_io->aux_num_blocks, _complete_internal_write,
+                                     bdev_io, &opts);
+    if (spdk_unlikely(rc != 0)) {
+        if (rc == -ENOMEM) {
+            SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
+            vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_ENCRYPT_DONE);
         } else {
-            SPDK_ERRLOG("Issue with encryption on bdev_io %p\n", bdev_io);
+            SPDK_ERRLOG("Failed to submit bdev_io!\n");
+            spdk_accel_sequence_abort(crypto_io->seq);
+            spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
         }
-    } else {
-        SPDK_ERRLOG("Unknown bdev type %u on crypto operation completion\n", bdev_io->type);
-        rc = -EINVAL;
-    }
-
-    if (rc) {
-        if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
-            spdk_bdev_io_put_aux_buf(bdev_io, crypto_io->aux_buf_raw);
-        }
-        spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
-    }
-check_reset:
-    /* If the channel iter is not NULL, we need to wait
-     * until the pending list is empty, then we can move on to the
-     * next channel.
-     */
-    if (crypto_ch->reset_iter && TAILQ_EMPTY(&crypto_ch->in_accel_fw)) {
-        SPDK_NOTICELOG("Channel %p has been quiesced.\n", crypto_ch);
-        spdk_for_each_channel_continue(crypto_ch->reset_iter, 0);
-        crypto_ch->reset_iter = NULL;
     }
 }
 
+static void
+crypto_encrypt_cb(void *cb_arg)
+{
+    struct crypto_bdev_io *crypto_io = cb_arg;
+    struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
+
+    spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
+                       crypto_io->aux_domain, crypto_io->aux_domain_ctx);
+}
+
 /* We're either encrypting on the way down or decrypting on the way back. */
-static int
-_crypto_encrypt(struct spdk_bdev_io *bdev_io, void *aux_buf)
+static void
+crypto_encrypt(struct crypto_io_channel *crypto_ch, struct spdk_bdev_io *bdev_io)
 {
     struct crypto_bdev_io *crypto_io = (struct crypto_bdev_io *)bdev_io->driver_ctx;
-    struct crypto_io_channel *crypto_ch = crypto_io->crypto_ch;
     uint32_t crypto_len = crypto_io->crypto_bdev->crypto_bdev.blocklen;
     uint64_t total_length;
     uint64_t alignment;
+    void *aux_buf = crypto_io->aux_buf_raw;
     int rc;
 
     /* For encryption, we need to prepare a single contiguous buffer as the encryption
@@ -154,22 +139,33 @@ _crypto_encrypt(struct spdk_bdev_io *bdev_io, void *aux_buf)
     total_length = bdev_io->u.bdev.num_blocks * crypto_len;
     alignment = spdk_bdev_get_buf_align(&crypto_io->crypto_bdev->crypto_bdev);
     crypto_io->aux_buf_iov.iov_len = total_length;
-    crypto_io->aux_buf_raw = aux_buf;
     crypto_io->aux_buf_iov.iov_base = (void *)(((uintptr_t)aux_buf + (alignment - 1)) & ~
                                       (alignment - 1));
     crypto_io->aux_offset_blocks = bdev_io->u.bdev.offset_blocks;
     crypto_io->aux_num_blocks = bdev_io->u.bdev.num_blocks;
 
-    rc = spdk_accel_submit_encrypt(crypto_ch->accel_channel, crypto_ch->crypto_key,
-                                   &crypto_io->aux_buf_iov, 1,
-                                   bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
+    rc = spdk_accel_append_encrypt(&crypto_io->seq, crypto_ch->accel_channel,
+                                   crypto_ch->crypto_key, &crypto_io->aux_buf_iov, 1,
+                                   crypto_io->aux_domain, crypto_io->aux_domain_ctx,
+                                   bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, NULL, NULL,
                                    bdev_io->u.bdev.offset_blocks, crypto_len, 0,
-                                   _crypto_operation_complete, bdev_io);
-    if (!rc) {
-        TAILQ_INSERT_TAIL(&crypto_ch->in_accel_fw, bdev_io, module_link);
+                                   crypto_encrypt_cb, crypto_io);
+    if (spdk_unlikely(rc != 0)) {
+        spdk_accel_put_buf(crypto_ch->accel_channel, crypto_io->aux_buf_raw,
+                           crypto_io->aux_domain, crypto_io->aux_domain_ctx);
+        if (rc == -ENOMEM) {
+            SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
+            vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
+        } else {
+            SPDK_ERRLOG("Failed to submit bdev_io!\n");
+            spdk_accel_sequence_abort(crypto_io->seq);
+            spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+        }
+
+        return;
     }
 
-    return rc;
+    crypto_write(crypto_ch, bdev_io);
 }
 
 /* This function is called after all channels have been quiesced following
@@ -231,9 +227,6 @@ _complete_internal_write(struct spdk_bdev_io *bdev_io, bool success, void *cb_ar
 {
     struct spdk_bdev_io *orig_io = cb_arg;
     int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
-    struct crypto_bdev_io *orig_ctx = (struct crypto_bdev_io *)orig_io->driver_ctx;
-
-    spdk_bdev_io_put_aux_buf(orig_io, orig_ctx->aux_buf_raw);
 
     spdk_bdev_io_complete(orig_io, status);
     spdk_bdev_free_io(bdev_io);
@@ -271,7 +264,7 @@ vbdev_crypto_resubmit_io(void *arg)
         vbdev_crypto_submit_request(ch, bdev_io);
         break;
     case CRYPTO_IO_ENCRYPT_DONE:
-        _crypto_operation_complete(bdev_io, 0);
+        crypto_write(crypto_io->crypto_ch, bdev_io);
         break;
     case CRYPTO_IO_DECRYPT_DONE:
         crypto_read(crypto_io->crypto_ch, bdev_io);
@@ -373,34 +366,6 @@ crypto_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
     crypto_read(crypto_ch, bdev_io);
 }
 
-/* For encryption we don't want to encrypt the data in place as the host isn't
- * expecting us to mangle its data buffers so we need to encrypt into the bdev
- * aux buffer, then we can use that as the source for the disk data transfer.
- */
-static void
-crypto_write_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
-                        void *aux_buf)
-{
-    int rc;
-
-    if (spdk_unlikely(!aux_buf)) {
-        SPDK_ERRLOG("Failed to get aux buffer!\n");
-        spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
-        return;
-    }
-
-    rc = _crypto_encrypt(bdev_io, aux_buf);
-    if (rc != 0) {
-        spdk_bdev_io_put_aux_buf(bdev_io, aux_buf);
-        if (rc == -ENOMEM) {
-            SPDK_DEBUGLOG(vbdev_crypto, "No memory, queue the IO.\n");
-            vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
-        } else {
-            SPDK_ERRLOG("Failed to submit crypto operation!\n");
-            spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
-        }
-    }
-}
-
 /* Called when someone submits IO to this crypto vbdev. For IO's not relevant to crypto,
  * we're simply passing it on here via SPDK IO calls which in turn allocate another bdev IO
  * and call our cpl callback provided below along with the original bdev_io so that we can
@@ -428,10 +393,17 @@ vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bde
                                bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
         break;
     case SPDK_BDEV_IO_TYPE_WRITE:
-        /* Tell the bdev layer that we need an aux buf in addition to the data
-         * buf already associated with the bdev.
+        /* For encryption we don't want to encrypt the data in place as the host isn't
+         * expecting us to mangle its data buffers so we need to encrypt into the aux accel
+         * buffer, then we can use that as the source for the disk data transfer.
         */
-        spdk_bdev_io_get_aux_buf(bdev_io, crypto_write_get_buf_cb);
+        rc = spdk_accel_get_buf(crypto_ch->accel_channel,
                                bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen,
+                                &crypto_io->aux_buf_raw, &crypto_io->aux_domain,
+                                &crypto_io->aux_domain_ctx);
+        if (rc == 0) {
+            crypto_encrypt(crypto_ch, bdev_io);
+        }
         break;
     case SPDK_BDEV_IO_TYPE_UNMAP:
         rc = spdk_bdev_unmap_blocks(crypto_bdev->base_desc, crypto_ch->base_ch,
@@ -462,6 +434,7 @@ vbdev_crypto_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bde
             vbdev_crypto_queue_io(bdev_io, CRYPTO_IO_NEW);
         } else {
             SPDK_ERRLOG("Failed to submit bdev_io!\n");
+            spdk_accel_sequence_abort(crypto_io->seq);
             spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
         }
     }

@@ -62,7 +62,16 @@ DEFINE_STUB(spdk_accel_append_decrypt, int,
         struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
         void *src_domain_ctx, uint64_t iv, uint32_t block_size, int flags,
         spdk_accel_step_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(spdk_accel_append_encrypt, int,
+        (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
+         struct spdk_accel_crypto_key *key, struct iovec *dst_iovs,
+         uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
+         struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
+         void *src_domain_ctx, uint64_t iv, uint32_t block_size, int flags,
+         spdk_accel_step_cb cb_fn, void *cb_arg), 0);
 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
+DEFINE_STUB_V(spdk_accel_put_buf, (struct spdk_io_channel *ch, void *buf,
+                                   struct spdk_memory_domain *domain, void *domain_ctx));
 
 /* global vars and setup/cleanup functions used for all test functions */
 struct spdk_bdev_io *g_bdev_io;
@@ -72,10 +81,14 @@ struct spdk_io_channel *g_io_ch;
 struct vbdev_crypto g_crypto_bdev;
 struct vbdev_crypto_opts g_crypto_bdev_opts;
 
-void
-spdk_bdev_io_get_aux_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_aux_buf_cb cb)
+int
+spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
+                   struct spdk_memory_domain **domain, void **domain_ctx)
 {
-    cb(g_io_ch, g_bdev_io, (void *)0xDEADBEEF);
+    *buf = (void *)0xdeadbeef;
+    *domain = (void *)0xbeefdead;
+
+    return 0;
 }
 
 void
@@ -129,15 +142,16 @@ spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *
     return 0;
 }
 
-DEFINE_RETURN_MOCK(spdk_bdev_writev_blocks, int);
+DEFINE_RETURN_MOCK(spdk_bdev_writev_blocks_ext, int);
 int
-spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
-                        struct iovec *iov, int iovcnt,
-                        uint64_t offset_blocks, uint64_t num_blocks,
-                        spdk_bdev_io_completion_cb cb, void *cb_arg)
+spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+                            struct iovec *iov, int iovcnt,
+                            uint64_t offset_blocks, uint64_t num_blocks,
+                            spdk_bdev_io_completion_cb cb, void *cb_arg,
+                            struct spdk_bdev_ext_io_opts *opts)
 {
-    HANDLE_RETURN_MOCK(spdk_bdev_writev_blocks);
+    HANDLE_RETURN_MOCK(spdk_bdev_writev_blocks_ext);
 
-    ut_vbdev_crypto_bdev_cpl(cb, g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+    ut_vbdev_crypto_bdev_cpl(cb, g_bdev_io, !ut_spdk_bdev_writev_blocks_ext, cb_arg);
     return 0;
 }
 
@@ -187,44 +201,6 @@ struct ut_vbdev_crypto_accel_cpl_args {
     int rc;
 };
 
-static void
-_vbdev_crypto_ut_accel_cpl(void *arg)
-{
-    struct ut_vbdev_crypto_accel_cpl_args *cpl_args = arg;
-
-    cpl_args->cb_fn(cpl_args->cb_arg, cpl_args->rc);
-    free(cpl_args);
-}
-
-static void
-vbdev_crypto_ut_accel_cpl(spdk_accel_completion_cb cb_fn, void *cb_arg, int rc)
-{
-    struct ut_vbdev_crypto_accel_cpl_args *cpl_args = calloc(1, sizeof(*cpl_args));
-
-    SPDK_CU_ASSERT_FATAL(cpl_args);
-    cpl_args->cb_fn = cb_fn;
-    cpl_args->cb_arg = cb_arg;
-    cpl_args->rc = rc;
-
-    spdk_thread_send_msg(spdk_get_thread(), _vbdev_crypto_ut_accel_cpl, cpl_args);
-}
-
-DEFINE_RETURN_MOCK(spdk_accel_submit_encrypt, int);
-int ut_spdk_accel_submit_encrypt_cb_rc = 0;
-int
-spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
-                          struct iovec *dst_iovs, uint32_t dst_iovcnt,
-                          struct iovec *src_iovs, uint32_t src_iovcnt,
-                          uint64_t iv, uint32_t block_size, int flags,
-                          spdk_accel_completion_cb cb_fn, void *cb_arg)
-{
-    HANDLE_RETURN_MOCK(spdk_accel_submit_encrypt);
-
-    /* We must not call cb_fn immediately */
-    vbdev_crypto_ut_accel_cpl(cb_fn, cb_arg, ut_spdk_accel_submit_encrypt_cb_rc);
-
-    return 0;
-}
-
 struct spdk_io_channel *spdk_accel_get_io_channel(void)
 {
     return (struct spdk_io_channel *)0xfeedbeef;
@@ -275,7 +251,7 @@ test_error_paths(void)
     g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
 
     /* test error returned by accel fw */
-    MOCK_SET(spdk_accel_submit_encrypt, -ENOMEM);
+    MOCK_SET(spdk_accel_append_encrypt, -ENOMEM);
     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
     CU_ASSERT(g_io_ctx->bdev_io_wait.bdev == &g_crypto_bdev.crypto_bdev);
@@ -284,22 +260,14 @@ test_error_paths(void)
     CU_ASSERT(g_io_ctx->resubmit_state == CRYPTO_IO_NEW);
     memset(&g_io_ctx->bdev_io_wait, 0, sizeof(g_io_ctx->bdev_io_wait));
 
-    MOCK_SET(spdk_accel_submit_encrypt, -EINVAL);
+    MOCK_SET(spdk_accel_append_encrypt, -EINVAL);
     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    MOCK_CLEAR(spdk_accel_submit_encrypt);
-
-    /* Test error returned in accel cpl cb */
-    ut_spdk_accel_submit_encrypt_cb_rc = -EINVAL;
-    g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-    vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
-    poll_threads();
-    CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    ut_spdk_accel_submit_encrypt_cb_rc = 0;
+    MOCK_SET(spdk_accel_append_encrypt, 0);
 
     /* Test error returned from bdev */
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-    MOCK_SET(spdk_bdev_writev_blocks, -ENOMEM);
+    MOCK_SET(spdk_bdev_writev_blocks_ext, -ENOMEM);
     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
     poll_threads();
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
@@ -311,20 +279,20 @@ test_error_paths(void)
     MOCK_CLEAR(spdk_bdev_readv_blocks_ext);
 
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-    MOCK_SET(spdk_bdev_writev_blocks, -EINVAL);
+    MOCK_SET(spdk_bdev_writev_blocks_ext, -EINVAL);
     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
     poll_threads();
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    MOCK_CLEAR(spdk_bdev_writev_blocks);
+    MOCK_CLEAR(spdk_bdev_writev_blocks_ext);
 
     /* Test error returned in bdev cpl */
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-    ut_spdk_bdev_writev_blocks = -EINVAL;
+    ut_spdk_bdev_writev_blocks_ext = -EINVAL;
     vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
     poll_threads();
     poll_threads();
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    ut_spdk_bdev_writev_blocks = 0;
+    ut_spdk_bdev_writev_blocks_ext = 0;
 
     /* the same for read path */
     /* Test error returned from bdev */
@@ -373,7 +341,7 @@ static void
 test_simple_write(void)
 {
     /* Single element block size write */
-    g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+    g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
     g_bdev_io->u.bdev.iovcnt = 1;
     g_bdev_io->u.bdev.num_blocks = 1;
     g_bdev_io->u.bdev.offset_blocks = 0;
@@ -455,12 +423,6 @@ test_reset(void)
 static void
 test_crypto_op_complete(void)
 {
-    /* Make sure completion code respects failure. */
-    g_completion_called = false;
-    _crypto_operation_complete(g_bdev_io, -1);
-    CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    CU_ASSERT(g_completion_called == true);
-
     /* Test read completion. */
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
     g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
@@ -473,8 +435,8 @@ test_crypto_op_complete(void)
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
     g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
     g_completion_called = false;
-    MOCK_CLEAR(spdk_bdev_writev_blocks);
-    _crypto_operation_complete(g_bdev_io, 0);
+    MOCK_CLEAR(spdk_bdev_writev_blocks_ext);
+    crypto_write(g_crypto_ch, g_bdev_io);
     poll_threads();
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
     CU_ASSERT(g_completion_called == true);
@@ -483,19 +445,11 @@ test_crypto_op_complete(void)
     g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
     g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
     g_completion_called = false;
-    MOCK_SET(spdk_bdev_writev_blocks, -EINVAL);
-    _crypto_operation_complete(g_bdev_io, 0);
-    CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
-    CU_ASSERT(g_completion_called == true);
-    MOCK_CLEAR(spdk_bdev_writev_blocks);
-
-    /* Test bogus type for this completion. */
-    g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
-    g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
-    g_completion_called = false;
-    _crypto_operation_complete(g_bdev_io, 0);
+    MOCK_SET(spdk_bdev_writev_blocks_ext, -EINVAL);
+    crypto_write(g_crypto_ch, g_bdev_io);
     CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
     CU_ASSERT(g_completion_called == true);
+    MOCK_CLEAR(spdk_bdev_writev_blocks_ext);
 }
 
 static void