lib/reduce: Copy user's buffers if SGL is not supported

In the compression operation the input may be an SGL
if the user's buffer is fragmented or shorter than
chunk_size. If the backing device doesn't support SGL
input, we copy the user's buffers into decomp_buf
(including any padding).
In the decompression operation, if the backing device
doesn't support SGL output, we use a single output
buffer that points to decomp_buf. Once the operation
completes, we copy the result into the user's buffers.
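As a rough, self-contained illustration of this rule (plain C, not SPDK code; the helpers needs_bounce_copy() and gather_with_zero_padding() are invented for this sketch), the example below gathers fragmented user iovecs into one contiguous chunk-sized buffer and zero-fills the leading offset and trailing remainder, which is roughly what the new _prepare_compress_chunk_copy_user_buffers() does when the backing device lacks SGL input support:

/*
 * Minimal stand-alone sketch (not SPDK API): needs_bounce_copy() and
 * gather_with_zero_padding() are hypothetical helpers that model the
 * copy decision and the gather-with-padding step described above.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static bool
needs_bounce_copy(bool dev_supports_sgl, const struct iovec *iov, int iovcnt,
                  size_t chunk_size)
{
        /* Copy only if the device can't take an SGL and the user's data
         * is fragmented or shorter than the chunk. */
        return !dev_supports_sgl && (iovcnt > 1 || iov[0].iov_len < chunk_size);
}

static void
gather_with_zero_padding(char *chunk_buf, size_t chunk_size, size_t offset_bytes,
                         const struct iovec *iov, int iovcnt)
{
        size_t pos = offset_bytes;
        int i;

        /* Zero the leading padding that covers the offset into the chunk. */
        memset(chunk_buf, 0, offset_bytes);
        /* Copy the user's fragments back to back. */
        for (i = 0; i < iovcnt; i++) {
                memcpy(chunk_buf + pos, iov[i].iov_base, iov[i].iov_len);
                pos += iov[i].iov_len;
        }
        assert(pos <= chunk_size);
        /* Zero the trailing remainder up to chunk_size. */
        memset(chunk_buf + pos, 0, chunk_size - pos);
}

int
main(void)
{
        char chunk[16] = {0};
        char a[4], b[4];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };

        memset(a, 'a', sizeof(a));
        memset(b, 'b', sizeof(b));

        /* Fragmented input and no SGL support -> bounce copy is required. */
        if (needs_bounce_copy(false, iov, 2, sizeof(chunk))) {
                gather_with_zero_padding(chunk, sizeof(chunk), 4, iov, 2);
        }
        printf("%.8s\n", chunk + 4); /* prints "aaaabbbb" */
        return 0;
}

The decompression side is the mirror image: decompress into a single chunk-sized buffer, then memcpy per-iovec back into the user's fragments, as the copy-back loop added to _read_decompress_done() does below.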

Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: Ic7fddd38374bb6898256633eacd192dbaf36541a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11970
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Alexey Marchuk authored on 2022-03-15 19:32:25 +04:00, committed by Tomasz Zawadzki
parent c81c10c529
commit 42f59f5006
2 changed files with 561 additions and 2 deletions


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -126,6 +127,7 @@ struct spdk_reduce_vol_request {
int num_backing_ops;
uint32_t num_io_units;
bool chunk_is_compressed;
bool copy_after_decompress;
uint64_t offset;
uint64_t logical_map_index;
uint64_t length;
@@ -1148,7 +1150,7 @@ _reduce_vol_compress_chunk(struct spdk_reduce_vol_request *req, reduce_request_f
req->comp_buf_iov[0].iov_base = req->comp_buf;
req->comp_buf_iov[0].iov_len = vol->params.chunk_size;
vol->backing_dev->compress(vol->backing_dev,
- &req->decomp_iov[0], req->decomp_iovcnt, req->comp_buf_iov, 1,
+ req->decomp_iov, req->decomp_iovcnt, req->comp_buf_iov, 1,
&req->backing_cb_args);
}
@@ -1179,6 +1181,19 @@ _reduce_vol_decompress_chunk(struct spdk_reduce_vol_request *req, reduce_request
req->decomp_iovcnt = 0;
chunk_offset = req->offset % vol->logical_blocks_per_chunk;
/* If the backing device doesn't support SGL output then we should copy the result of decompression to the user's buffers
* if at least one of the conditions below is true:
* 1. The user's buffer is fragmented
* 2. The length of the user's buffer is less than the chunk size */
req->copy_after_decompress = !vol->backing_dev->sgl_out && (req->iovcnt > 1 ||
req->iov[0].iov_len < vol->params.chunk_size);
if (req->copy_after_decompress) {
req->decomp_iov[0].iov_base = req->decomp_buf;
req->decomp_iov[0].iov_len = vol->params.chunk_size;
req->decomp_iovcnt = 1;
goto decompress;
}
if (chunk_offset) {
/* first iov points to our scratch buffer for any offset into the chunk */
req->decomp_iov[0].iov_base = req->decomp_buf;
@@ -1205,15 +1220,62 @@ _reduce_vol_decompress_chunk(struct spdk_reduce_vol_request *req, reduce_request
}
assert(ttl_len == vol->params.chunk_size);
decompress:
assert(!req->copy_after_decompress || (req->copy_after_decompress && req->decomp_iovcnt == 1));
req->backing_cb_args.cb_fn = next_fn;
req->backing_cb_args.cb_arg = req;
req->comp_buf_iov[0].iov_base = req->comp_buf;
req->comp_buf_iov[0].iov_len = req->chunk->compressed_size;
vol->backing_dev->decompress(vol->backing_dev,
- req->comp_buf_iov, 1, &req->decomp_iov[0], req->decomp_iovcnt,
+ req->comp_buf_iov, 1, req->decomp_iov, req->decomp_iovcnt,
&req->backing_cb_args);
}
static inline void
_prepare_compress_chunk_copy_user_buffers(struct spdk_reduce_vol_request *req, bool zero_paddings)
{
struct spdk_reduce_vol *vol = req->vol;
char *padding_buffer = zero_paddings ? g_zero_buf : req->decomp_buf;
uint64_t chunk_offset, ttl_len = 0;
uint64_t remainder = 0;
char *copy_offset = NULL;
uint32_t lbsize = vol->params.logical_block_size;
int i;
req->decomp_iov[0].iov_base = req->decomp_buf;
req->decomp_iov[0].iov_len = vol->params.chunk_size;
req->decomp_iovcnt = 1;
copy_offset = req->decomp_iov[0].iov_base;
chunk_offset = req->offset % vol->logical_blocks_per_chunk;
if (chunk_offset) {
ttl_len += chunk_offset * lbsize;
/* copy_offset already points to padding buffer if zero_paddings=false */
if (zero_paddings) {
memcpy(copy_offset, padding_buffer, ttl_len);
}
copy_offset += ttl_len;
}
/* now the user data iov, direct from the user buffer */
for (i = 0; i < req->iovcnt; i++) {
memcpy(copy_offset, req->iov[i].iov_base, req->iov[i].iov_len);
copy_offset += req->iov[i].iov_len;
ttl_len += req->iov[i].iov_len;
}
remainder = vol->params.chunk_size - ttl_len;
if (remainder) {
/* copy_offset already points to padding buffer if zero_paddings=false */
if (zero_paddings) {
memcpy(copy_offset, padding_buffer + ttl_len, remainder);
}
ttl_len += remainder;
}
assert(ttl_len == req->vol->params.chunk_size);
}
/* This function can be called when we are compressing new data or in case of read-modify-write.
* In the first case any padding should be filled with zeroes, in the second case the padding
* should point to the already read and decompressed buffer */
@@ -1227,6 +1289,16 @@ _prepare_compress_chunk(struct spdk_reduce_vol_request *req, bool zero_paddings)
uint32_t lbsize = vol->params.logical_block_size;
int i;
/* If the backing device doesn't support SGL input then we should copy the user's buffer into decomp_buf
* if at least one of the conditions below is true:
* 1. The user's buffer is fragmented
* 2. The length of the user's buffer is less than the chunk size */
if (!vol->backing_dev->sgl_in && (req->iovcnt > 1 ||
req->iov[0].iov_len < vol->params.chunk_size)) {
_prepare_compress_chunk_copy_user_buffers(req, zero_paddings);
return;
}
req->decomp_iovcnt = 0;
chunk_offset = req->offset % vol->logical_blocks_per_chunk;
@@ -1326,6 +1398,18 @@ _read_decompress_done(void *_req, int reduce_errno)
return;
}
if (req->copy_after_decompress) {
uint64_t chunk_offset = req->offset % vol->logical_blocks_per_chunk;
char *decomp_buffer = (char *)req->decomp_buf + chunk_offset * vol->params.logical_block_size;
int i;
for (i = 0; i < req->iovcnt; i++) {
memcpy(req->iov[i].iov_base, decomp_buffer, req->iov[i].iov_len);
decomp_buffer += req->iov[i].iov_len;
assert(decomp_buffer <= (char *)req->decomp_buf + vol->params.chunk_size);
}
}
_reduce_vol_complete_req(req, 0);
}
@@ -1479,6 +1563,7 @@ spdk_reduce_vol_readv(struct spdk_reduce_vol *vol,
req->offset = offset;
req->logical_map_index = logical_map_index;
req->length = length;
req->copy_after_decompress = false;
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
@@ -1553,6 +1638,7 @@ spdk_reduce_vol_writev(struct spdk_reduce_vol *vol,
req->offset = offset;
req->logical_map_index = logical_map_index;
req->length = length;
req->copy_after_decompress = false;
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;


@@ -3,6 +3,7 @@
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -48,6 +49,7 @@ static size_t g_persistent_pm_buf_len;
static char *g_backing_dev_buf;
static char g_path[REDUCE_PATH_MAX];
static char *g_decomp_buf;
static int g_decompressed_len;
#define TEST_MD_PATH "/tmp"
@@ -541,6 +543,8 @@ backing_dev_init(struct spdk_reduce_backing_dev *backing_dev, struct spdk_reduce
backing_dev->unmap = backing_dev_unmap;
backing_dev->compress = backing_dev_compress;
backing_dev->decompress = backing_dev_decompress;
backing_dev->sgl_in = true;
backing_dev->sgl_out = true;
g_decomp_buf = calloc(1, params->chunk_size);
SPDK_CU_ASSERT_FATAL(g_decomp_buf != NULL);
@@ -1264,6 +1268,473 @@ compress_algorithm(void)
CU_ASSERT(rc == -ENOSPC);
}
static void
test_prepare_compress_chunk(void)
{
struct spdk_reduce_vol vol = {};
struct spdk_reduce_backing_dev backing_dev = {};
struct spdk_reduce_vol_request req = {};
char decomp_buffer[16 * 1024] = {};
char comp_buffer[16 * 1024] = {};
char user_buffer[16 * 1024] = {};
struct iovec user_iov[2] = {};
size_t user_buffer_iov_len = 8192;
size_t remainder_bytes;
size_t offset_bytes;
size_t memcmp_offset;
uint32_t i;
vol.params.chunk_size = 16 * 1024;
vol.params.backing_io_unit_size = 4096;
vol.params.logical_block_size = 512;
backing_dev_init(&backing_dev, &vol.params, 512);
vol.backing_dev = &backing_dev;
vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
req.vol = &vol;
req.decomp_buf = decomp_buffer;
req.comp_buf = comp_buffer;
req.iov = user_iov;
req.iovcnt = 2;
req.offset = 0;
/* Part 1 - backing dev supports sgl_in */
/* Test 1 - user's buffers length equals to chunk_size */
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
}
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 2);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 2);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
/* Test 2 - user's buffer less than chunk_size, without offset */
user_buffer_iov_len = 4096;
remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
}
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 3);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 3);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[i].iov_base == g_zero_buf + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
/* Test 3 - user's buffer less than chunk_size, non zero offset */
user_buffer_iov_len = 4096;
req.offset = 3;
offset_bytes = req.offset * vol.params.logical_block_size;
remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 4);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 4);
CU_ASSERT(req.decomp_iov[0].iov_base == g_zero_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[3].iov_base == g_zero_buf + offset_bytes + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
/* Part 2 - backing dev doesn't support sgl_in */
/* Test 1 - user's buffers length equals to chunk_size
* user's buffers are copied */
vol.backing_dev->sgl_in = false;
req.offset = 0;
user_buffer_iov_len = 8192;
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
}
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base, req.iov[0].iov_base, req.iov[0].iov_len) == 0);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + req.iov[0].iov_len, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
/* Test 2 - single user's buffer length equals to chunk_size
* User's buffer is not copied */
req.iov[0].iov_base = user_buffer;
req.iov[0].iov_len = vol.params.chunk_size;
req.iovcnt = 1;
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
/* Test 3 - user's buffer less than chunk_size, without offset
* User's buffers are copied */
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
user_buffer_iov_len = 4096;
req.iovcnt = 2;
remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
}
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
memcmp_offset = 0;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
req.iov[0].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
remainder_bytes) == 0);
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
memcmp_offset = 0;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
req.iov[0].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
remainder_bytes) == 0);
/* Test 4 - user's buffer less than chunk_size, non zero offset
* user's buffers are copied */
req.offset = 3;
offset_bytes = req.offset * vol.params.logical_block_size;
remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, false);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
memcmp_offset = 0;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf, offset_bytes) == 0);
memcmp_offset += offset_bytes;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
req.iov[0].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
memcmp_offset += req.iov[1].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.decomp_buf + memcmp_offset,
remainder_bytes) == 0);
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
_prepare_compress_chunk(&req, true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
memcmp_offset = 0;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf, offset_bytes) == 0);
memcmp_offset += offset_bytes;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[0].iov_base,
req.iov[0].iov_len) == 0);
memcmp_offset += req.iov[0].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
memcmp_offset += req.iov[1].iov_len;
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + memcmp_offset, g_zero_buf + memcmp_offset,
remainder_bytes) == 0);
}
static void _reduce_vol_op_complete(void *ctx, int reduce_errno)
{
g_reduce_errno = reduce_errno;
}
static void
dummy_backing_dev_decompress(struct spdk_reduce_backing_dev *backing_dev,
struct iovec *src_iov, int src_iovcnt,
struct iovec *dst_iov, int dst_iovcnt,
struct spdk_reduce_vol_cb_args *args)
{
args->cb_fn(args->cb_arg, g_decompressed_len);
}
static void test_reduce_decompress_chunk(void)
{
struct spdk_reduce_vol vol = {};
struct spdk_reduce_backing_dev backing_dev = {};
struct spdk_reduce_vol_request req = {};
char decomp_buffer[16 * 1024] = {};
char comp_buffer[16 * 1024] = {};
char user_buffer[16 * 1024] = {};
struct iovec user_iov[2] = {};
struct iovec comp_buf_iov = {};
struct spdk_reduce_chunk_map chunk = {};
size_t user_buffer_iov_len = 8192;
size_t remainder_bytes;
size_t offset_bytes;
uint32_t i;
vol.params.chunk_size = 16 * 1024;
vol.params.backing_io_unit_size = 4096;
vol.params.logical_block_size = 512;
backing_dev_init(&backing_dev, &vol.params, 512);
backing_dev.decompress = dummy_backing_dev_decompress;
vol.backing_dev = &backing_dev;
vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
TAILQ_INIT(&vol.executing_requests);
TAILQ_INIT(&vol.queued_requests);
TAILQ_INIT(&vol.free_requests);
chunk.compressed_size = user_buffer_iov_len / 2;
req.chunk = &chunk;
req.vol = &vol;
req.decomp_buf = decomp_buffer;
req.comp_buf = comp_buffer;
req.comp_buf_iov = &comp_buf_iov;
req.iov = user_iov;
req.iovcnt = 2;
req.offset = 0;
req.cb_fn = _reduce_vol_op_complete;
/* Part 1 - backing dev supports sgl_out */
/* Test 1 - user's buffers length equals to chunk_size */
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
}
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
g_decompressed_len = vol.params.chunk_size;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == false);
CU_ASSERT(req.decomp_iovcnt == 2);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
/* Test 2 - user's buffer less than chunk_size, without offset */
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
user_buffer_iov_len = 4096;
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
}
remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == false);
CU_ASSERT(req.decomp_iovcnt == 3);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
/* Test 3 - user's buffer less than chunk_size, non zero offset */
req.offset = 3;
offset_bytes = req.offset * vol.params.logical_block_size;
remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == false);
CU_ASSERT(req.decomp_iovcnt == 4);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == offset_bytes);
for (i = 0; i < 2; i++) {
CU_ASSERT(req.decomp_iov[i + 1].iov_base == req.iov[i].iov_base);
CU_ASSERT(req.decomp_iov[i + 1].iov_len == req.iov[i].iov_len);
}
CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
/* Part 2 - backing dev doesn't support sgl_out */
/* Test 1 - user's buffers length equals to chunk_size
* user's buffers are copied */
vol.backing_dev->sgl_out = false;
req.offset = 0;
user_buffer_iov_len = 8192;
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
}
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base, req.iov[0].iov_len) == 0);
CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
req.iov[1].iov_len) == 0);
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
/* Test 2 - single user's buffer length equals to chunk_size
* User's buffer is not copied */
req.iov[0].iov_base = user_buffer;
req.iov[0].iov_len = vol.params.chunk_size;
req.iovcnt = 1;
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == false);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.iov[0].iov_base);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
/* Test 3 - user's buffer less than chunk_size, without offset
* User's buffers are copied */
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
user_buffer_iov_len = 4096;
req.iovcnt = 2;
remainder_bytes = vol.params.chunk_size - user_buffer_iov_len * 2;
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
}
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base,
req.iov[0].iov_len) == 0);
CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
req.iov[1].iov_len) == 0);
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
/* Test 4 - user's buffer less than chunk_size, non zero offset
* user's buffers are copied */
req.offset = 3;
offset_bytes = req.offset * vol.params.logical_block_size;
remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
for (i = 0; i < 2; i++) {
req.iov[i].iov_base = user_buffer + i * user_buffer_iov_len;
req.iov[i].iov_len = user_buffer_iov_len;
memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
}
memset(req.decomp_buf, 0xa, vol.params.chunk_size);
TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
g_reduce_errno = -1;
_prepare_compress_chunk(&req, false);
_reduce_vol_decompress_chunk(&req, _read_decompress_done);
CU_ASSERT(g_reduce_errno == 0);
CU_ASSERT(req.copy_after_decompress == true);
CU_ASSERT(req.decomp_iovcnt == 1);
CU_ASSERT(req.decomp_iov[0].iov_base == req.decomp_buf);
CU_ASSERT(req.decomp_iov[0].iov_len == vol.params.chunk_size);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes, req.iov[0].iov_base,
req.iov[0].iov_len) == 0);
CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes + req.iov[0].iov_len,
req.iov[1].iov_base,
req.iov[1].iov_len) == 0);
CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);
}
int
main(int argc, char **argv)
{
@@ -1288,6 +1759,8 @@ main(int argc, char **argv)
CU_ADD_TEST(suite, defer_bdev_io);
CU_ADD_TEST(suite, overlapped);
CU_ADD_TEST(suite, compress_algorithm);
CU_ADD_TEST(suite, test_prepare_compress_chunk);
CU_ADD_TEST(suite, test_reduce_decompress_chunk);
g_unlink_path = g_path;
g_unlink_callback = unlink_cb;