2022-06-03 19:15:11 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
2016-07-20 18:16:23 +00:00
|
|
|
* Copyright (c) Intel Corporation.
|
|
|
|
* All rights reserved.
|
2022-04-12 16:32:25 +00:00
|
|
|
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
2016-07-20 18:16:23 +00:00
|
|
|
*/
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
#include "spdk/bdev.h"
|
2022-08-08 20:31:08 +00:00
|
|
|
#include "spdk/accel.h"
|
2016-08-17 20:35:18 +00:00
|
|
|
#include "spdk/env.h"
|
2016-07-20 18:16:23 +00:00
|
|
|
#include "spdk/log.h"
|
2018-06-11 20:32:15 +00:00
|
|
|
#include "spdk/thread.h"
|
2018-08-09 10:56:48 +00:00
|
|
|
#include "spdk/event.h"
|
2019-05-23 07:53:44 +00:00
|
|
|
#include "spdk/rpc.h"
|
|
|
|
#include "spdk/util.h"
|
|
|
|
#include "spdk/string.h"
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-25 17:13:18 +00:00
|
|
|
#include "bdev_internal.h"
|
2016-07-20 18:16:23 +00:00
|
|
|
#include "CUnit/Basic.h"
|
|
|
|
|
2016-10-12 16:32:10 +00:00
|
|
|
#define BUFFER_IOVS 1024
|
2018-03-02 19:49:36 +00:00
|
|
|
#define BUFFER_SIZE 260 * 1024
|
2016-07-20 18:16:23 +00:00
|
|
|
#define BDEV_TASK_ARRAY_SIZE 2048
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
pthread_mutex_t g_test_mutex;
|
|
|
|
pthread_cond_t g_test_cond;
|
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
static struct spdk_thread *g_thread_init;
|
|
|
|
static struct spdk_thread *g_thread_ut;
|
|
|
|
static struct spdk_thread *g_thread_io;
|
2019-05-23 07:53:44 +00:00
|
|
|
static bool g_wait_for_tests = false;
|
2019-09-13 22:30:47 +00:00
|
|
|
static int g_num_failures = 0;
|
2020-10-09 21:20:52 +00:00
|
|
|
static bool g_shutdown = false;
|
2018-08-09 10:48:03 +00:00
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
/* One bdev under test: the open descriptor and I/O channel used to submit
 * I/O, plus the link that chains targets on the global g_io_targets list. */
struct io_target {
	struct spdk_bdev *bdev;			/* bdev being exercised */
	struct spdk_bdev_desc *bdev_desc;	/* open descriptor used for all I/O */
	struct spdk_io_channel *ch;		/* channel obtained on the io thread */
	struct io_target *next;			/* next target in the global list */
};
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
/* Parameters for a single test I/O, built on the UT thread and handed to
 * the io thread via execute_spdk_function(). */
struct bdevio_request {
	char *buf;		/* primary data buffer (read, write, or compare data) */
	char *fused_buf;	/* write half of a fused compare-and-write */
	int data_len;		/* total transfer length in bytes */
	uint64_t offset;	/* byte offset into the bdev */
	struct iovec iov[BUFFER_IOVS];	/* scattered view of buf */
	int iovcnt;		/* 0 selects the non-vectored I/O path */
	struct iovec fused_iov[BUFFER_IOVS];	/* scattered view of fused_buf */
	int fused_iovcnt;	/* entries used in fused_iov */
	struct io_target *target;	/* target to submit the I/O against */
};
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
struct io_target *g_io_targets = NULL;
|
2019-05-21 14:17:19 +00:00
|
|
|
struct io_target *g_current_io_target = NULL;
|
2019-05-23 07:53:44 +00:00
|
|
|
static void rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-01-05 20:09:44 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(spdk_msg_fn fn, void *arg)
|
2017-01-05 20:09:44 +00:00
|
|
|
{
|
|
|
|
pthread_mutex_lock(&g_test_mutex);
|
2019-09-13 22:30:47 +00:00
|
|
|
spdk_thread_send_msg(g_thread_io, fn, arg);
|
2017-01-05 20:09:44 +00:00
|
|
|
pthread_cond_wait(&g_test_cond, &g_test_mutex);
|
|
|
|
pthread_mutex_unlock(&g_test_mutex);
|
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
|
|
|
wake_ut_thread(void)
|
|
|
|
{
|
|
|
|
pthread_mutex_lock(&g_test_mutex);
|
|
|
|
pthread_cond_signal(&g_test_cond);
|
|
|
|
pthread_mutex_unlock(&g_test_mutex);
|
|
|
|
}
|
|
|
|
|
2017-01-05 19:44:24 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__get_io_channel(void *arg)
|
2017-01-05 19:44:24 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct io_target *target = arg;
|
2017-01-05 19:44:24 +00:00
|
|
|
|
2017-06-29 18:23:50 +00:00
|
|
|
target->ch = spdk_bdev_get_io_channel(target->bdev_desc);
|
2017-10-18 09:01:37 +00:00
|
|
|
assert(target->ch);
|
2017-01-05 19:44:24 +00:00
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
|
2021-03-02 10:33:51 +00:00
|
|
|
static void
|
|
|
|
bdevio_construct_target_open_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
|
|
|
|
void *event_ctx)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-05-15 12:29:55 +00:00
|
|
|
static int
|
|
|
|
bdevio_construct_target(struct spdk_bdev *bdev)
|
|
|
|
{
|
|
|
|
struct io_target *target;
|
|
|
|
int rc;
|
|
|
|
uint64_t num_blocks = spdk_bdev_get_num_blocks(bdev);
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
|
|
|
|
|
|
|
target = malloc(sizeof(struct io_target));
|
|
|
|
if (target == NULL) {
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2021-03-02 10:33:51 +00:00
|
|
|
rc = spdk_bdev_open_ext(spdk_bdev_get_name(bdev), true, bdevio_construct_target_open_cb, NULL,
|
|
|
|
&target->bdev_desc);
|
2019-05-15 12:29:55 +00:00
|
|
|
if (rc != 0) {
|
|
|
|
free(target);
|
|
|
|
SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
printf(" %s: %" PRIu64 " blocks of %" PRIu32 " bytes (%" PRIu64 " MiB)\n",
|
|
|
|
spdk_bdev_get_name(bdev),
|
|
|
|
num_blocks, block_size,
|
|
|
|
(num_blocks * block_size + 1024 * 1024 - 1) / (1024 * 1024));
|
|
|
|
|
|
|
|
target->bdev = bdev;
|
|
|
|
target->next = g_io_targets;
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__get_io_channel, target);
|
2019-05-15 12:29:55 +00:00
|
|
|
g_io_targets = target;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
static int
|
|
|
|
bdevio_construct_targets(void)
|
|
|
|
{
|
|
|
|
struct spdk_bdev *bdev;
|
2017-06-29 18:23:50 +00:00
|
|
|
int rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-01-10 21:05:07 +00:00
|
|
|
printf("I/O targets:\n");
|
|
|
|
|
2017-06-29 20:16:26 +00:00
|
|
|
bdev = spdk_bdev_first_leaf();
|
2016-08-01 21:31:02 +00:00
|
|
|
while (bdev != NULL) {
|
2019-05-15 12:29:55 +00:00
|
|
|
rc = bdevio_construct_target(bdev);
|
|
|
|
if (rc < 0) {
|
|
|
|
SPDK_ERRLOG("Could not construct bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
|
2019-05-15 14:22:18 +00:00
|
|
|
return rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
2017-06-29 20:16:26 +00:00
|
|
|
bdev = spdk_bdev_next_leaf(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2019-05-15 14:15:12 +00:00
|
|
|
if (g_io_targets == NULL) {
|
|
|
|
SPDK_ERRLOG("No bdevs to perform tests on\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-01-05 19:44:24 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__put_io_channel(void *arg)
|
2017-01-05 19:44:24 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct io_target *target = arg;
|
2017-01-05 19:44:24 +00:00
|
|
|
|
|
|
|
spdk_put_io_channel(target->ch);
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bdevio_cleanup_targets(void)
|
|
|
|
{
|
|
|
|
struct io_target *target;
|
|
|
|
|
|
|
|
target = g_io_targets;
|
|
|
|
while (target != NULL) {
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__put_io_channel, target);
|
2017-06-29 18:23:50 +00:00
|
|
|
spdk_bdev_close(target->bdev_desc);
|
2017-01-05 19:44:24 +00:00
|
|
|
g_io_targets = target->next;
|
|
|
|
free(target);
|
|
|
|
target = g_io_targets;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-16 20:25:03 +00:00
|
|
|
static bool g_completion_success;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
initialize_buffer(char **buf, int pattern, int size)
|
|
|
|
{
|
2019-06-27 04:59:47 +00:00
|
|
|
*buf = spdk_zmalloc(size, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
|
2016-07-20 18:16:23 +00:00
|
|
|
memset(*buf, pattern, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-05-16 20:25:03 +00:00
|
|
|
quick_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = success;
|
2016-07-20 18:16:23 +00:00
|
|
|
spdk_bdev_free_io(bdev_io);
|
2016-10-11 16:02:37 +00:00
|
|
|
wake_ut_thread();
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
static uint64_t
|
|
|
|
bdev_bytes_to_blocks(struct spdk_bdev *bdev, uint64_t bytes)
|
|
|
|
{
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
|
|
|
|
|
|
|
CU_ASSERT(bytes % block_size == 0);
|
|
|
|
return bytes / block_size;
|
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_write(void *arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_request *req = arg;
|
2016-10-11 16:02:37 +00:00
|
|
|
struct io_target *target = req->target;
|
2017-06-05 18:39:38 +00:00
|
|
|
int rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2016-10-12 16:32:10 +00:00
|
|
|
if (req->iovcnt) {
|
2017-07-06 19:39:19 +00:00
|
|
|
rc = spdk_bdev_writev(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
|
2017-06-05 18:39:38 +00:00
|
|
|
req->data_len, quick_test_complete, NULL);
|
2016-10-12 16:32:10 +00:00
|
|
|
} else {
|
2017-07-06 19:39:19 +00:00
|
|
|
rc = spdk_bdev_write(target->bdev_desc, target->ch, req->buf, req->offset,
|
2017-06-05 18:39:38 +00:00
|
|
|
req->data_len, quick_test_complete, NULL);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
if (rc) {
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-10-11 16:02:37 +00:00
|
|
|
wake_ut_thread();
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_write_zeroes(void *arg)
|
2017-07-28 22:34:24 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_request *req = arg;
|
2017-07-28 22:34:24 +00:00
|
|
|
struct io_target *target = req->target;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = spdk_bdev_write_zeroes(target->bdev_desc, target->ch, req->offset,
|
|
|
|
req->data_len, quick_test_complete, NULL);
|
|
|
|
if (rc) {
|
|
|
|
g_completion_success = false;
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-23 19:16:55 +00:00
|
|
|
static void
|
|
|
|
__blockdev_compare_and_write(void *arg)
|
|
|
|
{
|
|
|
|
struct bdevio_request *req = arg;
|
|
|
|
struct io_target *target = req->target;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2020-01-23 19:16:55 +00:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = spdk_bdev_comparev_and_writev_blocks(target->bdev_desc, target->ch, req->iov, req->iovcnt,
|
2022-04-19 09:15:15 +00:00
|
|
|
req->fused_iov, req->fused_iovcnt, bdev_bytes_to_blocks(bdev, req->offset),
|
|
|
|
bdev_bytes_to_blocks(bdev, req->data_len), quick_test_complete, NULL);
|
2020-01-23 19:16:55 +00:00
|
|
|
|
|
|
|
if (rc) {
|
|
|
|
g_completion_success = false;
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-12 16:32:10 +00:00
|
|
|
static void
|
|
|
|
sgl_chop_buffer(struct bdevio_request *req, int iov_len)
|
|
|
|
{
|
|
|
|
int data_len = req->data_len;
|
|
|
|
char *buf = req->buf;
|
|
|
|
|
|
|
|
req->iovcnt = 0;
|
2017-12-07 23:23:48 +00:00
|
|
|
if (!iov_len) {
|
2016-10-12 16:32:10 +00:00
|
|
|
return;
|
2017-12-07 23:23:48 +00:00
|
|
|
}
|
2016-10-12 16:32:10 +00:00
|
|
|
|
|
|
|
for (; data_len > 0 && req->iovcnt < BUFFER_IOVS; req->iovcnt++) {
|
2017-12-07 23:23:48 +00:00
|
|
|
if (data_len < iov_len) {
|
2016-10-12 16:32:10 +00:00
|
|
|
iov_len = data_len;
|
2017-12-07 23:23:48 +00:00
|
|
|
}
|
2016-10-12 16:32:10 +00:00
|
|
|
|
|
|
|
req->iov[req->iovcnt].iov_base = buf;
|
|
|
|
req->iov[req->iovcnt].iov_len = iov_len;
|
|
|
|
|
|
|
|
buf += iov_len;
|
|
|
|
data_len -= iov_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
CU_ASSERT_EQUAL_FATAL(data_len, 0);
|
|
|
|
}
|
|
|
|
|
2020-01-23 19:16:55 +00:00
|
|
|
static void
|
|
|
|
sgl_chop_fused_buffer(struct bdevio_request *req, int iov_len)
|
|
|
|
{
|
|
|
|
int data_len = req->data_len;
|
|
|
|
char *buf = req->fused_buf;
|
|
|
|
|
|
|
|
req->fused_iovcnt = 0;
|
|
|
|
if (!iov_len) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (; data_len > 0 && req->fused_iovcnt < BUFFER_IOVS; req->fused_iovcnt++) {
|
|
|
|
if (data_len < iov_len) {
|
|
|
|
iov_len = data_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
req->fused_iov[req->fused_iovcnt].iov_base = buf;
|
|
|
|
req->fused_iov[req->fused_iovcnt].iov_len = iov_len;
|
|
|
|
|
|
|
|
buf += iov_len;
|
|
|
|
data_len -= iov_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
CU_ASSERT_EQUAL_FATAL(data_len, 0);
|
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
|
|
|
blockdev_write(struct io_target *target, char *tx_buf,
|
2016-10-12 16:32:10 +00:00
|
|
|
uint64_t offset, int data_len, int iov_len)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2016-10-11 16:02:37 +00:00
|
|
|
struct bdevio_request req;
|
|
|
|
|
|
|
|
req.target = target;
|
|
|
|
req.buf = tx_buf;
|
|
|
|
req.data_len = data_len;
|
|
|
|
req.offset = offset;
|
2016-10-12 16:32:10 +00:00
|
|
|
sgl_chop_buffer(&req, iov_len);
|
2016-10-11 16:02:37 +00:00
|
|
|
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-10-11 16:02:37 +00:00
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_write, &req);
|
2016-10-11 16:02:37 +00:00
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2020-01-23 19:16:55 +00:00
|
|
|
static void
|
|
|
|
_blockdev_compare_and_write(struct io_target *target, char *cmp_buf, char *write_buf,
|
|
|
|
uint64_t offset, int data_len, int iov_len)
|
|
|
|
{
|
|
|
|
struct bdevio_request req;
|
|
|
|
|
|
|
|
req.target = target;
|
|
|
|
req.buf = cmp_buf;
|
|
|
|
req.fused_buf = write_buf;
|
|
|
|
req.data_len = data_len;
|
|
|
|
req.offset = offset;
|
|
|
|
sgl_chop_buffer(&req, iov_len);
|
|
|
|
sgl_chop_fused_buffer(&req, iov_len);
|
|
|
|
|
|
|
|
g_completion_success = false;
|
|
|
|
|
|
|
|
execute_spdk_function(__blockdev_compare_and_write, &req);
|
|
|
|
}
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
static void
|
|
|
|
blockdev_write_zeroes(struct io_target *target, char *tx_buf,
|
|
|
|
uint64_t offset, int data_len)
|
|
|
|
{
|
|
|
|
struct bdevio_request req;
|
|
|
|
|
|
|
|
req.target = target;
|
|
|
|
req.buf = tx_buf;
|
|
|
|
req.data_len = data_len;
|
|
|
|
req.offset = offset;
|
|
|
|
|
|
|
|
g_completion_success = false;
|
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_write_zeroes, &req);
|
2017-07-28 22:34:24 +00:00
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_read(void *arg)
|
2016-10-11 16:02:37 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_request *req = arg;
|
2016-10-11 16:02:37 +00:00
|
|
|
struct io_target *target = req->target;
|
2017-06-05 18:39:38 +00:00
|
|
|
int rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2016-10-12 16:32:10 +00:00
|
|
|
if (req->iovcnt) {
|
2017-07-06 19:39:19 +00:00
|
|
|
rc = spdk_bdev_readv(target->bdev_desc, target->ch, req->iov, req->iovcnt, req->offset,
|
2017-06-05 18:39:38 +00:00
|
|
|
req->data_len, quick_test_complete, NULL);
|
2016-10-12 16:32:10 +00:00
|
|
|
} else {
|
2017-07-06 19:39:19 +00:00
|
|
|
rc = spdk_bdev_read(target->bdev_desc, target->ch, req->buf, req->offset,
|
2017-06-05 18:39:38 +00:00
|
|
|
req->data_len, quick_test_complete, NULL);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
2017-06-05 18:39:38 +00:00
|
|
|
if (rc) {
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-10-11 16:02:37 +00:00
|
|
|
wake_ut_thread();
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
|
|
|
blockdev_read(struct io_target *target, char *rx_buf,
|
2016-10-12 16:32:10 +00:00
|
|
|
uint64_t offset, int data_len, int iov_len)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2016-10-11 16:02:37 +00:00
|
|
|
struct bdevio_request req;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
req.target = target;
|
|
|
|
req.buf = rx_buf;
|
|
|
|
req.data_len = data_len;
|
|
|
|
req.offset = offset;
|
2016-10-12 16:32:10 +00:00
|
|
|
req.iovcnt = 0;
|
|
|
|
sgl_chop_buffer(&req, iov_len);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_read, &req);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2016-09-14 18:03:58 +00:00
|
|
|
blockdev_write_read_data_match(char *rx_buf, char *tx_buf, int data_length)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2022-08-21 16:53:13 +00:00
|
|
|
return memcmp(rx_buf, tx_buf, data_length);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2016-10-12 16:32:10 +00:00
|
|
|
blockdev_write_read(uint32_t data_length, uint32_t iov_len, int pattern, uint64_t offset,
|
2017-07-28 22:34:24 +00:00
|
|
|
int expected_rc, bool write_zeroes)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
|
|
|
struct io_target *target;
|
|
|
|
char *tx_buf = NULL;
|
|
|
|
char *rx_buf = NULL;
|
|
|
|
int rc;
|
2022-09-20 09:01:49 +00:00
|
|
|
uint64_t write_offset = offset;
|
|
|
|
uint32_t write_data_len = data_length;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
target = g_current_io_target;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-09-20 09:01:49 +00:00
|
|
|
if (spdk_bdev_get_write_unit_size(target->bdev) > 1 && expected_rc == 0) {
|
|
|
|
uint32_t write_unit_bytes;
|
|
|
|
|
|
|
|
write_unit_bytes = spdk_bdev_get_write_unit_size(target->bdev) *
|
|
|
|
spdk_bdev_get_block_size(target->bdev);
|
|
|
|
write_offset -= offset % write_unit_bytes;
|
|
|
|
write_data_len += (offset - write_offset);
|
|
|
|
|
|
|
|
if (write_data_len % write_unit_bytes) {
|
|
|
|
write_data_len += write_unit_bytes - write_data_len % write_unit_bytes;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
if (!write_zeroes) {
|
2022-09-20 09:01:49 +00:00
|
|
|
initialize_buffer(&tx_buf, pattern, write_data_len);
|
2019-05-21 14:17:19 +00:00
|
|
|
initialize_buffer(&rx_buf, 0, data_length);
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2022-09-20 09:01:49 +00:00
|
|
|
blockdev_write(target, tx_buf, write_offset, write_data_len, iov_len);
|
2019-05-21 14:17:19 +00:00
|
|
|
} else {
|
2022-09-20 09:01:49 +00:00
|
|
|
initialize_buffer(&tx_buf, 0, write_data_len);
|
2019-05-21 14:17:19 +00:00
|
|
|
initialize_buffer(&rx_buf, pattern, data_length);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-09-20 09:01:49 +00:00
|
|
|
blockdev_write_zeroes(target, tx_buf, write_offset, write_data_len);
|
2019-05-21 14:17:19 +00:00
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
if (expected_rc == 0) {
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, true);
|
|
|
|
} else {
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, false);
|
|
|
|
}
|
|
|
|
blockdev_read(target, rx_buf, offset, data_length, iov_len);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
if (expected_rc == 0) {
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, true);
|
|
|
|
} else {
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, false);
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
if (g_completion_success) {
|
2022-09-20 09:01:49 +00:00
|
|
|
rc = blockdev_write_read_data_match(rx_buf, tx_buf + (offset - write_offset), data_length);
|
2019-05-21 14:17:19 +00:00
|
|
|
/* Assert the write by comparing it with values read
|
|
|
|
* from each blockdev */
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
2022-08-21 16:53:13 +00:00
|
|
|
|
|
|
|
spdk_free(rx_buf);
|
|
|
|
spdk_free(tx_buf);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2020-01-23 19:16:55 +00:00
|
|
|
static void
|
|
|
|
blockdev_compare_and_write(uint32_t data_length, uint32_t iov_len, uint64_t offset)
|
|
|
|
{
|
|
|
|
struct io_target *target;
|
|
|
|
char *tx_buf = NULL;
|
|
|
|
char *write_buf = NULL;
|
|
|
|
char *rx_buf = NULL;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
target = g_current_io_target;
|
|
|
|
|
|
|
|
initialize_buffer(&tx_buf, 0xAA, data_length);
|
|
|
|
initialize_buffer(&rx_buf, 0, data_length);
|
|
|
|
initialize_buffer(&write_buf, 0xBB, data_length);
|
|
|
|
|
|
|
|
blockdev_write(target, tx_buf, offset, data_length, iov_len);
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, true);
|
|
|
|
|
|
|
|
_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, true);
|
|
|
|
|
|
|
|
_blockdev_compare_and_write(target, tx_buf, write_buf, offset, data_length, iov_len);
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, false);
|
|
|
|
|
|
|
|
blockdev_read(target, rx_buf, offset, data_length, iov_len);
|
|
|
|
CU_ASSERT_EQUAL(g_completion_success, true);
|
|
|
|
rc = blockdev_write_read_data_match(rx_buf, write_buf, data_length);
|
|
|
|
/* Assert the write by comparing it with values read
|
|
|
|
* from each blockdev */
|
|
|
|
CU_ASSERT_EQUAL(rc, 0);
|
2022-08-21 16:53:13 +00:00
|
|
|
|
|
|
|
spdk_free(rx_buf);
|
|
|
|
spdk_free(tx_buf);
|
|
|
|
spdk_free(write_buf);
|
2020-01-23 19:16:55 +00:00
|
|
|
}
|
|
|
|
|
2016-07-20 18:16:23 +00:00
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_write_read_block(void)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 1 block */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
2016-10-11 16:02:37 +00:00
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_write_zeroes_read_block(void)
|
2017-07-28 22:34:24 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 1 block */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev);
|
2017-07-28 22:34:24 +00:00
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write_zeroes and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This i/o will not have to split at the bdev layer.
|
|
|
|
*/
|
|
|
|
static void
|
2022-04-25 17:13:18 +00:00
|
|
|
blockdev_write_zeroes_read_no_split(void)
|
2017-07-28 22:34:24 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned ZERO_BUFFER_SIZE */
|
2022-04-25 17:13:18 +00:00
|
|
|
data_length = ZERO_BUFFER_SIZE; /* from bdev_internal.h */
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length -= ZERO_BUFFER_SIZE % spdk_bdev_get_block_size(bdev);
|
2017-07-28 22:34:24 +00:00
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write_zeroes and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This i/o will have to split at the bdev layer if
|
|
|
|
* write-zeroes is not supported by the bdev.
|
|
|
|
*/
|
|
|
|
static void
|
2022-04-25 17:13:18 +00:00
|
|
|
blockdev_write_zeroes_read_split(void)
|
2017-07-28 22:34:24 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned 3 * ZERO_BUFFER_SIZE */
|
2022-04-25 17:13:18 +00:00
|
|
|
data_length = 3 * ZERO_BUFFER_SIZE; /* from bdev_internal.h */
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length -= data_length % spdk_bdev_get_block_size(bdev);
|
2017-07-28 22:34:24 +00:00
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write_zeroes and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This i/o will have to split at the bdev layer if
|
|
|
|
* write-zeroes is not supported by the bdev. It also
|
|
|
|
* tests a write size that is not an even multiple of
|
|
|
|
* the bdev layer zero buffer size.
|
|
|
|
*/
|
|
|
|
static void
|
2022-04-25 17:13:18 +00:00
|
|
|
blockdev_write_zeroes_read_split_partial(void)
|
2017-07-28 22:34:24 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2017-07-28 22:34:24 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned 7 * ZERO_BUFFER_SIZE / 2 */
|
2022-04-25 17:13:18 +00:00
|
|
|
data_length = ZERO_BUFFER_SIZE * 7 / 2;
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length -= data_length % block_size;
|
2017-07-28 22:34:24 +00:00
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write_zeroes and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 1);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_writev_readv_block(void)
|
2016-10-12 16:32:10 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-10-12 16:32:10 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 1 block */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev);
|
|
|
|
iov_len = data_length;
|
2016-10-12 16:32:10 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
2020-01-23 19:16:55 +00:00
|
|
|
static void
|
|
|
|
blockdev_comparev_and_writev(void)
|
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2020-01-23 19:16:55 +00:00
|
|
|
|
2022-08-21 16:32:37 +00:00
|
|
|
if (spdk_bdev_is_md_separate(bdev)) {
|
2022-08-17 15:31:01 +00:00
|
|
|
/* TODO: remove this check once bdev layer properly supports
|
2022-08-21 16:32:37 +00:00
|
|
|
* compare and write for bdevs with separate md.
|
2022-08-17 15:31:01 +00:00
|
|
|
*/
|
|
|
|
SPDK_ERRLOG("skipping comparev_and_writev on bdev %s since it has\n"
|
2022-08-21 16:32:37 +00:00
|
|
|
"separate metadata which is not supported yet.\n",
|
2022-08-17 15:31:01 +00:00
|
|
|
spdk_bdev_get_name(bdev));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = acwu size */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev) * spdk_bdev_get_acwu(bdev);
|
|
|
|
iov_len = data_length;
|
2020-01-23 19:16:55 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
offset = 0;
|
|
|
|
|
|
|
|
blockdev_compare_and_write(data_length, iov_len, offset);
|
|
|
|
}
|
|
|
|
|
2016-10-12 16:32:10 +00:00
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_writev_readv_30x1block(void)
|
2016-10-12 16:32:10 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-10-12 16:32:10 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 30 * block size */
|
|
|
|
data_length = block_size * 30;
|
|
|
|
iov_len = block_size;
|
2016-10-12 16:32:10 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_write_read_8blocks(void)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 8 * block size */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev) * 8;
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = data_length;
|
2016-07-20 18:16:23 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
2016-10-11 16:02:37 +00:00
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_writev_readv_8blocks(void)
|
2016-10-12 16:32:10 +00:00
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-10-12 16:32:10 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 8 * block size */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev) * 8;
|
|
|
|
iov_len = data_length;
|
2016-10-12 16:32:10 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = data_length;
|
2016-10-12 16:32:10 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_write_read_size_gt_128k(void)
|
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned 128K + 1 block */
|
|
|
|
data_length = 128 * 1024;
|
|
|
|
data_length -= data_length % block_size;
|
|
|
|
data_length += block_size;
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = block_size * 2;
|
2016-07-20 18:16:23 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
2016-10-11 16:02:37 +00:00
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_writev_readv_size_gt_128k(void)
|
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-10-12 16:32:10 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned 128K + 1 block */
|
|
|
|
data_length = 128 * 1024;
|
|
|
|
data_length -= data_length % block_size;
|
|
|
|
data_length += block_size;
|
|
|
|
iov_len = data_length;
|
2016-10-12 16:32:10 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = block_size * 2;
|
2016-10-12 16:32:10 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
|
2016-10-12 16:32:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_writev_readv_size_gt_128k_two_iov(void)
|
|
|
|
{
|
|
|
|
uint32_t data_length, iov_len;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-10-12 16:32:10 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = block size aligned 128K + 1 block */
|
|
|
|
data_length = 128 * 1024;
|
|
|
|
data_length -= data_length % block_size;
|
|
|
|
iov_len = data_length;
|
|
|
|
data_length += block_size;
|
2016-10-12 16:32:10 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = block_size * 2;
|
2016-10-12 16:32:10 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, iov_len, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_write_read_invalid_size(void)
|
|
|
|
{
|
|
|
|
uint32_t data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
/* Data size is not a multiple of the block size */
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length = block_size - 1;
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = block_size * 2;
|
2016-07-20 18:16:23 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are invalid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is < 0 */
|
|
|
|
expected_rc = -1;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_write_read_offset_plus_nbytes_equals_bdev_size(void)
|
|
|
|
{
|
2022-09-20 08:39:04 +00:00
|
|
|
uint32_t data_length;
|
2016-07-20 18:16:23 +00:00
|
|
|
uint64_t offset;
|
2022-09-20 08:39:04 +00:00
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2017-05-12 17:29:00 +00:00
|
|
|
|
2022-09-20 08:39:04 +00:00
|
|
|
data_length = block_size;
|
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
2019-05-21 14:17:19 +00:00
|
|
|
/* The start offset has been set to a marginal value
|
|
|
|
* such that offset + nbytes == Total size of
|
|
|
|
* blockdev. */
|
|
|
|
offset = ((spdk_bdev_get_num_blocks(bdev) - 1) * block_size);
|
2022-09-20 08:39:04 +00:00
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-09-20 08:39:04 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_write_read_offset_plus_nbytes_gt_bdev_size(void)
|
|
|
|
{
|
2022-09-20 08:39:04 +00:00
|
|
|
uint32_t data_length;
|
2016-07-20 18:16:23 +00:00
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
2022-09-20 08:39:04 +00:00
|
|
|
int expected_rc;
|
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2022-04-19 09:15:15 +00:00
|
|
|
uint32_t block_size = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
|
|
|
/* Tests the overflow condition of the blockdevs. */
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length = block_size * 2;
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
pattern = 0xA3;
|
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
/* The start offset has been set to a valid value
|
|
|
|
* but offset + nbytes is greater than the Total size
|
|
|
|
* of the blockdev. The test should fail. */
|
2022-04-19 09:15:15 +00:00
|
|
|
offset = (spdk_bdev_get_num_blocks(bdev) - 1) * block_size;
|
2022-09-20 08:39:04 +00:00
|
|
|
/* Params are invalid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is < 0 */
|
|
|
|
expected_rc = -1;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-09-20 08:39:04 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_write_read_max_offset(void)
|
|
|
|
{
|
|
|
|
int data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
data_length = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
/* The start offset has been set to UINT64_MAX such that
|
|
|
|
* adding nbytes wraps around and points to an invalid address. */
|
|
|
|
offset = UINT64_MAX;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are invalid, hence the expected return value
|
|
|
|
* of write and read for all blockdevs is < 0 */
|
|
|
|
expected_rc = -1;
|
|
|
|
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-04-19 09:15:15 +00:00
|
|
|
blockdev_overlapped_write_read_2blocks(void)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
|
|
|
int data_length;
|
|
|
|
uint64_t offset;
|
|
|
|
int pattern;
|
|
|
|
int expected_rc;
|
2022-04-19 09:15:15 +00:00
|
|
|
struct io_target *target = g_current_io_target;
|
|
|
|
struct spdk_bdev *bdev = target->bdev;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Data size = 2 blocks */
|
|
|
|
data_length = spdk_bdev_get_block_size(bdev) * 2;
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_ASSERT_TRUE(data_length < BUFFER_SIZE);
|
|
|
|
offset = 0;
|
|
|
|
pattern = 0xA3;
|
|
|
|
/* Params are valid, hence the expected return value
|
2016-10-11 16:02:37 +00:00
|
|
|
* of write and read for all blockdevs is 0. */
|
|
|
|
expected_rc = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
/* Assert the write by comparing it with values read
|
|
|
|
* from the same offset for each blockdev */
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Overwrite the pattern 0xbb of size 2*block size on an address offset
|
|
|
|
* overlapping with the address written above and assert the new value in
|
2016-07-20 18:16:23 +00:00
|
|
|
* the overlapped address range */
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Populate 2*block size with value 0xBB */
|
2016-07-20 18:16:23 +00:00
|
|
|
pattern = 0xBB;
|
2022-04-19 09:15:15 +00:00
|
|
|
/* Offset = 1 block; Overlap offset addresses and write value 0xbb */
|
|
|
|
offset = spdk_bdev_get_block_size(bdev);
|
2016-07-20 18:16:23 +00:00
|
|
|
/* Assert the write by comparing it with values read
|
|
|
|
* from the overlapped offset for each blockdev */
|
2017-07-28 22:34:24 +00:00
|
|
|
blockdev_write_read(data_length, 0, pattern, offset, expected_rc, 0);
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
2016-08-02 17:54:09 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_reset(void *arg)
|
2016-08-02 17:54:09 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_request *req = arg;
|
2016-08-02 17:54:09 +00:00
|
|
|
struct io_target *target = req->target;
|
|
|
|
int rc;
|
|
|
|
|
2017-07-06 19:39:19 +00:00
|
|
|
rc = spdk_bdev_reset(target->bdev_desc, target->ch, quick_test_complete, NULL);
|
2016-08-02 17:54:09 +00:00
|
|
|
if (rc < 0) {
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-08-02 17:54:09 +00:00
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2019-05-24 13:19:31 +00:00
|
|
|
blockdev_test_reset(void)
|
2016-08-02 17:54:09 +00:00
|
|
|
{
|
|
|
|
struct bdevio_request req;
|
2019-05-24 13:19:31 +00:00
|
|
|
struct io_target *target;
|
2022-04-12 16:32:25 +00:00
|
|
|
bool reset_supported;
|
2016-08-02 17:54:09 +00:00
|
|
|
|
2019-05-24 13:19:31 +00:00
|
|
|
target = g_current_io_target;
|
2016-08-02 17:54:09 +00:00
|
|
|
req.target = target;
|
|
|
|
|
2022-04-12 16:32:25 +00:00
|
|
|
reset_supported = spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_RESET);
|
2017-05-16 20:25:03 +00:00
|
|
|
g_completion_success = false;
|
2016-08-02 17:54:09 +00:00
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_reset, &req);
|
2016-08-02 17:54:09 +00:00
|
|
|
|
2022-04-12 16:32:25 +00:00
|
|
|
CU_ASSERT_EQUAL(g_completion_success, reset_supported);
|
2016-08-02 17:54:09 +00:00
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-02 22:51:19 +00:00
|
|
|
/* State for one NVMe passthrough command driven from the UT thread.
 * sct/sc/cdw0 are filled in by nvme_pt_test_complete() on completion. */
struct bdevio_passthrough_request {
	struct spdk_nvme_cmd cmd;	/* NVMe command to submit */
	void *buf;			/* data buffer, may be NULL */
	uint32_t len;			/* length of buf in bytes */
	struct io_target *target;	/* target bdev/channel to submit on */
	int sct;			/* out: NVMe status code type */
	int sc;				/* out: NVMe status code */
	uint32_t cdw0;			/* out: completion dword 0 */
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
|
|
|
|
{
|
|
|
|
struct bdevio_passthrough_request *pt_req = arg;
|
|
|
|
|
2019-10-07 16:08:55 +00:00
|
|
|
spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
|
2019-05-02 22:51:19 +00:00
|
|
|
spdk_bdev_free_io(bdev_io);
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_nvme_passthru(void *arg)
|
2019-05-02 22:51:19 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_passthrough_request *pt_req = arg;
|
2019-05-02 22:51:19 +00:00
|
|
|
struct io_target *target = pt_req->target;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = spdk_bdev_nvme_io_passthru(target->bdev_desc, target->ch,
|
|
|
|
&pt_req->cmd, pt_req->buf, pt_req->len,
|
|
|
|
nvme_pt_test_complete, pt_req);
|
|
|
|
if (rc) {
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2019-05-24 13:19:31 +00:00
|
|
|
blockdev_test_nvme_passthru_rw(void)
|
2019-05-02 22:51:19 +00:00
|
|
|
{
|
|
|
|
struct bdevio_passthrough_request pt_req;
|
|
|
|
void *write_buf, *read_buf;
|
2019-05-24 13:19:31 +00:00
|
|
|
struct io_target *target;
|
|
|
|
|
|
|
|
target = g_current_io_target;
|
2019-05-02 22:51:19 +00:00
|
|
|
|
|
|
|
if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&pt_req, 0, sizeof(pt_req));
|
|
|
|
pt_req.target = target;
|
|
|
|
pt_req.cmd.opc = SPDK_NVME_OPC_WRITE;
|
|
|
|
pt_req.cmd.nsid = 1;
|
|
|
|
*(uint64_t *)&pt_req.cmd.cdw10 = 4;
|
|
|
|
pt_req.cmd.cdw12 = 0;
|
|
|
|
|
|
|
|
pt_req.len = spdk_bdev_get_block_size(target->bdev);
|
2019-06-27 04:59:47 +00:00
|
|
|
write_buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
|
2019-05-02 22:51:19 +00:00
|
|
|
memset(write_buf, 0xA5, pt_req.len);
|
|
|
|
pt_req.buf = write_buf;
|
|
|
|
|
|
|
|
pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
|
|
|
|
pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
|
2019-05-02 22:51:19 +00:00
|
|
|
CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
|
|
|
|
CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
|
|
|
|
|
|
|
|
pt_req.cmd.opc = SPDK_NVME_OPC_READ;
|
2019-06-27 04:59:47 +00:00
|
|
|
read_buf = spdk_zmalloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
|
2019-05-02 22:51:19 +00:00
|
|
|
pt_req.buf = read_buf;
|
|
|
|
|
|
|
|
pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
|
|
|
|
pt_req.sc = SPDK_NVME_SC_INVALID_FIELD;
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
|
2019-05-02 22:51:19 +00:00
|
|
|
CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
|
|
|
|
CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
|
|
|
|
|
|
|
|
CU_ASSERT(!memcmp(read_buf, write_buf, pt_req.len));
|
2019-06-27 04:59:47 +00:00
|
|
|
spdk_free(read_buf);
|
|
|
|
spdk_free(write_buf);
|
2019-05-02 22:51:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2019-05-24 13:19:31 +00:00
|
|
|
blockdev_test_nvme_passthru_vendor_specific(void)
|
2019-05-02 22:51:19 +00:00
|
|
|
{
|
2019-05-24 13:19:31 +00:00
|
|
|
struct bdevio_passthrough_request pt_req;
|
|
|
|
struct io_target *target;
|
2019-05-02 22:51:19 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
target = g_current_io_target;
|
2019-05-02 23:09:47 +00:00
|
|
|
|
|
|
|
if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_IO)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&pt_req, 0, sizeof(pt_req));
|
|
|
|
pt_req.target = target;
|
|
|
|
pt_req.cmd.opc = 0x7F; /* choose known invalid opcode */
|
|
|
|
pt_req.cmd.nsid = 1;
|
|
|
|
|
|
|
|
pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
|
|
|
|
pt_req.sc = SPDK_NVME_SC_SUCCESS;
|
2019-10-07 16:08:55 +00:00
|
|
|
pt_req.cdw0 = 0xbeef;
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
|
2019-05-02 23:09:47 +00:00
|
|
|
CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
|
|
|
|
CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
|
2019-10-07 16:08:55 +00:00
|
|
|
CU_ASSERT(pt_req.cdw0 == 0x0);
|
2019-05-02 23:09:47 +00:00
|
|
|
}
|
|
|
|
|
2019-05-15 12:07:14 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__blockdev_nvme_admin_passthru(void *arg)
|
2019-05-15 12:07:14 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct bdevio_passthrough_request *pt_req = arg;
|
2019-05-15 12:07:14 +00:00
|
|
|
struct io_target *target = pt_req->target;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = spdk_bdev_nvme_admin_passthru(target->bdev_desc, target->ch,
|
|
|
|
&pt_req->cmd, pt_req->buf, pt_req->len,
|
|
|
|
nvme_pt_test_complete, pt_req);
|
|
|
|
if (rc) {
|
|
|
|
wake_ut_thread();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
blockdev_test_nvme_admin_passthru(void)
|
|
|
|
{
|
|
|
|
struct io_target *target;
|
|
|
|
struct bdevio_passthrough_request pt_req;
|
|
|
|
|
|
|
|
target = g_current_io_target;
|
|
|
|
|
|
|
|
if (!spdk_bdev_io_type_supported(target->bdev, SPDK_BDEV_IO_TYPE_NVME_ADMIN)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&pt_req, 0, sizeof(pt_req));
|
|
|
|
pt_req.target = target;
|
|
|
|
pt_req.cmd.opc = SPDK_NVME_OPC_IDENTIFY;
|
|
|
|
pt_req.cmd.nsid = 0;
|
|
|
|
*(uint64_t *)&pt_req.cmd.cdw10 = SPDK_NVME_IDENTIFY_CTRLR;
|
|
|
|
|
|
|
|
pt_req.len = sizeof(struct spdk_nvme_ctrlr_data);
|
2019-06-27 04:59:47 +00:00
|
|
|
pt_req.buf = spdk_malloc(pt_req.len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
|
2019-05-15 12:07:14 +00:00
|
|
|
|
|
|
|
pt_req.sct = SPDK_NVME_SCT_GENERIC;
|
|
|
|
pt_req.sc = SPDK_NVME_SC_SUCCESS;
|
2019-09-13 22:30:47 +00:00
|
|
|
execute_spdk_function(__blockdev_nvme_admin_passthru, &pt_req);
|
2019-05-15 12:07:14 +00:00
|
|
|
CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
|
|
|
|
CU_ASSERT(pt_req.sc == SPDK_NVME_SC_SUCCESS);
|
|
|
|
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__stop_init_thread(void *arg)
|
2016-07-20 18:16:23 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
unsigned num_failures = g_num_failures;
|
|
|
|
struct spdk_jsonrpc_request *request = arg;
|
|
|
|
|
|
|
|
g_num_failures = 0;
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2017-10-16 15:14:36 +00:00
|
|
|
bdevio_cleanup_targets();
|
2020-10-09 21:20:52 +00:00
|
|
|
if (g_wait_for_tests && !g_shutdown) {
|
2019-05-23 07:53:44 +00:00
|
|
|
/* Do not stop the app yet, wait for another RPC */
|
|
|
|
rpc_perform_tests_cb(num_failures, request);
|
|
|
|
return;
|
|
|
|
}
|
2017-10-16 15:14:36 +00:00
|
|
|
spdk_app_stop(num_failures);
|
|
|
|
}
|
2017-07-07 20:15:22 +00:00
|
|
|
|
2017-10-16 15:14:36 +00:00
|
|
|
static void
|
2019-05-23 07:53:44 +00:00
|
|
|
stop_init_thread(unsigned num_failures, struct spdk_jsonrpc_request *request)
|
2017-10-16 15:14:36 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
g_num_failures = num_failures;
|
2017-01-05 19:44:24 +00:00
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
spdk_thread_send_msg(g_thread_init, __stop_init_thread, request);
|
2017-10-16 15:14:36 +00:00
|
|
|
}
|
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
/* CUnit suite-init callback: before the very first suite runs, point the
 * current target at the head of the global target list.  Subsequent suites
 * keep whatever suite_fini() advanced to. */
static int
suite_init(void)
{
	if (g_current_io_target == NULL) {
		g_current_io_target = g_io_targets;
	}
	return 0;
}
|
|
|
|
|
|
|
|
/* CUnit suite-cleanup callback: advance to the next target so the next
 * suite exercises the next bdev in the list. */
static int
suite_fini(void)
{
	g_current_io_target = g_current_io_target->next;
	return 0;
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
#define SUITE_NAME_MAX 64
|
|
|
|
|
|
|
|
static int
|
|
|
|
__setup_ut_on_single_target(struct io_target *target)
|
|
|
|
{
|
|
|
|
unsigned rc = 0;
|
|
|
|
CU_pSuite suite = NULL;
|
|
|
|
char name[SUITE_NAME_MAX];
|
|
|
|
|
|
|
|
snprintf(name, sizeof(name), "bdevio tests on: %s", spdk_bdev_get_name(target->bdev));
|
|
|
|
suite = CU_add_suite(name, suite_init, suite_fini);
|
2016-07-20 18:16:23 +00:00
|
|
|
if (suite == NULL) {
|
|
|
|
CU_cleanup_registry();
|
2019-05-21 14:17:19 +00:00
|
|
|
rc = CU_get_error();
|
|
|
|
return -rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (
|
2022-04-19 09:15:15 +00:00
|
|
|
CU_add_test(suite, "blockdev write read block",
|
|
|
|
blockdev_write_read_block) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev write zeroes read block",
|
|
|
|
blockdev_write_zeroes_read_block) == NULL
|
2022-04-25 17:13:18 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write zeroes read no split",
|
|
|
|
blockdev_write_zeroes_read_no_split) == NULL
|
2022-04-19 09:15:15 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write zeroes read split",
|
|
|
|
blockdev_write_zeroes_read_split) == NULL
|
2022-04-25 17:13:18 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write zeroes read split partial",
|
|
|
|
blockdev_write_zeroes_read_split_partial) == NULL
|
2019-05-10 00:03:15 +00:00
|
|
|
|| CU_add_test(suite, "blockdev reset",
|
|
|
|
blockdev_test_reset) == NULL
|
2022-04-19 09:15:15 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write read 8 blocks",
|
|
|
|
blockdev_write_read_8blocks) == NULL
|
2016-07-20 18:16:23 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write read size > 128k",
|
|
|
|
blockdev_write_read_size_gt_128k) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev write read invalid size",
|
|
|
|
blockdev_write_read_invalid_size) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev write read offset + nbytes == size of blockdev",
|
|
|
|
blockdev_write_read_offset_plus_nbytes_equals_bdev_size) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev write read offset + nbytes > size of blockdev",
|
|
|
|
blockdev_write_read_offset_plus_nbytes_gt_bdev_size) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev write read max offset",
|
|
|
|
blockdev_write_read_max_offset) == NULL
|
2022-04-19 09:15:15 +00:00
|
|
|
|| CU_add_test(suite, "blockdev write read 2 blocks on overlapped address offset",
|
|
|
|
blockdev_overlapped_write_read_2blocks) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev writev readv 8 blocks",
|
|
|
|
blockdev_writev_readv_8blocks) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev writev readv 30 x 1block",
|
|
|
|
blockdev_writev_readv_30x1block) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev writev readv block",
|
|
|
|
blockdev_writev_readv_block) == NULL
|
2016-10-12 16:32:10 +00:00
|
|
|
|| CU_add_test(suite, "blockdev writev readv size > 128k",
|
|
|
|
blockdev_writev_readv_size_gt_128k) == NULL
|
|
|
|
|| CU_add_test(suite, "blockdev writev readv size > 128k in two iovs",
|
|
|
|
blockdev_writev_readv_size_gt_128k_two_iov) == NULL
|
2022-04-19 09:15:15 +00:00
|
|
|
|| CU_add_test(suite, "blockdev comparev and writev",
|
|
|
|
blockdev_comparev_and_writev) == NULL
|
2019-05-02 22:51:19 +00:00
|
|
|
|| CU_add_test(suite, "blockdev nvme passthru rw",
|
|
|
|
blockdev_test_nvme_passthru_rw) == NULL
|
2019-05-02 23:09:47 +00:00
|
|
|
|| CU_add_test(suite, "blockdev nvme passthru vendor specific",
|
|
|
|
blockdev_test_nvme_passthru_vendor_specific) == NULL
|
2019-05-15 12:07:14 +00:00
|
|
|
|| CU_add_test(suite, "blockdev nvme admin passthru",
|
|
|
|
blockdev_test_nvme_admin_passthru) == NULL
|
2016-07-20 18:16:23 +00:00
|
|
|
) {
|
|
|
|
CU_cleanup_registry();
|
2019-05-21 14:17:19 +00:00
|
|
|
rc = CU_get_error();
|
|
|
|
return -rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|
2019-05-21 14:17:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
|
2019-05-21 14:17:19 +00:00
|
|
|
static void
|
2019-09-13 22:30:47 +00:00
|
|
|
__run_ut_thread(void *arg)
|
2019-05-21 14:17:19 +00:00
|
|
|
{
|
2019-09-13 22:30:47 +00:00
|
|
|
struct spdk_jsonrpc_request *request = arg;
|
2019-05-21 14:17:19 +00:00
|
|
|
int rc = 0;
|
|
|
|
struct io_target *target;
|
|
|
|
unsigned num_failures;
|
|
|
|
|
|
|
|
if (CU_initialize_registry() != CUE_SUCCESS) {
|
|
|
|
/* CUnit error, probably won't recover */
|
|
|
|
rc = CU_get_error();
|
2019-05-23 07:53:44 +00:00
|
|
|
stop_init_thread(-rc, request);
|
2019-05-21 14:17:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
target = g_io_targets;
|
|
|
|
while (target != NULL) {
|
|
|
|
rc = __setup_ut_on_single_target(target);
|
|
|
|
if (rc < 0) {
|
|
|
|
/* CUnit error, probably won't recover */
|
2019-05-23 07:53:44 +00:00
|
|
|
stop_init_thread(-rc, request);
|
2019-05-21 14:17:19 +00:00
|
|
|
}
|
|
|
|
target = target->next;
|
|
|
|
}
|
2016-07-20 18:16:23 +00:00
|
|
|
CU_basic_set_mode(CU_BRM_VERBOSE);
|
|
|
|
CU_basic_run_tests();
|
|
|
|
num_failures = CU_get_number_of_failures();
|
|
|
|
CU_cleanup_registry();
|
2019-05-21 14:17:19 +00:00
|
|
|
|
2019-05-23 07:53:44 +00:00
|
|
|
stop_init_thread(num_failures, request);
|
2017-10-16 15:14:36 +00:00
|
|
|
}
|
|
|
|
|
2019-09-13 22:30:47 +00:00
|
|
|
/* Runs on the init thread: open an io_target for every registered bdev,
 * then hand off to the UT thread to run the CUnit suites. */
static void
__construct_targets(void *arg)
{
	if (bdevio_construct_targets() < 0) {
		/* Could not open/claim every bdev; abort the whole app. */
		spdk_app_stop(-1);
		return;
	}

	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, NULL);
}
|
|
|
|
|
2017-10-16 15:14:36 +00:00
|
|
|
/* spdk_app_start() entry point: verify there are enough cores, pin the
 * dedicated ut/io threads to their own cores and, unless -w was given,
 * kick off target construction on the init thread. */
static void
test_main(void *arg1)
{
	struct spdk_cpuset tmpmask = {};
	uint32_t i;

	pthread_mutex_init(&g_test_mutex, NULL);
	pthread_cond_init(&g_test_cond, NULL);

	/* This test runs specifically on at least three cores.
	 * g_thread_init is the app_thread on main core from event framework.
	 * Next two are only for the tests and should always be on separate CPU cores. */
	if (spdk_env_get_core_count() < 3) {
		spdk_app_stop(-1);
		return;
	}

	SPDK_ENV_FOREACH_CORE(i) {
		if (i == spdk_env_get_current_core()) {
			/* The main core keeps the framework's app thread. */
			g_thread_init = spdk_get_thread();
			continue;
		}
		spdk_cpuset_zero(&tmpmask);
		spdk_cpuset_set_cpu(&tmpmask, i, true);
		/* First spare core hosts the UT thread, the second the IO thread. */
		if (g_thread_ut == NULL) {
			g_thread_ut = spdk_thread_create("ut_thread", &tmpmask);
		} else if (g_thread_io == NULL) {
			g_thread_io = spdk_thread_create("io_thread", &tmpmask);
		}

	}

	if (g_wait_for_tests) {
		/* Do not perform any tests until RPC is received */
		return;
	}

	spdk_thread_send_msg(g_thread_init, __construct_targets, NULL);
}
|
|
|
|
|
2018-08-09 10:56:48 +00:00
|
|
|
/* Print usage for the bdevio-specific command-line options. */
static void
bdevio_usage(void)
{
	printf(" -w start bdevio app and wait for RPC to start the tests\n");
}
|
|
|
|
|
2019-01-08 19:40:06 +00:00
|
|
|
static int
|
2018-08-09 10:56:48 +00:00
|
|
|
bdevio_parse_arg(int ch, char *arg)
|
|
|
|
{
|
2019-05-23 07:53:44 +00:00
|
|
|
switch (ch) {
|
|
|
|
case 'w':
|
|
|
|
g_wait_for_tests = true;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2019-01-08 19:40:06 +00:00
|
|
|
return 0;
|
2018-08-09 10:56:48 +00:00
|
|
|
}
|
|
|
|
|
2019-05-23 07:53:44 +00:00
|
|
|
/* Decoded parameters of the "perform_tests" RPC. */
struct rpc_perform_tests {
	char *name;	/* optional bdev name; NULL means test every bdev */
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
free_rpc_perform_tests(struct rpc_perform_tests *r)
|
|
|
|
{
|
|
|
|
free(r->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* JSON decoders for the "perform_tests" RPC; "name" is optional
 * (trailing 'true' marks the field as such). */
static const struct spdk_json_object_decoder rpc_perform_tests_decoders[] = {
	{"name", offsetof(struct rpc_perform_tests, name), spdk_json_decode_string, true},
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
rpc_perform_tests_cb(unsigned num_failures, struct spdk_jsonrpc_request *request)
|
|
|
|
{
|
|
|
|
struct spdk_json_write_ctx *w;
|
|
|
|
|
2019-06-11 12:37:43 +00:00
|
|
|
if (num_failures == 0) {
|
|
|
|
w = spdk_jsonrpc_begin_result(request);
|
|
|
|
spdk_json_write_uint32(w, num_failures);
|
|
|
|
spdk_jsonrpc_end_result(request, w);
|
|
|
|
} else {
|
|
|
|
spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
|
|
|
|
"%d test cases failed", num_failures);
|
2019-05-23 07:53:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handler for the "perform_tests" RPC: construct a target for the named bdev
 * (or for every bdev when no name is supplied) and start the CUnit run on
 * the UT thread.  The RPC is answered asynchronously by
 * rpc_perform_tests_cb() once the run completes. */
static void
rpc_perform_tests(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_perform_tests req = {NULL};
	struct spdk_bdev *bdev;
	int rc;

	if (params && spdk_json_decode_object(params, rpc_perform_tests_decoders,
					      SPDK_COUNTOF(rpc_perform_tests_decoders),
					      &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, "Invalid parameters");
		goto invalid;
	}

	if (req.name) {
		/* A specific bdev was requested: it must exist and be claimable. */
		bdev = spdk_bdev_get_by_name(req.name);
		if (bdev == NULL) {
			SPDK_ERRLOG("Bdev '%s' does not exist\n", req.name);
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Bdev '%s' does not exist: %s",
							     req.name, spdk_strerror(ENODEV));
			goto invalid;
		}
		rc = bdevio_construct_target(bdev);
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct target for bdev '%s'\n", spdk_bdev_get_name(bdev));
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct target for bdev '%s': %s",
							     spdk_bdev_get_name(bdev), spdk_strerror(-rc));
			goto invalid;
		}
	} else {
		/* No name given: build targets for every registered bdev. */
		rc = bdevio_construct_targets();
		if (rc < 0) {
			SPDK_ERRLOG("Could not construct targets for all bdevs\n");
			spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
							     "Could not construct targets for all bdevs: %s",
							     spdk_strerror(-rc));
			goto invalid;
		}
	}
	free_rpc_perform_tests(&req);

	/* Hand the pending request to the UT thread; it is answered later. */
	spdk_thread_send_msg(g_thread_ut, __run_ut_thread, request);

	return;

invalid:
	free_rpc_perform_tests(&req);
}
SPDK_RPC_REGISTER("perform_tests", rpc_perform_tests, SPDK_RPC_RUNTIME)
|
|
|
|
|
2020-10-09 21:20:52 +00:00
|
|
|
/* App shutdown callback (e.g. SIGINT/SIGTERM): flag shutdown so the stop
 * path skips the RPC reply, then finish teardown on the init thread. */
static void
spdk_bdevio_shutdown_cb(void)
{
	g_shutdown = true;
	spdk_thread_send_msg(g_thread_init, __stop_init_thread, NULL);
}
|
|
|
|
|
2016-10-11 16:02:37 +00:00
|
|
|
int
|
|
|
|
main(int argc, char **argv)
|
|
|
|
{
|
2018-08-17 14:39:25 +00:00
|
|
|
int rc;
|
2017-06-06 06:31:47 +00:00
|
|
|
struct spdk_app_opts opts = {};
|
2016-10-11 16:02:37 +00:00
|
|
|
|
2020-11-30 11:38:37 +00:00
|
|
|
spdk_app_opts_init(&opts, sizeof(opts));
|
2019-02-21 13:19:25 +00:00
|
|
|
opts.name = "bdevio";
|
2018-08-09 10:56:48 +00:00
|
|
|
opts.reactor_mask = "0x7";
|
2020-10-09 21:20:52 +00:00
|
|
|
opts.shutdown_cb = spdk_bdevio_shutdown_cb;
|
2018-08-09 10:56:48 +00:00
|
|
|
|
2019-05-23 07:53:44 +00:00
|
|
|
if ((rc = spdk_app_parse_args(argc, argv, &opts, "w", NULL,
|
2018-08-17 14:39:25 +00:00
|
|
|
bdevio_parse_arg, bdevio_usage)) !=
|
|
|
|
SPDK_APP_PARSE_ARGS_SUCCESS) {
|
|
|
|
return rc;
|
|
|
|
}
|
2016-10-11 16:02:37 +00:00
|
|
|
|
2019-02-28 21:42:07 +00:00
|
|
|
rc = spdk_app_start(&opts, test_main, NULL);
|
2016-10-11 16:02:37 +00:00
|
|
|
spdk_app_fini();
|
|
|
|
|
2018-08-17 14:39:25 +00:00
|
|
|
return rc;
|
2016-07-20 18:16:23 +00:00
|
|
|
}
|