ocssd: vector commands

This patch adds support for the following Open-Channel vector commands:
* Vector Chunk Reset
* Vector Chunk Write
* Vector Chunk Read
* Vector Chunk Copy

The implementation is consistent with the Open-Channel specification (rev. 2.0).

It also provides unit tests for the new public NVMe Open-Channel
namespace commands.

Change-Id: Ic71be9357c61c5de82ca672e82a71aa933bd1875
Signed-off-by: Jakub Radtke <jakub.radtke@intel.com>
Reviewed-on: https://review.gerrithub.io/414969
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Xiaodong Liu <xiaodong.liu@intel.com>
Reviewed-by: Young Tack Jin <youngtack.jin@circuitblvd.com>
Jakub Radtke 2018-06-07 11:16:52 +02:00 committed by Jim Harris
parent 160850f0a6
commit c182da27b5
9 changed files with 1092 additions and 2 deletions


@@ -73,6 +73,151 @@ int spdk_nvme_ocssd_ctrlr_cmd_geometry(struct spdk_nvme_ctrlr *ctrlr, uint32_t n
void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a vector reset command to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param lba_list an array of LBAs for processing.
* LBAs must correspond to the start of chunks to reset.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
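For reference, a minimal usage sketch (not part of this patch): it assumes an already initialized ns and qpair, and the two chunk start LBAs are placeholders obtained elsewhere (e.g. from the geometry and chunk information logs).

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/nvme_ocssd.h"

/* Hedged sketch: reset two chunks with a single vector command.
 * All names other than the SPDK calls are illustrative assumptions. */
static void
reset_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	*(bool *)cb_arg = true;		/* flag polled by the caller below */
}

static int
reset_two_chunks(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		 uint64_t chunk0_slba, uint64_t chunk1_slba)
{
	bool done = false;
	/* A multi-entry LBA list must live in DMA-safe memory. */
	uint64_t *lba_list = spdk_dma_malloc(2 * sizeof(uint64_t), 0, NULL);
	int rc;

	if (lba_list == NULL) {
		return -ENOMEM;
	}
	lba_list[0] = chunk0_slba;
	lba_list[1] = chunk1_slba;
	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(ns, qpair, lba_list, 2,
						 reset_done, &done);
	if (rc == 0) {
		while (!done) {
			spdk_nvme_qpair_process_completions(qpair, 0);
		}
	}
	spdk_dma_free(lba_list);
	return rc;
}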
/**
* \brief Submits a vector write command to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param buffer virtual address pointer to the data payload
* \param lba_list an array of LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the SPDK_OCSSD_IO_FLAGS_* entries
* in spdk/nvme_ocssd_spec.h, for this I/O.
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_write(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
/**
* \brief Submits a vector write command with metadata to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param buffer virtual address pointer to the data payload
* \param metadata virtual address pointer to the metadata payload; the length
* of the metadata is specified by spdk_nvme_ns_get_md_size()
* \param lba_list an array of LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the SPDK_OCSSD_IO_FLAGS_* entries
* in spdk/nvme_ocssd_spec.h, for this I/O.
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_write_with_md(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer, void *metadata,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
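A hedged sketch of a multi-LBA write with per-sector metadata follows, again assuming an initialized ns/qpair, a DMA-safe lba_list, and a non-zero metadata size; the plain spdk_nvme_ocssd_ns_cmd_vector_write() takes the same arguments minus the metadata pointer. The helper name and the 4 KiB alignment are assumptions, not part of the patch.

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/nvme.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/nvme_ocssd_spec.h"

/* Illustrative only: submit a vector write covering num_lbas sectors. */
static int
vector_write_example(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		     uint64_t *lba_list, uint32_t num_lbas,
		     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t sector_size = spdk_nvme_ns_get_sector_size(ns);
	uint32_t md_size = spdk_nvme_ns_get_md_size(ns);
	void *buf, *md;
	int rc;

	if (num_lbas == 0 || num_lbas > SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES) {
		return -EINVAL;
	}
	/* Data and metadata payloads must come from spdk_dma_malloc()/zmalloc(). */
	buf = spdk_dma_zmalloc((size_t)num_lbas * sector_size, 0x1000, NULL);
	md = spdk_dma_zmalloc((size_t)num_lbas * md_size, 0x1000, NULL);
	if (buf == NULL || md == NULL) {
		spdk_dma_free(buf);
		spdk_dma_free(md);
		return -ENOMEM;
	}
	/* ... fill buf and md with application data here ... */
	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(ns, qpair, buf, md,
			lba_list, num_lbas, cb_fn, cb_arg, 0);
	if (rc != 0) {
		spdk_dma_free(buf);
		spdk_dma_free(md);
	}
	/* On success, buf, md and lba_list must remain valid until cb_fn runs;
	 * the callback (not shown) is expected to free them. */
	return rc;
}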
/**
* \brief Submits a vector read command to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param buffer virtual address pointer to the data payload
* \param lba_list an array of LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the SPDK_OCSSD_IO_FLAGS_* entries
* in spdk/nvme_ocssd_spec.h, for this I/O.
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_read(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
/**
* \brief Submits a vector read command with metadata to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param buffer virtual address pointer to the data payload
* \param metadata virtual address pointer to the metadata payload; the length
* of the metadata is specified by spdk_nvme_ns_get_md_size()
* \param lba_list an array of LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the SPDK_OCSSD_IO_FLAGS_* entries
* in spdk/nvme_ocssd_spec.h, for this I/O.
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_read_with_md(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer, void *metadata,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
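The read variants mirror the writes; what changes is what the application does at completion time. A hedged sketch of a completion callback follows (the struct and field names are assumptions); it could be passed as cb_fn/cb_arg to any of the vector commands above and is invoked from spdk_nvme_qpair_process_completions().

#include "spdk/stdinc.h"
#include "spdk/nvme.h"

struct my_io_ctx {			/* hypothetical application context */
	bool failed;
	uint32_t outstanding;
};

/* Called when the vector command completes; cpl carries the NVMe status. */
static void
vector_io_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	struct my_io_ctx *io = cb_arg;

	if (spdk_nvme_cpl_is_error(cpl)) {
		io->failed = true;	/* e.g. one or more vectors failed */
	}
	io->outstanding--;
	/* Data, metadata and LBA-list buffers may be freed or consumed here. */
}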
/**
* \brief Submits a vector copy command to the specified NVMe namespace.
*
* \param ns NVMe namespace to submit the command to
* \param qpair I/O queue pair used to submit the request
* \param dst_lba_list an array of destination LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param src_lba_list an array of source LBAs for processing.
* Must be allocated through spdk_dma_malloc() or its variants
* \param num_lbas number of LBAs stored in src_lba_list and dst_lba_list
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the SPDK_OCSSD_IO_FLAGS_* entries
* in spdk/nvme_ocssd_spec.h, for this I/O.
*
* \return 0 if successfully submitted, negated errno on failure:
* -EINVAL if the arguments are invalid, -ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*/
int spdk_nvme_ocssd_ns_cmd_vector_copy(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
uint64_t *dst_lba_list, uint64_t *src_lba_list,
uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
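Finally, a hedged sketch of a vector copy under the same assumptions: src and dst describe the same number of sectors, and the helper (a name assumed here, not part of the patch) copies them into DMA-safe lists before submission.

#include "spdk/stdinc.h"
#include "spdk/env.h"
#include "spdk/nvme_ocssd.h"
#include "spdk/nvme_ocssd_spec.h"

static int
vector_copy_example(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		    const uint64_t *src, const uint64_t *dst, uint32_t num_lbas,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint64_t *src_list, *dst_list;
	int rc;

	if (num_lbas == 0 || num_lbas > SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES) {
		return -EINVAL;
	}
	src_list = spdk_dma_malloc(num_lbas * sizeof(uint64_t), 0, NULL);
	dst_list = spdk_dma_malloc(num_lbas * sizeof(uint64_t), 0, NULL);
	if (src_list == NULL || dst_list == NULL) {
		spdk_dma_free(src_list);
		spdk_dma_free(dst_list);
		return -ENOMEM;
	}
	memcpy(src_list, src, num_lbas * sizeof(uint64_t));
	memcpy(dst_list, dst, num_lbas * sizeof(uint64_t));
	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(ns, qpair, dst_list, src_list,
						num_lbas, cb_fn, cb_arg, 0);
	if (rc != 0) {
		spdk_dma_free(src_list);
		spdk_dma_free(dst_list);
	}
	/* On success both lists must stay valid until cb_fn runs. */
	return rc;
}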
#ifdef __cplusplus
}
#endif


@@ -47,6 +47,9 @@ extern "C" {
#include "spdk/assert.h"
/** The maximum number of LBAs that can be addressed by a single vector I/O command */
#define SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES 64
struct spdk_ocssd_dev_lba_fmt {
/** Contiguous number of bits assigned to Group addressing */
uint8_t grp_len;


@@ -34,7 +34,8 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
C_SRCS = nvme_ctrlr_cmd.c nvme_ctrlr.c nvme_ns_cmd.c nvme_ns.c nvme_pcie.c nvme_qpair.c nvme.c nvme_quirks.c nvme_transport.c nvme_uevent.c nvme_ctrlr_ocssd_cmd.c
C_SRCS = nvme_ctrlr_cmd.c nvme_ctrlr.c nvme_ns_cmd.c nvme_ns.c nvme_pcie.c nvme_qpair.c nvme.c nvme_quirks.c nvme_transport.c nvme_uevent.c nvme_ctrlr_ocssd_cmd.c \
nvme_ns_ocssd_cmd.c
C_SRCS-$(CONFIG_RDMA) += nvme_rdma.c
LIBNAME = nvme


@@ -0,0 +1,227 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk/nvme_ocssd.h"
#include "nvme_internal.h"
int
spdk_nvme_ocssd_ns_cmd_vector_reset(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
if (!lba_list || (num_lbas == 0) ||
(num_lbas > SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES)) {
return -EINVAL;
}
req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
if (req == NULL) {
return -ENOMEM;
}
cmd = &req->cmd;
cmd->opc = SPDK_OCSSD_OPC_VECTOR_RESET;
cmd->nsid = ns->id;
/*
* Dwords 10 and 11 store a pointer to the list of logical block addresses.
* If there is a single entry in the LBA list, the logical block
* address should be stored instead.
*/
if (num_lbas == 1) {
*(uint64_t *)&cmd->cdw10 = *lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list);
}
cmd->cdw12 = num_lbas - 1;
return nvme_qpair_submit_request(qpair, req);
}
static int
_nvme_ocssd_ns_cmd_vector_rw_with_md(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer, void *metadata,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
enum spdk_ocssd_io_opcode opc,
uint32_t io_flags)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
struct nvme_payload payload;
uint32_t valid_flags = SPDK_OCSSD_IO_FLAGS_LIMITED_RETRY;
if (io_flags & ~valid_flags) {
return -EINVAL;
}
if (!buffer || !lba_list || (num_lbas == 0) ||
(num_lbas > SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES)) {
return -EINVAL;
}
payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
req = nvme_allocate_request(qpair, &payload, num_lbas * ns->sector_size, cb_fn, cb_arg);
if (req == NULL) {
return -ENOMEM;
}
cmd = &req->cmd;
cmd->opc = opc;
cmd->nsid = ns->id;
/*
* Dwords 10 and 11 store a pointer to the list of logical block addresses.
* If there is a single entry in the LBA list, the logical block
* address should be stored instead.
*/
if (num_lbas == 1) {
*(uint64_t *)&cmd->cdw10 = *lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(lba_list);
}
cmd->cdw12 = num_lbas - 1;
cmd->cdw12 |= io_flags;
return nvme_qpair_submit_request(qpair, req);
}
int
spdk_nvme_ocssd_ns_cmd_vector_write_with_md(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer, void *metadata,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
return _nvme_ocssd_ns_cmd_vector_rw_with_md(ns, qpair, buffer, metadata, lba_list,
num_lbas, cb_fn, cb_arg, SPDK_OCSSD_OPC_VECTOR_WRITE, io_flags);
}
int
spdk_nvme_ocssd_ns_cmd_vector_write(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
return _nvme_ocssd_ns_cmd_vector_rw_with_md(ns, qpair, buffer, NULL, lba_list,
num_lbas, cb_fn, cb_arg, SPDK_OCSSD_OPC_VECTOR_WRITE, io_flags);
}
int
spdk_nvme_ocssd_ns_cmd_vector_read_with_md(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer, void *metadata,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
return _nvme_ocssd_ns_cmd_vector_rw_with_md(ns, qpair, buffer, metadata, lba_list,
num_lbas, cb_fn, cb_arg, SPDK_OCSSD_OPC_VECTOR_READ, io_flags);
}
int
spdk_nvme_ocssd_ns_cmd_vector_read(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
void *buffer,
uint64_t *lba_list, uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
return _nvme_ocssd_ns_cmd_vector_rw_with_md(ns, qpair, buffer, NULL, lba_list,
num_lbas, cb_fn, cb_arg, SPDK_OCSSD_OPC_VECTOR_READ, io_flags);
}
int
spdk_nvme_ocssd_ns_cmd_vector_copy(struct spdk_nvme_ns *ns,
struct spdk_nvme_qpair *qpair,
uint64_t *dst_lba_list,
uint64_t *src_lba_list,
uint32_t num_lbas,
spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
uint32_t valid_flags = SPDK_OCSSD_IO_FLAGS_LIMITED_RETRY;
if (io_flags & ~valid_flags) {
return -EINVAL;
}
if (!dst_lba_list || !src_lba_list || (num_lbas == 0) ||
(num_lbas > SPDK_NVME_OCSSD_MAX_LBAL_ENTRIES)) {
return -EINVAL;
}
req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
if (req == NULL) {
return -ENOMEM;
}
cmd = &req->cmd;
cmd->opc = SPDK_OCSSD_OPC_VECTOR_COPY;
cmd->nsid = ns->id;
/*
* Dwords 10 and 11 store a pointer to the list of source logical
* block addresses.
* Dwords 14 and 15 store a pointer to the list of destination logical
* block addresses.
* If there is a single entry in the LBA list, the logical block
* address should be stored instead.
*/
if (num_lbas == 1) {
*(uint64_t *)&cmd->cdw10 = *src_lba_list;
*(uint64_t *)&cmd->cdw14 = *dst_lba_list;
} else {
*(uint64_t *)&cmd->cdw10 = spdk_vtophys(src_lba_list);
*(uint64_t *)&cmd->cdw14 = spdk_vtophys(dst_lba_list);
}
cmd->cdw12 = num_lbas - 1;
cmd->cdw12 |= io_flags;
return nvme_qpair_submit_request(qpair, req);
}


@@ -34,7 +34,8 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_pcie.c nvme_qpair.c nvme_quirks.c nvme_ctrlr_ocssd_cmd.c
DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_pcie.c nvme_qpair.c nvme_quirks.c nvme_ctrlr_ocssd_cmd.c \
nvme_ns_ocssd_cmd.c
.PHONY: all clean $(DIRS-y)


@@ -0,0 +1 @@
nvme_ns_ocssd_cmd_ut


@@ -0,0 +1,38 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
TEST_FILE = nvme_ns_ocssd_cmd_ut.c
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk


@@ -0,0 +1,673 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "spdk_cunit.h"
#include "nvme/nvme_ns_ocssd_cmd.c"
#include "nvme/nvme_ns_cmd.c"
#include "nvme/nvme.c"
#include "common/lib/test_env.c"
DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
(struct spdk_nvme_qpair *qpair,
uint32_t max_completions), 0);
static struct nvme_driver _g_nvme_driver = {
.lock = PTHREAD_MUTEX_INITIALIZER,
};
static struct nvme_request *g_request = NULL;
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
g_request = req;
return 0;
}
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}
void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
return;
}
int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
return;
}
void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
memset(opts, 0, sizeof(*opts));
}
bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
return true;
}
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
const struct spdk_nvme_ctrlr_opts *opts,
void *devhandle)
{
return NULL;
}
int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
int
nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
void *cb_ctx,
spdk_nvme_probe_cb probe_cb,
spdk_nvme_remove_cb remove_cb,
bool direct_connect)
{
return 0;
}
uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
return ns->ctrlr->max_xfer_size;
}
static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_qpair *qpair,
uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
uint32_t stripe_size, bool extended_lba)
{
uint32_t num_requests = 32;
uint32_t i;
ctrlr->max_xfer_size = max_xfer_size;
/*
* Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
* so that we test the SGL splitting path.
*/
ctrlr->flags = 0;
ctrlr->min_page_size = 4096;
ctrlr->page_size = 4096;
memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
memset(ns, 0, sizeof(*ns));
ns->ctrlr = ctrlr;
ns->sector_size = sector_size;
ns->extended_lba_size = sector_size;
if (extended_lba) {
ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
ns->extended_lba_size += md_size;
}
ns->md_size = md_size;
ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
memset(qpair, 0, sizeof(*qpair));
qpair->ctrlr = ctrlr;
qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
for (i = 0; i < num_requests; i++) {
struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
}
g_request = NULL;
}
static void
cleanup_after_test(struct spdk_nvme_qpair *qpair)
{
free(qpair->req_buf);
}
static void
test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
uint64_t lba_list = 0x12345678;
rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
NULL, NULL);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
static void
test_nvme_ocssd_ns_cmd_vector_reset(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
uint64_t lba_list[vector_size];
rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
NULL, NULL);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
static void
test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t md_size = 0x80;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size);
char *metadata = malloc(md_size);
uint64_t lba_list = 0x12345678;
SPDK_CU_ASSERT_FATAL(buffer != NULL);
SPDK_CU_ASSERT_FATAL(metadata != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
&lba_list, 1, NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload.md == metadata);
CU_ASSERT(g_request->payload_size == PAGE_SIZE);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
free(metadata);
}
static void
test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t md_size = 0x80;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size * vector_size);
char *metadata = malloc(md_size * vector_size);
uint64_t lba_list[vector_size];
SPDK_CU_ASSERT_FATAL(buffer != NULL);
SPDK_CU_ASSERT_FATAL(metadata != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
lba_list, vector_size,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload.md == metadata);
CU_ASSERT(g_request->payload_size == max_xfer_size);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
free(metadata);
}
static void
test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size);
uint64_t lba_list = 0x12345678;
SPDK_CU_ASSERT_FATAL(buffer != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, &lba_list, 1,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload_size == PAGE_SIZE);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
}
static void
test_nvme_ocssd_ns_cmd_vector_read(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size * vector_size);
uint64_t lba_list[vector_size];
SPDK_CU_ASSERT_FATAL(buffer != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, lba_list, vector_size,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload_size == max_xfer_size);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
}
static void
test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t md_size = 0x80;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size);
char *metadata = malloc(md_size);
uint64_t lba_list = 0x12345678;
SPDK_CU_ASSERT_FATAL(buffer != NULL);
SPDK_CU_ASSERT_FATAL(metadata != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
&lba_list, 1, NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload.md == metadata);
CU_ASSERT(g_request->payload_size == PAGE_SIZE);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
free(metadata);
}
static void
test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t md_size = 0x80;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size * vector_size);
char *metadata = malloc(md_size * vector_size);
uint64_t lba_list[vector_size];
SPDK_CU_ASSERT_FATAL(buffer != NULL);
SPDK_CU_ASSERT_FATAL(metadata != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
lba_list, vector_size,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload.md == metadata);
CU_ASSERT(g_request->payload_size == max_xfer_size);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
free(metadata);
}
static void
test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size);
uint64_t lba_list = 0x12345678;
SPDK_CU_ASSERT_FATAL(buffer != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
&lba_list, 1, NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload_size == PAGE_SIZE);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
}
static void
test_nvme_ocssd_ns_cmd_vector_write(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
char *buffer = malloc(sector_size * vector_size);
uint64_t lba_list[vector_size];
SPDK_CU_ASSERT_FATAL(buffer != NULL);
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
lba_list, vector_size,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->payload_size == max_xfer_size);
CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
free(buffer);
}
static void
test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
uint64_t src_lba_list = 0x12345678;
uint64_t dst_lba_list = 0x87654321;
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
CU_ASSERT(g_request->cmd.cdw12 == 0);
CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
static void
test_nvme_ocssd_ns_cmd_vector_copy(void)
{
const uint32_t max_xfer_size = 0x10000;
const uint32_t sector_size = 0x1000;
const uint32_t vector_size = 0x10;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_qpair qpair;
int rc = 0;
uint64_t src_lba_list[vector_size];
uint64_t dst_lba_list[vector_size];
prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
dst_lba_list, src_lba_list, vector_size,
NULL, NULL, 0);
SPDK_CU_ASSERT_FATAL(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
nvme_free_request(g_request);
cleanup_after_test(&qpair);
}
int main(int argc, char **argv)
{
CU_pSuite suite = NULL;
unsigned int num_failures;
if (CU_initialize_registry() != CUE_SUCCESS) {
return CU_get_error();
}
suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
if (suite == NULL) {
CU_cleanup_registry();
return CU_get_error();
}
if (
CU_add_test(suite, "nvme_ns_ocssd_cmd_vector_reset", test_nvme_ocssd_ns_cmd_vector_reset) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_reset_single_entry",
test_nvme_ocssd_ns_cmd_vector_reset_single_entry) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md",
test_nvme_ocssd_ns_cmd_vector_read_with_md) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md_single_entry",
test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read", test_nvme_ocssd_ns_cmd_vector_read) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_single_entry",
test_nvme_ocssd_ns_cmd_vector_read_single_entry) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md",
test_nvme_ocssd_ns_cmd_vector_write_with_md) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md_single_entry",
test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write", test_nvme_ocssd_ns_cmd_vector_write) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_single_entry",
test_nvme_ocssd_ns_cmd_vector_write_single_entry) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy", test_nvme_ocssd_ns_cmd_vector_copy) == NULL
|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy_single_entry",
test_nvme_ocssd_ns_cmd_vector_copy_single_entry) == NULL
) {
CU_cleanup_registry();
return CU_get_error();
}
g_spdk_nvme_driver = &_g_nvme_driver;
CU_basic_set_mode(CU_BRM_VERBOSE);
CU_basic_run_tests();
num_failures = CU_get_number_of_failures();
CU_cleanup_registry();
return num_failures;
}


@@ -74,6 +74,7 @@ $valgrind $testdir/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ns.c/nvme_ns_ut
$valgrind $testdir/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut
$valgrind $testdir/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut
$valgrind $testdir/lib/nvme/nvme_qpair.c/nvme_qpair_ut
$valgrind $testdir/lib/nvme/nvme_pcie.c/nvme_pcie_ut
$valgrind $testdir/lib/nvme/nvme_quirks.c/nvme_quirks_ut