nvme: add FUA and LR support

Add support for setting the Force Unit Access (FUA) and Limited Retry
(LR) bits on read and write commands via a new io_flags argument.

Change-Id: I9860848358377d63a967a4ba6ee9c061faf284d4
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Author: Ben Walker, 2015-12-30 15:52:17 -07:00 (committed by Gerrit Code Review)
parent 9195cfb207
commit 81f4046402
7 changed files with 98 additions and 32 deletions
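After this change, every read and write submission carries a trailing io_flags argument; passing 0 keeps the old behavior. A minimal caller sketch of the updated signatures (ns, buf, lba, lba_count, io_complete and task are hypothetical placeholders for state the caller already has; error handling elided):

	/* Write with Force Unit Access: the controller must commit the data
	 * to non-volatile media before completing the command. */
	rc = nvme_ns_cmd_write(ns, buf, lba, lba_count, io_complete, task,
			       NVME_IO_FLAGS_FORCE_UNIT_ACCESS);

	/* Read with Limited Retry: the controller should apply only limited
	 * error recovery before reporting a failure. */
	rc = nvme_ns_cmd_read(ns, buf, lba, lba_count, io_complete, task,
			      NVME_IO_FLAGS_LIMITED_RETRY);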

--- File 1 of 7 ---

@@ -347,7 +347,7 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
 #endif
 	{
 		rc = nvme_ns_cmd_read(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
-				      entry->io_size_blocks, io_complete, task);
+				      entry->io_size_blocks, io_complete, task, 0);
 	}
 } else {
 #if HAVE_LIBAIO
@@ -358,7 +358,7 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
 #endif
 	{
 		rc = nvme_ns_cmd_write(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
-				       entry->io_size_blocks, io_complete, task);
+				       entry->io_size_blocks, io_complete, task, 0);
 	}
 }
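Existing call sites such as this one pass 0 for io_flags, so their behavior is unchanged; FUA and LR are strictly opt-in.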

--- File 2 of 7 ---

@@ -349,9 +349,12 @@ uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
  */
 uint64_t nvme_ns_get_size(struct nvme_namespace *ns);
 
+/**
+ * \brief Namespace command support flags.
+ */
 enum nvme_namespace_flags {
-	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1,
-	NVME_NS_FLUSH_SUPPORTED		= 0x2,
+	NVME_NS_DEALLOCATE_SUPPORTED	= 0x1, /**< The deallocate command is supported */
+	NVME_NS_FLUSH_SUPPORTED		= 0x2, /**< The flush command is supported */
 };
 
 /**
@@ -390,6 +393,8 @@ typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *address, uint32_t
  * \param lba_count length (in sectors) for the write operation
  * \param cb_fn callback function to invoke when the I/O is completed
  * \param cb_arg argument to pass to the callback function
+ * \param io_flags set flags, defined by the NVME_IO_FLAGS_* entries
+ *                 in spdk/nvme_spec.h, for this I/O.
  *
  * \return 0 if successfully submitted, ENOMEM if an nvme_request
  *         structure cannot be allocated for the I/O request
@@ -399,7 +404,7 @@ typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *address, uint32_t
  */
 int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
 		      uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
-		      void *cb_arg);
+		      void *cb_arg, uint32_t io_flags);
 
 /**
  * \brief Submits a write I/O to the specified NVMe namespace.
@@ -409,6 +414,7 @@ int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
  * \param lba_count length (in sectors) for the write operation
  * \param cb_fn callback function to invoke when the I/O is completed
  * \param cb_arg argument to pass to the callback function
+ * \param io_flags set flags, defined in nvme_spec.h, for this I/O
  * \param reset_sgl_fn callback function to reset scattered payload
  * \param next_sge_fn callback function to iterate each scattered
  *        payload memory segment
@@ -420,7 +426,7 @@ int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
  *          nvme_register_io_thread().
  */
 int nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
-		       nvme_cb_fn_t cb_fn, void *cb_arg,
+		       nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
 		       nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		       nvme_req_next_sge_fn_t next_sge_fn);
 
@@ -433,6 +439,7 @@ int nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_cou
  * \param lba_count length (in sectors) for the read operation
  * \param cb_fn callback function to invoke when the I/O is completed
  * \param cb_arg argument to pass to the callback function
+ * \param io_flags set flags, defined in nvme_spec.h, for this I/O
  *
  * \return 0 if successfully submitted, ENOMEM if an nvme_request
  *         structure cannot be allocated for the I/O request
@@ -442,7 +449,7 @@ int nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_cou
  */
 int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
 		     uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
-		     void *cb_arg);
+		     void *cb_arg, uint32_t io_flags);
 
 /**
  * \brief Submits a read I/O to the specified NVMe namespace.
@@ -452,6 +459,7 @@ int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
  * \param lba_count length (in sectors) for the read operation
  * \param cb_fn callback function to invoke when the I/O is completed
  * \param cb_arg argument to pass to the callback function
+ * \param io_flags set flags, defined in nvme_spec.h, for this I/O
  * \param reset_sgl_fn callback function to reset scattered payload
  * \param next_sge_fn callback function to iterate each scattered
  *        payload memory segment
@@ -463,7 +471,7 @@ int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
  *          nvme_register_io_thread().
  */
 int nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
-		      nvme_cb_fn_t cb_fn, void *cb_arg,
+		      nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
 		      nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		      nvme_req_next_sge_fn_t next_sge_fn);

--- File 3 of 7 ---

@@ -1104,4 +1104,7 @@ SPDK_STATIC_ASSERT(sizeof(struct nvme_firmware_page) == 512, "Incorrect size");
 #define nvme_completion_is_error(cpl)	\
 	((cpl)->status.sc != 0 || (cpl)->status.sct != 0)
 
+#define NVME_IO_FLAGS_FORCE_UNIT_ACCESS (1U << 30)
+#define NVME_IO_FLAGS_LIMITED_RETRY (1U << 31)
+
 #endif
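These bit positions are not arbitrary: they mirror Command Dword 12 of the NVMe Read/Write commands, which is what lets nvme_ns_cmd.c (below) OR the flags straight into cdw12. For reference, the relevant CDW12 layout in the NVMe 1.x specification:

	/* NVMe Read/Write command, Command Dword 12:
	 *   bits 15:0  NLB - number of logical blocks, zero-based
	 *   bit  30    FUA - Force Unit Access
	 *   bit  31    LR  - Limited Retry
	 * (the remaining bits carry protection info and reserved fields)
	 */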

--- File 4 of 7 ---

@@ -41,7 +41,8 @@
 static struct nvme_request *
 _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, nvme_req_reset_sgl_fn_t reset_sgl_fn,
+		uint32_t opc, uint32_t io_flags,
+		nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		nvme_req_next_sge_fn_t next_sge_fn);
 
 static void
@@ -91,7 +92,7 @@ static struct nvme_request *
 _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
 			   uint64_t lba, uint32_t lba_count,
 			   nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
-			   struct nvme_request *req,
+			   uint32_t io_flags, struct nvme_request *req,
 			   uint32_t sectors_per_max_io, uint32_t sector_mask,
 			   nvme_req_reset_sgl_fn_t reset_sgl_fn,
 			   nvme_req_next_sge_fn_t next_sge_fn)
@@ -106,7 +107,7 @@ _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
 		lba_count = nvme_min(remaining_lba_count, lba_count);
 
 		child = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn,
-					cb_arg, opc, reset_sgl_fn, next_sge_fn);
+					cb_arg, opc, io_flags, reset_sgl_fn, next_sge_fn);
 		if (child == NULL) {
 			nvme_free_request(req);
 			return NULL;
@@ -127,7 +128,8 @@ _nvme_ns_cmd_split_request(struct nvme_namespace *ns, void *payload,
 static struct nvme_request *
 _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 		uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
-		uint32_t opc, nvme_req_reset_sgl_fn_t reset_sgl_fn,
+		uint32_t opc, uint32_t io_flags,
+		nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request	*req;
@@ -137,6 +139,11 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	uint32_t		sectors_per_max_io;
 	uint32_t		sectors_per_stripe;
 
+	if (io_flags & 0xFFFF) {
+		/* The bottom 16 bits must be empty */
+		return NULL;
+	}
+
 	sector_size = ns->sector_size;
 	sectors_per_max_io = ns->sectors_per_max_io;
 	sectors_per_stripe = ns->sectors_per_stripe;
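The 0xFFFF guard is there because the low 16 bits of CDW12 hold the zero-based block count; any flag bits in that range would corrupt the NLB field, so the request is rejected before a command is built. A hypothetical offending call, for illustration only:

	/* Rejected: bit 0 of io_flags overlaps the NLB field in CDW12,
	 * so _nvme_ns_cmd_rw() returns NULL and the submit call fails. */
	rc = nvme_ns_cmd_read(ns, buf, lba, lba_count, io_complete, task, 0x1);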
@@ -159,11 +166,11 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  req, sectors_per_stripe, sectors_per_stripe - 1,
+						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
 						  reset_sgl_fn, next_sge_fn);
 	} else if (lba_count > sectors_per_max_io) {
 		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
-						  req, sectors_per_max_io, 0,
+						  io_flags, req, sectors_per_max_io, 0,
 						  reset_sgl_fn, next_sge_fn);
 	} else {
 		cmd = &req->cmd;
@@ -172,7 +179,9 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 		tmp_lba = (uint64_t *)&cmd->cdw10;
 		*tmp_lba = lba;
+
 		cmd->cdw12 = lba_count - 1;
+		cmd->cdw12 |= io_flags;
 	}
 
 	return req;
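Two consequences of this design are worth noting. First, because the low 16 bits of io_flags were validated to be clear, OR-ing them into cdw12 after the block count is written cannot disturb NLB. Second, io_flags is threaded through _nvme_ns_cmd_split_request() above, so every child of a split I/O inherits the same FUA/LR bits; the split_test4 assertions later in this change verify exactly that.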
@@ -180,11 +189,13 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, void *payload, uint64_t lba,
 int
 nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		 uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+		 uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
+		 uint32_t io_flags)
 {
 	struct nvme_request *req;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, NULL, NULL);
+	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
+			      NULL, NULL);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -195,14 +206,14 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
 int
 nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
-		  nvme_cb_fn_t cb_fn, void *cb_arg,
+		  nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
 		  nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		  nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, reset_sgl_fn,
-			      next_sge_fn);
+	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags,
+			      reset_sgl_fn, next_sge_fn);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -213,11 +224,13 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
 int
 nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
-		  uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+		  uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
+		  uint32_t io_flags)
 {
 	struct nvme_request *req;
 
-	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, NULL, NULL);
+	req = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
+			      NULL, NULL);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;
@@ -228,14 +241,14 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
 int
 nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
-		   nvme_cb_fn_t cb_fn, void *cb_arg,
+		   nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
 		   nvme_req_reset_sgl_fn_t reset_sgl_fn,
 		   nvme_req_next_sge_fn_t next_sge_fn)
 {
 	struct nvme_request *req;
 
-	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, reset_sgl_fn,
-			      next_sge_fn);
+	req = _nvme_ns_cmd_rw(ns, NULL, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags,
+			      reset_sgl_fn, next_sge_fn);
 	if (req != NULL) {
 		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
 		return 0;

--- File 5 of 7 ---

@@ -192,10 +192,10 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
 	if ((g_rw_percentage == 100) ||
 	    (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
 		rc = nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
-				      entry->io_size_blocks, io_complete, task);
+				      entry->io_size_blocks, io_complete, task, 0);
 	} else {
 		rc = nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
-				       entry->io_size_blocks, io_complete, task);
+				       entry->io_size_blocks, io_complete, task, 0);
 	}
 
 	if (rc != 0) {

--- File 6 of 7 ---

@@ -304,7 +304,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
 	}
 
 	rc = nvme_ns_cmd_writev(ns, BASE_LBA_START, lba_count,
-				io_complete, req,
+				io_complete, req, 0,
 				nvme_request_reset_sgl,
 				nvme_request_next_sge);
 
@@ -333,7 +333,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
 	}
 
 	rc = nvme_ns_cmd_readv(ns, BASE_LBA_START, lba_count,
-			       io_complete, req,
+			       io_complete, req, 0,
 			       nvme_request_reset_sgl,
 			       nvme_request_next_sge);

--- File 7 of 7 ---

@@ -119,7 +119,7 @@ split_test(void)
 	lba = 0;
 	lba_count = 1;
 
-	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
 	CU_ASSERT(rc == 0);
 	SPDK_CU_ASSERT_FATAL(g_request != NULL);
 
@@ -155,7 +155,7 @@ split_test2(void)
 	lba = 0;
 	lba_count = (256 * 1024) / 512;
 
-	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
 	CU_ASSERT(rc == 0);
 	SPDK_CU_ASSERT_FATAL(g_request != NULL);
 
@@ -210,7 +210,7 @@ split_test3(void)
 	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
 	lba_count = (256 * 1024) / 512;
 
-	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
 	CU_ASSERT(rc == 0);
 	SPDK_CU_ASSERT_FATAL(g_request != NULL);
 
@@ -267,7 +267,8 @@ split_test4(void)
 	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
 	lba_count = (256 * 1024) / 512;
 
-	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
+			      NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
 	CU_ASSERT(rc == 0);
 	SPDK_CU_ASSERT_FATAL(g_request != NULL);
@@ -281,6 +282,8 @@ split_test4(void)
 	CU_ASSERT(child->payload_size == (256 - 10) * 512);
 	CU_ASSERT(cmd_lba == 10);
 	CU_ASSERT(cmd_lba_count == 256 - 10);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0);
 	nvme_free_request(child);
 
 	child = TAILQ_FIRST(&g_request->children);
@@ -290,6 +293,8 @@ split_test4(void)
 	CU_ASSERT(child->payload_size == 128 * 1024);
 	CU_ASSERT(cmd_lba == 256);
 	CU_ASSERT(cmd_lba_count == 256);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0);
 	nvme_free_request(child);
 
 	child = TAILQ_FIRST(&g_request->children);
@@ -299,6 +304,8 @@ split_test4(void)
 	CU_ASSERT(child->payload_size == 10 * 512);
 	CU_ASSERT(cmd_lba == 512);
 	CU_ASSERT(cmd_lba_count == 10);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0);
 	nvme_free_request(child);
 
 	CU_ASSERT(TAILQ_EMPTY(&g_request->children));
@@ -362,6 +369,40 @@ test_nvme_ns_cmd_deallocate(void)
 	CU_ASSERT(rc != 0);
 }
 
+static void
+test_io_flags(void)
+{
+	struct nvme_namespace	ns;
+	struct nvme_controller	ctrlr;
+	void			*payload;
+	uint64_t		lba;
+	uint32_t		lba_count;
+	int			rc;
+
+	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 128 * 1024);
+	payload = malloc(256 * 1024);
+	lba = 0;
+	lba_count = (4 * 1024) / 512;
+
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
+			      NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+	nvme_free_request(g_request);
+
+	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
+			      NVME_IO_FLAGS_LIMITED_RETRY);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+	nvme_free_request(g_request);
+
+	free(payload);
+}
+
 int main(int argc, char **argv)
 {
 	CU_pSuite	suite = NULL;
@@ -384,6 +425,7 @@ int main(int argc, char **argv)
 	    || CU_add_test(suite, "split_test4", split_test4) == NULL
 	    || CU_add_test(suite, "nvme_ns_cmd_flush testing", test_nvme_ns_cmd_flush) == NULL
 	    || CU_add_test(suite, "nvme_ns_cmd_deallocate testing", test_nvme_ns_cmd_deallocate) == NULL
+	    || CU_add_test(suite, "io_flags", test_io_flags) == NULL
 	) {
 		CU_cleanup_registry();
 		return CU_get_error();