nvme: allow setting of completion queue CDW0

This change allows setting the NVMe completion queue
CDW0 value in spdk_bdev_io_complete_nvme_status().

Before this change, handling of vendor specific NVMe IO
commands was limited, since there was no way to return
command specific info back to the initiator.
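
For illustration (not part of this patch), a bdev module backing a
vendor specific command could now surface a command specific result;
the handler and my_result below are hypothetical:

    #include "spdk/bdev_module.h"
    #include "spdk/nvme_spec.h"

    static void
    my_vendor_cmd_done(struct spdk_bdev_io *bdev_io, uint32_t my_result)
    {
            /* cdw0 now travels with the completion; pass 0 if unused */
            spdk_bdev_io_complete_nvme_status(bdev_io, my_result,
                                              SPDK_NVME_SCT_GENERIC,
                                              SPDK_NVME_SC_SUCCESS);
    }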

Change-Id: I250d5df3bd1e62ddb89a011503d42bd4c8390f9b
Signed-off-by: Michael Haeuptle <michael.haeuptle@hpe.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/470678
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>

@@ -2,6 +2,13 @@
 ## v19.10: (Upcoming Release)

 ### bdev

+Added new parameter `cdw0` to `spdk_bdev_io_complete_nvme_status()` and
+`spdk_bdev_io_get_nvme_status()` that allows setting/getting
+the NVMe completion queue DW0 entry. This allows vendor specific IO commands
+to return command specific completion info back to the initiator.
+
 ### bdev zone

 Added new public header for zoned bdev. Zoned bdev is an extension

@@ -1359,13 +1359,16 @@ void spdk_bdev_get_device_stat(struct spdk_bdev *bdev, struct spdk_bdev_io_stat
 			       spdk_bdev_get_device_stat_cb cb, void *cb_arg);

 /**
- * Get the status of bdev_io as an NVMe status code.
+ * Get the status of bdev_io as an NVMe status code and command specific
+ * completion queue value.
  *
  * \param bdev_io I/O to get the status from.
+ * \param cdw0 Command specific completion queue value
  * \param sct Status Code Type return value, as defined by the NVMe specification.
  * \param sc Status Code return value, as defined by the NVMe specification.
  */
-void spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct, int *sc);
+void spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct,
+				  int *sc);

 /**
  * Get the status of bdev_io as a SCSI status code.

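For reference, a consumer's completion callback would read the new out
parameter alongside sct and sc. A minimal sketch follows; my_io_done and
my_send_response are illustrative names, not SPDK API:

    #include "spdk/bdev.h"

    /* Hypothetical transport-specific responder, for illustration only */
    static void my_send_response(void *ctx, uint32_t cdw0, int sct, int sc);

    static void
    my_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    {
            uint32_t cdw0;
            int sct, sc;

            /* cdw0 is returned for both success and error completions */
            spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
            spdk_bdev_free_io(bdev_io);
            my_send_response(cb_arg, cdw0, sct, sc);
    }
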
@@ -556,8 +556,9 @@ struct spdk_bdev_io {
 	/** Error information from a device */
 	union {
 		/** Only valid when status is SPDK_BDEV_IO_STATUS_NVME_ERROR */
 		struct {
+			/** NVMe completion queue entry DW0 */
+			uint32_t cdw0;
 			/** NVMe status code type */
 			uint8_t sct;
 			/** NVMe status code */
@@ -817,13 +818,15 @@ void spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io,
 			   enum spdk_bdev_io_status status);

 /**
- * Complete a bdev_io with an NVMe status code.
+ * Complete a bdev_io with an NVMe status code and DW0 completion queue entry
  *
  * \param bdev_io I/O to complete.
+ * \param cdw0 NVMe Completion Queue DW0 value (set to 0 if not applicable)
  * \param sct NVMe Status Code Type.
  * \param sc NVMe Status Code.
  */
-void spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, int sct, int sc);
+void spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct,
+				       int sc);

 /**
  * Complete a bdev_io with a SCSI status code.

@@ -1917,6 +1917,7 @@ spdk_bdev_io_init(struct spdk_bdev_io *bdev_io,
 	bdev_io->internal.orig_iovs = NULL;
 	bdev_io->internal.orig_iovcnt = 0;
 	bdev_io->internal.orig_md_buf = NULL;
+	bdev_io->internal.error.nvme.cdw0 = 0;
 }

 static bool
@@ -3710,6 +3711,7 @@ _spdk_bdev_ch_retry_io(struct spdk_bdev_channel *bdev_ch)
 		bdev_io->internal.ch->io_outstanding++;
 		shared_resource->io_outstanding++;
 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
+		bdev_io->internal.error.nvme.cdw0 = 0;
 		bdev->fn_table->submit_request(spdk_bdev_io_get_io_channel(bdev_io), bdev_io);
 		if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NOMEM) {
 			break;
@@ -3933,24 +3935,27 @@ spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
 }

 void
-spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, int sct, int sc)
+spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
 {
 	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
 	} else {
-		bdev_io->internal.error.nvme.sct = sct;
-		bdev_io->internal.error.nvme.sc = sc;
 		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
 	}

+	bdev_io->internal.error.nvme.cdw0 = cdw0;
+	bdev_io->internal.error.nvme.sct = sct;
+	bdev_io->internal.error.nvme.sc = sc;
+
 	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
 }

 void
-spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct, int *sc)
+spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc)
 {
 	assert(sct != NULL);
 	assert(sc != NULL);
+	assert(cdw0 != NULL);

 	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR) {
 		*sct = bdev_io->internal.error.nvme.sct;
@@ -3962,6 +3967,8 @@ spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct, int *
 		*sct = SPDK_NVME_SCT_GENERIC;
 		*sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 	}
+
+	*cdw0 = bdev_io->internal.error.nvme.cdw0;
 }

 struct spdk_thread *

@@ -93,8 +93,10 @@ nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
 	struct spdk_nvmf_request *req = cb_arg;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
 	int sc, sct;
+	uint32_t cdw0;

-	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
+	spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
+	response->cdw0 = cdw0;
 	response->status.sc = sc;
 	response->status.sct = sct;

@@ -386,12 +388,14 @@ nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
 	struct spdk_nvmf_request *req = unmap_ctx->req;
 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
 	int sc, sct;
+	uint32_t cdw0;

 	unmap_ctx->count--;

 	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
 	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
-		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
+		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
+		response->cdw0 = cdw0;
 		response->status.sc = sc;
 		response->status.sct = sct;
 	}

@@ -392,10 +392,11 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 	struct spdk_vhost_nvme_task *task = cb_arg;
 	struct spdk_nvme_cmd *cmd = &task->cmd;
 	int sc, sct;
+	uint32_t cdw0;

 	assert(bdev_io != NULL);

-	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
+	spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
 	spdk_bdev_free_io(bdev_io);

 	task->dnr = !success;
@@ -416,13 +417,14 @@ blk_unmap_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 	struct spdk_vhost_nvme_task *task = child->parent;
 	struct spdk_vhost_nvme_dev *nvme = task->nvme;
 	int sct, sc;
+	uint32_t cdw0;

 	assert(bdev_io != NULL);

 	task->num_children--;

 	if (!success) {
 		task->dnr = 1;
-		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
+		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
 		task->sct = sct;
 		task->sc = sc;
 	}

@@ -1697,7 +1697,7 @@ bdev_nvme_no_pi_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
 	}

 	/* Return original completion status */
-	spdk_bdev_io_complete_nvme_status(bdev_io, bio->cpl.status.sct,
+	spdk_bdev_io_complete_nvme_status(bdev_io, bio->cpl.cdw0, bio->cpl.status.sct,
 					  bio->cpl.status.sc);
 }

@@ -1729,7 +1729,7 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
 		}
 	}

-	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->status.sct, cpl->status.sc);
+	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->cdw0, cpl->status.sct, cpl->status.sc);
 }

 static void
@@ -1744,7 +1744,7 @@ bdev_nvme_writev_done(void *ref, const struct spdk_nvme_cpl *cpl)
 		bdev_nvme_verify_pi_error(bdev_io);
 	}

-	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->status.sct, cpl->status.sc);
+	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->cdw0, cpl->status.sct, cpl->status.sc);
 }

 static void
@@ -1752,7 +1752,7 @@ bdev_nvme_queued_done(void *ref, const struct spdk_nvme_cpl *cpl)
 {
 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx((struct nvme_bdev_io *)ref);

-	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->status.sct, cpl->status.sc);
+	spdk_bdev_io_complete_nvme_status(bdev_io, cpl->cdw0, cpl->status.sct, cpl->status.sc);
 }

 static void
@@ -1762,7 +1762,7 @@ bdev_nvme_admin_passthru_completion(void *ctx)
 	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);

 	spdk_bdev_io_complete_nvme_status(bdev_io,
-					  bio->cpl.status.sct, bio->cpl.status.sc);
+					  bio->cpl.cdw0, bio->cpl.status.sct, bio->cpl.status.sc);
 }

 static void

@@ -840,6 +840,7 @@ struct bdevio_passthrough_request {
 	struct io_target *target;
 	int sct;
 	int sc;
+	uint32_t cdw0;
 };

 static void
@@ -847,7 +848,7 @@ nvme_pt_test_complete(struct spdk_bdev_io *bdev_io, bool success, void *arg)
 {
 	struct bdevio_passthrough_request *pt_req = arg;

-	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->sct, &pt_req->sc);
+	spdk_bdev_io_get_nvme_status(bdev_io, &pt_req->cdw0, &pt_req->sct, &pt_req->sc);
 	spdk_bdev_free_io(bdev_io);
 	wake_ut_thread();
 }

@@ -932,9 +933,11 @@ blockdev_test_nvme_passthru_vendor_specific(void)
 	pt_req.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
 	pt_req.sc = SPDK_NVME_SC_SUCCESS;
+	pt_req.cdw0 = 0xbeef;

 	execute_spdk_function(__blockdev_nvme_passthru, &pt_req);
 	CU_ASSERT(pt_req.sct == SPDK_NVME_SCT_GENERIC);
 	CU_ASSERT(pt_req.sc == SPDK_NVME_SC_INVALID_OPCODE);
+	CU_ASSERT(pt_req.cdw0 == 0x0);
 }
static void

@@ -173,7 +173,7 @@ spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct sp
 }

 DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
-	      (const struct spdk_bdev_io *bdev_io, int *sct, int *sc));
+	      (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));

 int
 spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,