nvme/fio_plugin: also set appmask/apptag when PRACT is enabled

Previously we only initialized the DIF context when PRACT is 0, because
the DIF library only supports that case. However, when the end-to-end
data protection feature is enabled and PRACT is set to 1, the controller
generates and checks the protection information for us, but we still
need to pass appmask/apptag to the controller.  This patch fixes that
case.
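
For illustration only (not part of the patch), here is a minimal sketch
of why apptag/appmask still matter with PRACT set: the controller
inserts and checks the PI itself, so no host-side spdk_dif_generate()/
spdk_dif_verify() is needed, but the APPTAG/APPMASK fields are still
taken from the command. The submit_write_with_pract() helper name is
hypothetical; buffers, LBA range and the qpair are assumed to be set up
by the caller, mirroring the plugin's g_spdk_apptag/g_spdk_apptag_mask
globals:

    #include "spdk/nvme.h"

    /* Hypothetical helper, not part of the plugin: submit a write with
     * PRACT enabled.  The controller generates/strips the PI, so the
     * host skips DIF generation/verification, but apptag_mask/apptag
     * are still forwarded in the command so the controller can perform
     * the APPTAG check. */
    static int
    submit_write_with_pract(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
                            void *buf, void *md_buf, uint64_t lba, uint32_t lba_count,
                            spdk_nvme_cmd_cb cb_fn, void *cb_arg,
                            uint16_t apptag_mask, uint16_t apptag)
    {
            uint32_t io_flags = SPDK_NVME_IO_FLAGS_PRACT |
                                SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;

            return spdk_nvme_ns_cmd_write_with_md(ns, qpair, buf, md_buf,
                                                  lba, lba_count, cb_fn, cb_arg,
                                                  io_flags, apptag_mask, apptag);
    }

This is also why the setup helpers below only fill in
dif_ctx.apptag_mask/app_tag and return early when PRACT is set, and the
verify helpers do nothing in that case.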

Change-Id: Ia62d4f8a7adf822b75541f69ce57aeff8f9eb505
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/482047
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Changpeng Liu 2020-01-18 21:28:07 -05:00 committed by Tomasz Zawadzki
parent a715543325
commit ca073ef3ac


@@ -111,12 +111,12 @@ struct spdk_fio_qpair {
struct spdk_nvme_qpair *qpair;
struct spdk_nvme_ns *ns;
uint32_t io_flags;
bool do_nvme_pi;
/* True for DIF and false for DIX, and this is valid only if do_nvme_pi is true. */
bool nvme_pi_enabled;
/* True for DIF and false for DIX, and this is valid only if nvme_pi_enabled is true. */
bool extended_lba;
/* True for protection info transferred at start of metadata,
* false for protection info transferred at end of metadata, and
* this is valid only if do_nvme_pi is true.
* this is valid only if nvme_pi_enabled is true.
*/
bool md_start;
struct spdk_fio_qpair *next;
@@ -214,30 +214,6 @@ get_fio_ctrlr(const struct spdk_nvme_transport_id *trid)
return NULL;
}
static bool
fio_do_nvme_pi_check(struct spdk_fio_qpair *fio_qpair)
{
struct spdk_nvme_ns *ns = NULL;
const struct spdk_nvme_ns_data *nsdata;
ns = fio_qpair->ns;
nsdata = spdk_nvme_ns_get_data(ns);
if (spdk_nvme_ns_get_pi_type(ns) ==
SPDK_NVME_FMT_NVM_PROTECTION_DISABLE) {
return false;
}
fio_qpair->md_start = nsdata->dps.md_start;
/* Controller performs PI setup and check */
if (fio_qpair->io_flags & SPDK_NVME_IO_FLAGS_PRACT) {
return false;
}
return true;
}
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
@@ -248,6 +224,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_fio_ctrlr *fio_ctrlr;
struct spdk_fio_qpair *fio_qpair;
struct spdk_nvme_ns *ns;
const struct spdk_nvme_ns_data *nsdata;
struct fio_file *f = fio_thread->current_f;
uint32_t ns_id;
char *p;
@@ -292,6 +269,7 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
g_error = true;
return;
}
nsdata = spdk_nvme_ns_get_data(ns);
fio_qpair = fio_thread->fio_qpair;
while (fio_qpair != NULL) {
@@ -330,13 +308,13 @@ attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
fio_thread->fio_qpair = fio_qpair;
if (spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
assert(spdk_nvme_ns_get_pi_type(ns) != SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
fio_qpair->io_flags = g_spdk_pract_flag | g_spdk_prchk_flags;
fio_qpair->do_nvme_pi = fio_do_nvme_pi_check(fio_qpair);
if (fio_qpair->do_nvme_pi) {
fio_qpair->extended_lba = spdk_nvme_ns_supports_extended_lba(ns);
fprintf(stdout, "PI type%u enabled with %s\n", spdk_nvme_ns_get_pi_type(ns),
fio_qpair->extended_lba ? "extended lba" : "separate metadata");
}
fio_qpair->nvme_pi_enabled = true;
fio_qpair->md_start = nsdata->dps.md_start;
fio_qpair->extended_lba = spdk_nvme_ns_supports_extended_lba(ns);
fprintf(stdout, "PI type%u enabled with %s\n", spdk_nvme_ns_get_pi_type(ns),
fio_qpair->extended_lba ? "extended lba" : "separate metadata");
}
f->real_file_size = spdk_nvme_ns_get_size(fio_qpair->ns);
@@ -581,6 +559,13 @@ fio_extended_lba_setup_pi(struct spdk_fio_qpair *fio_qpair, struct io_u *io_u)
struct iovec iov;
int rc;
/* Set appmask and apptag when PRACT is enabled */
if (fio_qpair->io_flags & SPDK_NVME_IO_FLAGS_PRACT) {
fio_req->dif_ctx.apptag_mask = g_spdk_apptag_mask;
fio_req->dif_ctx.app_tag = g_spdk_apptag;
return 0;
}
extended_lba_size = spdk_nvme_ns_get_extended_sector_size(ns);
md_size = spdk_nvme_ns_get_md_size(ns);
lba = io_u->offset / extended_lba_size;
@@ -619,6 +604,13 @@ fio_separate_md_setup_pi(struct spdk_fio_qpair *fio_qpair, struct io_u *io_u)
struct iovec iov, md_iov;
int rc;
/* Set appmask and apptag when PRACT is enabled */
if (fio_qpair->io_flags & SPDK_NVME_IO_FLAGS_PRACT) {
fio_req->dif_ctx.apptag_mask = g_spdk_apptag_mask;
fio_req->dif_ctx.app_tag = g_spdk_apptag;
return 0;
}
block_size = spdk_nvme_ns_get_sector_size(ns);
md_size = spdk_nvme_ns_get_md_size(ns);
lba = io_u->offset / block_size;
@@ -659,6 +651,11 @@ fio_extended_lba_verify_pi(struct spdk_fio_qpair *fio_qpair, struct io_u *io_u)
struct spdk_dif_error err_blk = {};
int rc;
/* Do nothing when PRACT is enabled */
if (fio_qpair->io_flags & SPDK_NVME_IO_FLAGS_PRACT) {
return 0;
}
iov.iov_base = io_u->buf;
iov.iov_len = io_u->xfer_buflen;
lba_count = io_u->xfer_buflen / spdk_nvme_ns_get_extended_sector_size(ns);
@@ -682,6 +679,11 @@ fio_separate_md_verify_pi(struct spdk_fio_qpair *fio_qpair, struct io_u *io_u)
struct spdk_dif_error err_blk = {};
int rc;
/* Do nothing when PRACT is enabled */
if (fio_qpair->io_flags & SPDK_NVME_IO_FLAGS_PRACT) {
return 0;
}
iov.iov_base = io_u->buf;
iov.iov_len = io_u->xfer_buflen;
lba_count = io_u->xfer_buflen / spdk_nvme_ns_get_sector_size(ns);
@@ -705,7 +707,7 @@ static void spdk_fio_completion_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
struct spdk_fio_qpair *fio_qpair = fio_req->fio_qpair;
int rc;
if (fio_qpair->do_nvme_pi && fio_req->io->ddir == DDIR_READ) {
if (fio_qpair->nvme_pi_enabled && fio_req->io->ddir == DDIR_READ) {
if (fio_qpair->extended_lba) {
rc = fio_extended_lba_verify_pi(fio_qpair, fio_req->io);
} else {
@@ -784,7 +786,7 @@ spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
if (fio_qpair == NULL || ns == NULL) {
return -ENXIO;
}
if (fio_qpair->do_nvme_pi && !fio_qpair->extended_lba) {
if (fio_qpair->nvme_pi_enabled && !fio_qpair->extended_lba) {
md_buf = fio_req->md_buf;
}
fio_req->fio_qpair = fio_qpair;
@@ -803,7 +805,7 @@ spdk_fio_queue(struct thread_data *td, struct io_u *io_u)
lba_count = io_u->xfer_buflen / block_size;
/* TODO: considering situations that fio will randomize and verify io_u */
if (fio_qpair->do_nvme_pi) {
if (fio_qpair->nvme_pi_enabled) {
if (fio_qpair->extended_lba) {
rc = fio_extended_lba_setup_pi(fio_qpair, io_u);
} else {