nvme: Don't use stack variable to track request completion

A pointer to a stack variable is passed as the argument to the
nvme_completion_poll_cb function, and this variable is later used to
track completion in the spdk_nvme_wait_for_completion() function.
In the normal scenario, a request submitted to the admin queue is
completed within the function that submitted it. However,
spdk_nvme_wait_for_completion() calls
nvme_transport_qpair_process_completions(), which may return an error
to the caller; the caller may then exit the function that submitted
the request, and the pointer to the stack variable is no longer valid.
The request may still be pending at that point and complete later
(e.g. when the controller/qpair is destroyed), which leads to a call
to nvme_completion_poll_cb with a pointer to an invalid stack variable.

Fix: dynamically allocate the status structure used to track completion,
and add a new field to the nvme_completion_poll_status structure that
marks status objects which must be freed in the completion callback.

Fixes #1125

Change-Id: Ie0cd8316e1284d42a67439b056c48ab89f23e0d0
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/481530
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by Alexey Marchuk on 2020-01-13 18:03:51 +03:00; committed by Ben Walker
parent 97a7cacc72
commit 8818ace2f4
8 changed files with 356 additions and 90 deletions

View File

@ -87,6 +87,12 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
struct nvme_completion_poll_status *status = arg;
if (status->timed_out) {
/* There is no routine waiting for the completion of this request; free the allocated memory */
free(status);
return;
}
/*
* Copy status into the argument passed by the caller, so that
* the caller can check the status to determine if the
@ -116,8 +122,7 @@ spdk_nvme_wait_for_completion_robust_lock(
struct nvme_completion_poll_status *status,
pthread_mutex_t *robust_mutex)
{
memset(&status->cpl, 0, sizeof(status->cpl));
status->done = false;
memset(status, 0, sizeof(*status));
int rc;
while (status->done == false) {
@ -134,6 +139,9 @@ spdk_nvme_wait_for_completion_robust_lock(
if (rc < 0) {
status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
status->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
if (status->done == false) {
status->timed_out = true;
}
return -ECANCELED;
}
}
@ -170,8 +178,7 @@ spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
uint64_t timeout_tsc = 0;
int rc = 0;
memset(&status->cpl, 0, sizeof(status->cpl));
status->done = false;
memset(status, 0, sizeof(*status));
if (timeout_in_secs) {
timeout_tsc = spdk_get_ticks() + timeout_in_secs * spdk_get_ticks_hz();
}
@ -190,6 +197,9 @@ spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
}
if (status->done == false || rc < 0) {
if (status->done == false) {
status->timed_out = true;
}
return -ECANCELED;
}
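
Every call site converted in the files below follows the same
caller-side discipline. A condensed sketch of that pattern (assembled
from the hunks in this commit, not a new API; submit_admin_cmd() is a
placeholder for the various nvme_ctrlr_cmd_*() submit helpers, and
status/rc/ctrlr are assumed to be in scope):

status = malloc(sizeof(*status));
if (!status) {
	SPDK_ERRLOG("Failed to allocate status tracker\n");
	return -ENOMEM;
}

rc = submit_admin_cmd(ctrlr, nvme_completion_poll_cb, status);
if (rc != 0) {
	/* Never submitted, so the callback will not run: free here. */
	free(status);
	return rc;
}

if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
	/* On timeout the request is still queued and the callback now
	 * owns the memory; free only if we did not time out. */
	if (!status->timed_out) {
		free(status);
	}
	return -ENXIO;
}

free(status);	/* completed normally: the caller frees */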

View File

@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -527,7 +527,7 @@ nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
struct spdk_nvme_intel_log_page_directory *log_page_directory;
log_page_directory = spdk_zmalloc(sizeof(struct spdk_nvme_intel_log_page_directory),
@ -537,24 +537,35 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
return -ENXIO;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
SPDK_NVME_GLOBAL_NS_TAG, log_page_directory,
sizeof(struct spdk_nvme_intel_log_page_directory),
0, nvme_completion_poll_cb, &status);
0, nvme_completion_poll_cb, status);
if (rc != 0) {
spdk_free(log_page_directory);
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, status,
ctrlr->opts.admin_timeout_ms / 1000)) {
spdk_free(log_page_directory);
SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
if (!status->timed_out) {
free(status);
}
return 0;
}
nvme_ctrlr_construct_intel_support_log_page_list(ctrlr, log_page_directory);
spdk_free(log_page_directory);
free(status);
return 0;
}
@ -594,7 +605,7 @@ static void
nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
{
uint32_t cdw11;
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
if (ctrlr->opts.arbitration_burst == 0) {
return;
@ -605,6 +616,12 @@ nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
return;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return;
}
cdw11 = ctrlr->opts.arbitration_burst;
if (spdk_nvme_ctrlr_get_flags(ctrlr) & SPDK_NVME_CTRLR_WRR_SUPPORTED) {
@ -615,15 +632,20 @@ nvme_ctrlr_set_arbitration_feature(struct spdk_nvme_ctrlr *ctrlr)
if (spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
cdw11, 0, NULL, 0,
nvme_completion_poll_cb, &status) < 0) {
nvme_completion_poll_cb, status) < 0) {
SPDK_ERRLOG("Set arbitration feature failed\n");
free(status);
return;
}
if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, status,
ctrlr->opts.admin_timeout_ms / 1000)) {
SPDK_ERRLOG("Timeout to set arbitration feature\n");
}
if (!status->timed_out) {
free(status);
}
}
static void
@ -1265,7 +1287,7 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
int
nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
uint32_t i;
uint32_t num_pages;
@ -1290,6 +1312,12 @@ nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
return -ENOMEM;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
/*
* Iterate through the pages and fetch each chunk of 1024 namespaces until
@ -1298,11 +1326,11 @@ nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
for (i = 0; i < num_pages; i++) {
rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
&new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc != 0) {
goto fail;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
rc = -ENXIO;
goto fail;
@ -1332,9 +1360,13 @@ nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
*/
spdk_free(ctrlr->active_ns_list);
ctrlr->active_ns_list = new_ns_list;
free(status);
return 0;
fail:
if (!status->timed_out) {
free(status);
}
spdk_free(new_ns_list);
return rc;
}
@ -2870,19 +2902,30 @@ int
spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_ctrlr_list *payload)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
struct spdk_nvme_ns *ns;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
res = nvme_ctrlr_identify_active_ns(ctrlr);
if (res) {
@ -2897,19 +2940,30 @@ int
spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_ctrlr_list *payload)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
struct spdk_nvme_ns *ns;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
res = nvme_ctrlr_identify_active_ns(ctrlr);
if (res) {
@ -2926,22 +2980,33 @@ spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
uint32_t
spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
uint32_t nsid;
struct spdk_nvme_ns *ns;
res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, status);
if (res) {
free(status);
return 0;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
if (!status->timed_out) {
free(status);
}
return 0;
}
nsid = status.cpl.cdw0;
nsid = status->cpl.cdw0;
ns = &ctrlr->ns[nsid - 1];
free(status);
/* Inactive NS */
res = nvme_ns_construct(ns, nsid, ctrlr);
if (res) {
@ -2955,18 +3020,29 @@ spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_dat
int
spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
struct spdk_nvme_ns *ns;
res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
res = nvme_ctrlr_identify_active_ns(ctrlr);
if (res) {
@ -2983,18 +3059,29 @@ int
spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
struct spdk_nvme_format *format)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
&status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
return spdk_nvme_ctrlr_reset(ctrlr);
}
@ -3004,7 +3091,7 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
int slot, enum spdk_nvme_fw_commit_action commit_action, struct spdk_nvme_status *completion_status)
{
struct spdk_nvme_fw_commit fw_commit;
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
unsigned int size_remaining;
unsigned int offset;
@ -3029,6 +3116,12 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
return -1;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
/* Firmware download */
size_remaining = size;
offset = 0;
@ -3039,13 +3132,17 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
nvme_completion_poll_cb,
&status);
status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
p += transfer;
@ -3059,20 +3156,25 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
fw_commit.ca = commit_action;
res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
&status);
status);
if (res) {
free(status);
return res;
}
res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);
res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock);
memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));
if (!status->timed_out) {
free(status);
}
if (res) {
if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
status.cpl.status.sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
if (completion_status->sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
completion_status->sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
if (completion_status->sct == SPDK_NVME_SCT_COMMAND_SPECIFIC &&
completion_status->sc == SPDK_NVME_SC_FIRMWARE_REQ_CONVENTIONAL_RESET) {
SPDK_NOTICELOG("firmware activation requires conventional reset to be performed. !\n");
} else {
SPDK_ERRLOG("nvme_ctrlr_cmd_fw_commit failed!\n");
@ -3123,18 +3225,29 @@ int
spdk_nvme_ctrlr_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
uint16_t spsp, uint8_t nssf, void *payload, size_t size)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_security_receive(ctrlr, secp, spsp, nssf, payload, size,
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_security_receive failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
return 0;
}
@ -3143,19 +3256,31 @@ int
spdk_nvme_ctrlr_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
uint16_t spsp, uint8_t nssf, void *payload, size_t size)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int res;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
res = nvme_ctrlr_cmd_security_send(ctrlr, secp, spsp, nssf, payload, size, nvme_completion_poll_cb,
&status);
status);
if (res) {
free(status);
return res;
}
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status, &ctrlr->ctrlr_lock)) {
SPDK_ERRLOG("spdk_nvme_ctrlr_security_send failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
return 0;
}
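
One subtlety above, in spdk_nvme_ctrlr_update_firmware(): anything the
caller still needs from the completion has to be copied out of the
heap-allocated status before the tracker can be released. Condensed
from the hunk (same names):

res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, status,
						&ctrlr->ctrlr_lock);

/* Copy the completion status out first... */
memcpy(completion_status, &status->cpl.status, sizeof(struct spdk_nvme_status));

/* ...then release the tracker (a timed-out request still owns it)... */
if (!status->timed_out) {
	free(status);
}

/* ...and inspect only the copy (completion_status) from here on. */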

View File

@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -45,11 +45,17 @@ nvme_fabric_prop_set_cmd(struct spdk_nvme_ctrlr *ctrlr,
uint32_t offset, uint8_t size, uint64_t value)
{
struct spdk_nvmf_fabric_prop_set_cmd cmd = {};
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
assert(size == SPDK_NVMF_PROP_SIZE_4 || size == SPDK_NVMF_PROP_SIZE_8);
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
cmd.opcode = SPDK_NVME_OPC_FABRIC;
cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET;
cmd.ofst = offset;
@ -58,15 +64,20 @@ nvme_fabric_prop_set_cmd(struct spdk_nvme_ctrlr *ctrlr,
rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, (struct spdk_nvme_cmd *)&cmd,
NULL, 0,
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc < 0) {
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
if (!status->timed_out) {
free(status);
}
SPDK_ERRLOG("Property Set failed\n");
return -1;
}
free(status);
return 0;
}
@ -76,12 +87,18 @@ nvme_fabric_prop_get_cmd(struct spdk_nvme_ctrlr *ctrlr,
uint32_t offset, uint8_t size, uint64_t *value)
{
struct spdk_nvmf_fabric_prop_set_cmd cmd = {};
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
struct spdk_nvmf_fabric_prop_get_rsp *response;
int rc;
assert(size == SPDK_NVMF_PROP_SIZE_4 || size == SPDK_NVMF_PROP_SIZE_8);
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
cmd.opcode = SPDK_NVME_OPC_FABRIC;
cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
cmd.ofst = offset;
@ -89,17 +106,21 @@ nvme_fabric_prop_get_cmd(struct spdk_nvme_ctrlr *ctrlr,
rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, (struct spdk_nvme_cmd *)&cmd,
NULL, 0, nvme_completion_poll_cb,
&status);
status);
if (rc < 0) {
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
if (!status->timed_out) {
free(status);
}
SPDK_ERRLOG("Property Get failed\n");
return -1;
}
response = (struct spdk_nvmf_fabric_prop_get_rsp *)&status.cpl;
response = (struct spdk_nvmf_fabric_prop_get_rsp *)&status->cpl;
if (size == SPDK_NVMF_PROP_SIZE_4) {
*value = response->value.u32.low;
@ -107,6 +128,8 @@ nvme_fabric_prop_get_cmd(struct spdk_nvme_ctrlr *ctrlr,
*value = response->value.u64;
}
free(status);
return 0;
}
@ -205,18 +228,29 @@ static int
nvme_fabric_get_discovery_log_page(struct spdk_nvme_ctrlr *ctrlr,
void *log_page, uint32_t size, uint64_t offset)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_DISCOVERY, 0, log_page, size, offset,
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc < 0) {
free(status);
return -1;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
if (!status->timed_out) {
free(status);
}
return -1;
}
free(status);
return 0;
}
@ -229,7 +263,7 @@ nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
struct spdk_nvme_ctrlr *discovery_ctrlr;
union spdk_nvme_cc_register cc;
int rc;
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
if (strcmp(probe_ctx->trid.subnqn, SPDK_NVMF_DISCOVERY_NQN) != 0) {
/* It is not a discovery_ctrlr info and try to directly connect it */
@ -260,20 +294,32 @@ nvme_fabric_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
return -1;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
/* get the cdata info */
rc = nvme_ctrlr_cmd_identify(discovery_ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
&discovery_ctrlr->cdata, sizeof(discovery_ctrlr->cdata),
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc != 0) {
SPDK_ERRLOG("Failed to identify cdata\n");
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(discovery_ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(discovery_ctrlr->adminq, status)) {
SPDK_ERRLOG("nvme_identify_controller failed!\n");
if (!status->timed_out) {
free(status);
}
return -ENXIO;
}
free(status);
/* Direct attach through spdk_nvme_connect() API */
if (direct_connect == true) {
/* Set the ready state to skip the normal init process */
@ -341,7 +387,7 @@ nvme_fabric_ctrlr_discover(struct spdk_nvme_ctrlr *ctrlr,
int
nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
struct spdk_nvmf_fabric_connect_rsp *rsp;
struct spdk_nvmf_fabric_connect_cmd cmd;
struct spdk_nvmf_fabric_connect_data *nvmf_data;
@ -364,6 +410,12 @@ nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries)
return -ENOMEM;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = SPDK_NVME_OPC_FABRIC;
cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
@ -386,25 +438,30 @@ nvme_fabric_qpair_connect(struct spdk_nvme_qpair *qpair, uint32_t num_entries)
rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair,
(struct spdk_nvme_cmd *)&cmd,
nvmf_data, sizeof(*nvmf_data),
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc < 0) {
SPDK_ERRLOG("Connect command failed\n");
spdk_free(nvmf_data);
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(qpair, &status)) {
if (spdk_nvme_wait_for_completion(qpair, status)) {
SPDK_ERRLOG("Connect command failed\n");
spdk_free(nvmf_data);
if (!status->timed_out) {
free(status);
}
return -EIO;
}
if (nvme_qpair_is_admin_queue(qpair)) {
rsp = (struct spdk_nvmf_fabric_connect_rsp *)&status.cpl;
rsp = (struct spdk_nvmf_fabric_connect_rsp *)&status->cpl;
ctrlr->cntlid = rsp->status_code_specific.success.cntlid;
SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cntlid);
}
spdk_free(nvmf_data);
free(status);
return 0;
}

View File

@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -339,6 +339,9 @@ struct nvme_request {
struct nvme_completion_poll_status {
struct spdk_nvme_cpl cpl;
bool done;
/* This flag indicates that the request has timed out and the memory
must be freed in the completion callback */
bool timed_out;
};
struct nvme_async_event_request {

View File

@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -113,25 +113,36 @@ nvme_ns_set_identify_data(struct spdk_nvme_ns *ns)
static int
nvme_ctrlr_identify_ns(struct spdk_nvme_ns *ns)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
struct spdk_nvme_ns_data *nsdata;
int rc;
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
nsdata = _nvme_ns_get_data(ns);
rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
nsdata, sizeof(*nsdata),
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc != 0) {
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status,
if (spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status,
&ns->ctrlr->ctrlr_lock)) {
if (!status->timed_out) {
free(status);
}
/* This can occur if the namespace is not active. Simply zero the
* namespace data and continue. */
nvme_ns_destruct(ns);
return 0;
}
free(status);
nvme_ns_set_identify_data(ns);
@ -141,7 +152,7 @@ nvme_ctrlr_identify_ns(struct spdk_nvme_ns *ns)
static int
nvme_ctrlr_identify_id_desc(struct spdk_nvme_ns *ns)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
@ -152,20 +163,31 @@ nvme_ctrlr_identify_id_desc(struct spdk_nvme_ns *ns)
return 0;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
SPDK_DEBUGLOG(SPDK_LOG_NVME, "Attempting to retrieve NS ID Descriptor List\n");
rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST, 0, ns->id,
ns->id_desc_list, sizeof(ns->id_desc_list),
nvme_completion_poll_cb, &status);
nvme_completion_poll_cb, status);
if (rc < 0) {
free(status);
return rc;
}
rc = spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status, &ns->ctrlr->ctrlr_lock);
rc = spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, status, &ns->ctrlr->ctrlr_lock);
if (rc != 0) {
SPDK_WARNLOG("Failed to retrieve NS ID Descriptor List\n");
memset(ns->id_desc_list, 0, sizeof(ns->id_desc_list));
}
if (!status->timed_out) {
free(status);
}
return rc;
}

View File

@ -3,7 +3,7 @@
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2017, IBM Corporation. All rights reserved.
* Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* Copyright (c) 2019, 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -1560,32 +1560,60 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
{
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, status);
if (rc != 0) {
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
SPDK_ERRLOG("nvme_create_io_cq failed!\n");
if (!status->timed_out) {
free(status);
}
return -1;
}
rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, status);
if (rc != 0) {
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
SPDK_ERRLOG("nvme_create_io_sq failed!\n");
if (status->timed_out) {
/* The request is still queued and its memory will be freed in the completion
callback; allocate a new status tracker for the cleanup command. */
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
}
/* Attempt to delete the completion queue */
rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, status);
if (rc != 0) {
/* The original or the newly allocated status structure can be freed since
* the corresponding request has either completed or failed to submit */
free(status);
return -1;
}
spdk_nvme_wait_for_completion(ctrlr->adminq, &status);
spdk_nvme_wait_for_completion(ctrlr->adminq, status);
if (!status->timed_out) {
/* status can be freed regardless of spdk_nvme_wait_for_completion's return value */
free(status);
}
return -1;
}
@ -1603,6 +1631,7 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
pqpair->flags.has_shadow_doorbell = 0;
}
nvme_pcie_qpair_reset(qpair);
free(status);
return 0;
}
@ -1670,7 +1699,7 @@ nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
int
nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
struct nvme_completion_poll_status status;
struct nvme_completion_poll_status *status;
int rc;
assert(ctrlr != NULL);
@ -1679,25 +1708,40 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
goto free;
}
status = malloc(sizeof(*status));
if (!status) {
SPDK_ERRLOG("Failed to allocate status tracker\n");
return -ENOMEM;
}
/* Delete the I/O submission queue */
rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status);
rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, status);
if (rc != 0) {
SPDK_ERRLOG("Failed to send request to delete_io_sq with rc=%d\n", rc);
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
if (!status->timed_out) {
free(status);
}
return -1;
}
/* Delete the completion queue */
rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, status);
if (rc != 0) {
SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
free(status);
return rc;
}
if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
if (!status->timed_out) {
free(status);
}
return -1;
}
free(status);
free:
if (qpair->no_deletion_notification_needed == 0) {
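
The trickiest lifetime case in this commit is in
_nvme_pcie_ctrlr_create_io_qpair() above: when creating the I/O
submission queue times out, the pending request still references the
tracker, so a second tracker must be allocated before the cleanup
command can be issued. Condensed from the hunk (same names):

if (spdk_nvme_wait_for_completion(ctrlr->adminq, status)) {
	SPDK_ERRLOG("nvme_create_io_sq failed!\n");

	if (status->timed_out) {
		/* The timed-out create_io_sq request still points at the
		 * old tracker; nvme_completion_poll_cb frees that one when
		 * the request finally completes. */
		status = malloc(sizeof(*status));
		if (!status) {
			return -ENOMEM;
		}
	}

	/* Clean up the completion queue created in the first step. */
	rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair,
					      nvme_completion_poll_cb, status);
	if (rc != 0) {
		/* Not submitted: no callback will run, free unconditionally. */
		free(status);
		return -1;
	}

	spdk_nvme_wait_for_completion(ctrlr->adminq, status);
	if (!status->timed_out) {
		free(status);
	}
	return -1;
}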

View File

@ -1236,6 +1236,7 @@ test_nvme_wait_for_completion(void)
timeout_in_secs = 1;
g_status.done = true;
rc = spdk_nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
CU_ASSERT(g_status.timed_out == true);
CU_ASSERT(g_status.done == false);
CU_ASSERT(rc == -ECANCELED);
@ -1245,6 +1246,7 @@ test_nvme_wait_for_completion(void)
timeout_in_secs = 2;
rc = spdk_nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
CU_ASSERT(rc == -ECANCELED);
CU_ASSERT(g_status.timed_out == true);
CU_ASSERT(g_status.done == false);
CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
@ -1255,6 +1257,7 @@ test_nvme_wait_for_completion(void)
completion_delay = 1;
timeout_in_secs = 2;
rc = spdk_nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
CU_ASSERT(g_status.timed_out == false);
CU_ASSERT(g_status.done == true);
CU_ASSERT(rc == 0);
@ -1263,6 +1266,7 @@ test_nvme_wait_for_completion(void)
g_process_comp_result = -1;
rc = spdk_nvme_wait_for_completion(&qpair, &g_status);
CU_ASSERT(rc == -ECANCELED);
CU_ASSERT(g_status.timed_out == true);
CU_ASSERT(g_status.done == false);
CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
@ -1272,6 +1276,7 @@ test_nvme_wait_for_completion(void)
/* successful completion */
rc = spdk_nvme_wait_for_completion(&qpair, &g_status);
CU_ASSERT(rc == 0);
CU_ASSERT(g_status.timed_out == false);
CU_ASSERT(g_status.done == true);
}
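
A hypothetical follow-up test (not part of this commit) could exercise
the callback side of the contract directly, in the style of the
surrounding CUnit tests; memory safety itself would have to be checked
by running the suite under ASAN or valgrind:

static void
test_completion_poll_cb_timed_out(void)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_cpl cpl;

	memset(&cpl, 0, sizeof(cpl));

	status = calloc(1, sizeof(*status));
	SPDK_CU_ASSERT_FATAL(status != NULL);
	status->timed_out = true;

	/* Ownership was handed to the callback: it must free the tracker
	 * and return without setting done or copying the completion. */
	nvme_completion_poll_cb(status, &cpl);
}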

View File

@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -282,8 +282,8 @@ spdk_nvme_wait_for_completion_robust_lock(
struct nvme_completion_poll_status *status,
pthread_mutex_t *robust_mutex)
{
memset(status, 0, sizeof(*status));
status->done = true;
memset(&status->cpl, 0, sizeof(status->cpl));
status->cpl.status.sc = 0;
if (set_status_cpl == 1) {
status->cpl.status.sc = 1;