lib/nvme: Add an internal API nvme_wait_for_completion_robust_lock_timeout

Add an internal API nvme_wait_for_completion_robust_lock_timeout()
and reimplement the related internal APIs as thin wrappers that call
it with the appropriate parameters.

nvme_wait_for_completion_robust_lock_timeout() covers the current use
cases of nvme_wait_for_completion_robust_lock() and
nvme_wait_for_completion_timeout(), and can also serve future use cases.
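
For illustration only (this sketch is not part of the patch; the
submission step, the ctrlr pointer, and the one-second timeout are
hypothetical), a caller that needs a bounded wait could use the new
API as follows:

    struct nvme_completion_poll_status status = {};
    int rc;

    /* Submit the command with nvme_completion_poll_cb as the callback and
     * &status as the callback argument (submission elided), then poll the
     * admin queue under the controller lock for at most one second.
     */
    rc = nvme_wait_for_completion_robust_lock_timeout(ctrlr->adminq, &status,
            &ctrlr->ctrlr_lock, 1000000);
    if (rc == -ECANCELED && status.timed_out) {
        /* The wait expired; the completion may still arrive later, so the
         * status object must stay valid until the callback runs.
         */
    }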

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I2b499643930256a39ebe279f56a399f20a7a2fde
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4217
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Author: Shuhei Matsumoto
Date: 2020-09-14 11:20:05 +09:00
Committed-by: Tomasz Zawadzki
Parent: fcc187e3cf
Commit: 06d25b7021

4 changed files with 125 additions and 43 deletions

@@ -111,21 +111,28 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
  * \param status completion status. The user must fill this structure with zeroes before calling
  * this function
  * \param robust_mutex optional robust mutex to lock while polling qpair
+ * \param timeout_in_usecs optional timeout
  *
  * \return 0 if command completed without error,
  * -EIO if command completed with error,
- * -ECANCELED if command is not completed due to transport/device error
+ * -ECANCELED if command is not completed due to transport/device error or time expired
  *
  * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
  * and status as the callback argument.
  */
 int
-nvme_wait_for_completion_robust_lock(
+nvme_wait_for_completion_robust_lock_timeout(
 	struct spdk_nvme_qpair *qpair,
 	struct nvme_completion_poll_status *status,
-	pthread_mutex_t *robust_mutex)
+	pthread_mutex_t *robust_mutex,
+	uint64_t timeout_in_usecs)
 {
-	int rc;
+	uint64_t timeout_tsc = 0;
+	int rc = 0;
+
+	if (timeout_in_usecs) {
+		timeout_tsc = spdk_get_ticks() + timeout_in_usecs * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
+	}
 
 	while (status->done == false) {
 		if (robust_mutex) {
@@ -141,21 +148,54 @@ nvme_wait_for_completion_robust_lock(
 		if (rc < 0) {
 			status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
 			status->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
-			if (status->done == false) {
-				status->timed_out = true;
-			}
-			return -ECANCELED;
+			break;
 		}
+
+		if (timeout_tsc && spdk_get_ticks() > timeout_tsc) {
+			rc = -1;
+			break;
+		}
 	}
 
+	if (status->done == false) {
+		status->timed_out = true;
+	}
+
+	if (rc < 0) {
+		return -ECANCELED;
+	}
+
 	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
 }
 
+/**
+ * Poll qpair for completions until a command completes.
+ *
+ * \param qpair queue to poll
+ * \param status completion status. The user must fill this structure with zeroes before calling
+ * this function
+ * \param robust_mutex optional robust mutex to lock while polling qpair
+ *
+ * \return 0 if command completed without error,
+ * -EIO if command completed with error,
+ * -ECANCELED if command is not completed due to transport/device error
+ *
+ * The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
+ * and status as the callback argument.
+ */
+int
+nvme_wait_for_completion_robust_lock(
+	struct spdk_nvme_qpair *qpair,
+	struct nvme_completion_poll_status *status,
+	pthread_mutex_t *robust_mutex)
+{
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
+}
+
 int
 nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
 			 struct nvme_completion_poll_status *status)
 {
-	return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
 }
/**
@@ -178,34 +218,7 @@ nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
 				 struct nvme_completion_poll_status *status,
 				 uint64_t timeout_in_usecs)
 {
-	uint64_t timeout_tsc = 0;
-	int rc = 0;
-
-	if (timeout_in_usecs) {
-		timeout_tsc = spdk_get_ticks() + timeout_in_usecs * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
-	}
-
-	while (status->done == false) {
-		rc = spdk_nvme_qpair_process_completions(qpair, 0);
-
-		if (rc < 0) {
-			status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
-			status->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
-			break;
-		}
-
-		if (timeout_tsc && spdk_get_ticks() > timeout_tsc) {
-			break;
-		}
-	}
-
-	if (status->done == false || rc < 0) {
-		if (status->done == false) {
-			status->timed_out = true;
-		}
-		return -ECANCELED;
-	}
-
-	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
 }
 
 static void
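
The new implementation converts the microsecond timeout to an absolute
deadline in CPU ticks once, before polling, so the loop only compares
tick counters. A minimal standalone sketch of that conversion (using
the real SPDK env APIs spdk_get_ticks()/spdk_get_ticks_hz() and the
SPDK_SEC_TO_USEC constant from spdk/util.h; the 500 ms value is only an
example):

    #include "spdk/env.h"
    #include "spdk/util.h"  /* SPDK_SEC_TO_USEC */

    uint64_t timeout_in_usecs = 500 * 1000;  /* example: 500 ms */
    /* ticks = usecs * (ticks per second) / (usecs per second) */
    uint64_t timeout_tsc = spdk_get_ticks() +
                           timeout_in_usecs * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;

    /* The poll loop gives up once spdk_get_ticks() > timeout_tsc. */

Passing timeout_in_usecs == 0 skips the deadline entirely, which is how
the zero-timeout wrappers above preserve the old wait-forever behavior.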

@@ -908,6 +908,10 @@ int nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
 int nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
 				     struct nvme_completion_poll_status *status,
 				     uint64_t timeout_in_usecs);
+int nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
+		struct nvme_completion_poll_status *status,
+		pthread_mutex_t *robust_mutex,
+		uint64_t timeout_in_usecs);
 
 struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
 		pid_t pid);
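
Together with the wrappers added in lib/nvme above, these declarations
make nvme_wait_for_completion_robust_lock_timeout() the single
underlying implementation. Schematically (a summary, not code from the
patch):

    /* nvme_wait_for_completion(qpair, status)
     *     == nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0)
     *
     * nvme_wait_for_completion_robust_lock(qpair, status, mutex)
     *     == nvme_wait_for_completion_robust_lock_timeout(qpair, status, mutex, 0)
     *
     * nvme_wait_for_completion_timeout(qpair, status, usecs)
     *     == nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, usecs)
     */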

@@ -1235,6 +1235,7 @@ test_nvme_request_check_timeout(void)
 struct nvme_completion_poll_status g_status;
 uint64_t completion_delay_us, timeout_in_usecs;
 int g_process_comp_result;
+pthread_mutex_t g_robust_lock = PTHREAD_MUTEX_INITIALIZER;
 
 int
 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)

@@ -1304,6 +1305,60 @@ test_nvme_wait_for_completion(void)
 	CU_ASSERT(rc == 0);
 	CU_ASSERT(g_status.timed_out == false);
 	CU_ASSERT(g_status.done == true);
+
+	/* completion timeout */
+	memset(&g_status, 0, sizeof(g_status));
+	completion_delay_us = 2000000;
+	timeout_in_usecs = 1000000;
+	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
+			timeout_in_usecs);
+	CU_ASSERT(g_status.timed_out == true);
+	CU_ASSERT(g_status.done == false);
+	CU_ASSERT(rc == -ECANCELED);
+
+	/* spdk_nvme_qpair_process_completions returns error */
+	memset(&g_status, 0, sizeof(g_status));
+	g_process_comp_result = -1;
+	completion_delay_us = 1000000;
+	timeout_in_usecs = 2000000;
+	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
+			timeout_in_usecs);
+	CU_ASSERT(rc == -ECANCELED);
+	CU_ASSERT(g_status.timed_out == true);
+	CU_ASSERT(g_status.done == false);
+	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+	g_process_comp_result = 0;
+
+	/* complete in time */
+	memset(&g_status, 0, sizeof(g_status));
+	completion_delay_us = 1000000;
+	timeout_in_usecs = 2000000;
+	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
+			timeout_in_usecs);
+	CU_ASSERT(g_status.timed_out == false);
+	CU_ASSERT(g_status.done == true);
+	CU_ASSERT(rc == 0);
+
+	/* nvme_wait_for_completion */
+	/* spdk_nvme_qpair_process_completions returns error */
+	memset(&g_status, 0, sizeof(g_status));
+	g_process_comp_result = -1;
+	rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock);
+	CU_ASSERT(rc == -ECANCELED);
+	CU_ASSERT(g_status.timed_out == true);
+	CU_ASSERT(g_status.done == false);
+	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+	/* successful completion */
+	memset(&g_status, 0, sizeof(g_status));
+	g_process_comp_result = 0;
+	rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(g_status.timed_out == false);
+	CU_ASSERT(g_status.done == true);
 }
 
 static void
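
The stub of spdk_nvme_qpair_process_completions() that drives these
cases sits outside the hunk above. A plausible minimal version,
assuming the common test environment where spdk_delay_us() advances
the same mocked clock that spdk_get_ticks() reads, would be:

    int
    spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
    {
    	/* Emulate the command needing completion_delay_us to complete. */
    	spdk_delay_us(completion_delay_us);

    	/* Complete the command only if it would finish within the timeout. */
    	if (g_process_comp_result == 0 && completion_delay_us < timeout_in_usecs) {
    		g_status.done = true;
    	}

    	return g_process_comp_result;
    }

With completion_delay_us = 2000000 and timeout_in_usecs = 1000000, one
poll pushes the mocked clock past the deadline without setting done,
which is exactly the timed-out case asserted above.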

@@ -291,10 +291,11 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 static struct nvme_completion_poll_status *g_failed_status;
 
 int
-nvme_wait_for_completion_robust_lock(
+nvme_wait_for_completion_robust_lock_timeout(
 	struct spdk_nvme_qpair *qpair,
 	struct nvme_completion_poll_status *status,
-	pthread_mutex_t *robust_mutex)
+	pthread_mutex_t *robust_mutex,
+	uint64_t timeout_in_usecs)
 {
 	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
 		g_failed_status = status;
@@ -309,11 +310,20 @@ nvme_wait_for_completion_robust_lock(
 	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
 }
 
+int
+nvme_wait_for_completion_robust_lock(
+	struct spdk_nvme_qpair *qpair,
+	struct nvme_completion_poll_status *status,
+	pthread_mutex_t *robust_mutex)
+{
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, robust_mutex, 0);
+}
+
 int
 nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
 			 struct nvme_completion_poll_status *status)
 {
-	return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, 0);
 }
 
 int
@@ -321,7 +331,7 @@ nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
 				 struct nvme_completion_poll_status *status,
 				 uint64_t timeout_in_usecs)
 {
-	return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+	return nvme_wait_for_completion_robust_lock_timeout(qpair, status, NULL, timeout_in_usecs);
 }
 
 int