lib/nvme: Add spdk_nvme_detach_async() and spdk_nvme_detach_poll_async()
Add two new public APIs, spdk_nvme_detach_async() and spdk_nvme_detach_poll_async(), to let users detach multiple controllers in parallel in a simple manner.

Hold the target controller in nvme_ctrlr_detach_ctx because the user may free any object that held it after spdk_nvme_detach_async() returns.

spdk_nvme_detach_ctx holds all nvme_ctrlr_detach_ctx objects of a sequence in a linked list. spdk_nvme_detach_ctx also has a boolean variable, polling_started, to prevent the user from calling spdk_nvme_detach_async() while spdk_nvme_detach_poll_async() is being called repeatedly.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ib049c19f7ef24410b963fd5c777a21184f3012d1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4758
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent ea1bfd84cd
commit d8f4bbeb43
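A minimal usage sketch of the two new APIs described above (not part of this commit): the detach_all() helper and the ctrlrs array are hypothetical, and the controllers are assumed to have been attached earlier via spdk_nvme_probe().

#include "spdk/nvme.h"

/* Hypothetical helper: start detaching every controller in `ctrlrs` in
 * parallel, then busy-poll until all of the detachments complete. */
static int
detach_all(struct spdk_nvme_ctrlr **ctrlrs, size_t num_ctrlrs)
{
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;
	size_t i;
	int rc;

	for (i = 0; i < num_ctrlrs; i++) {
		/* Each successful call appends one detachment to the shared context. */
		rc = spdk_nvme_detach_async(ctrlrs[i], &detach_ctx);
		if (rc != 0) {
			return rc;
		}
	}

	if (detach_ctx == NULL) {
		/* No context was allocated, e.g. other processes still attach the devices. */
		return 0;
	}

	/* The context is freed by the library when 0 is returned. */
	while (spdk_nvme_detach_poll_async(detach_ctx) == -EAGAIN) {
	}

	return 0;
}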
include/spdk/nvme.h
@@ -802,6 +802,45 @@ int spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx);
  */
 int spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr);
 
+struct spdk_nvme_detach_ctx;
+
+/**
+ * Allocate a context to track detachment of multiple controllers if this call is the
+ * first successful start of detachment in a sequence, or use the passed context otherwise.
+ *
+ * Then, start detaching the specified device returned by spdk_nvme_probe()'s attach_cb
+ * from the NVMe driver, and append this detachment to the context.
+ *
+ * The user must call spdk_nvme_detach_poll_async() to complete the detachment.
+ *
+ * If no context has been allocated before this call, and the specified device is either
+ * detached only locally from the caller process (another process still attaches it) or
+ * fails to be detached, no context is allocated.
+ *
+ * This function should be called from a single thread while no other threads are
+ * actively using the NVMe device.
+ *
+ * \param ctrlr Opaque handle to NVMe controller.
+ * \param detach_ctx Reference to the context in a sequence. A new context is allocated
+ * if this call is the first successful start of detachment in a sequence, or the passed
+ * context is used otherwise.
+ */
+int spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr,
+			   struct spdk_nvme_detach_ctx **detach_ctx);
+
+/**
+ * Poll detachment of multiple controllers until they complete.
+ *
+ * The user must call this function until it returns 0.
+ *
+ * \param detach_ctx Context to track the detachment.
+ *
+ * \return 0 if all detachments complete; the context is also freed and no longer valid.
+ * \return -EAGAIN if any detachment is still in progress; the user must call
+ * spdk_nvme_detach_poll_async() again to continue progress.
+ */
+int spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *detach_ctx);
+
 /**
  * Update the transport ID for a given controller.
  *
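Because spdk_nvme_detach_poll_async() returns -EAGAIN instead of blocking, an application can also drive the detach sequence from a poller rather than spinning. The sketch below is illustrative only and not part of this commit; the app_detach_state struct and detach_poll() callback are hypothetical, and it assumes the SPDK thread library (spdk/thread.h) with spdk_poller_unregister() and the SPDK_POLLER_BUSY/SPDK_POLLER_IDLE return values.

#include "spdk/nvme.h"
#include "spdk/thread.h"

/* Hypothetical application state: the detach context returned by
 * spdk_nvme_detach_async() and the poller that drives it. */
struct app_detach_state {
	struct spdk_nvme_detach_ctx *detach_ctx;
	struct spdk_poller *poller;
};

/* Poller callback, registered e.g. via
 * state->poller = spdk_poller_register(detach_poll, state, 0); */
static int
detach_poll(void *arg)
{
	struct app_detach_state *state = arg;

	if (spdk_nvme_detach_poll_async(state->detach_ctx) == -EAGAIN) {
		return SPDK_POLLER_BUSY;
	}

	/* All detachments completed; the context was freed by the library. */
	state->detach_ctx = NULL;
	spdk_poller_unregister(&state->poller);
	return SPDK_POLLER_IDLE;
}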
lib/nvme/nvme.c
@@ -97,6 +97,7 @@ nvme_ctrlr_detach_async(struct spdk_nvme_ctrlr *ctrlr,
 
 		return -ENOMEM;
 	}
+	ctx->ctrlr = ctrlr;
 	ctx->cb_fn = nvme_ctrlr_detach_async_finish;
 
 	nvme_ctrlr_proc_put_ref(ctrlr);
@@ -116,12 +117,11 @@ nvme_ctrlr_detach_async(struct spdk_nvme_ctrlr *ctrlr,
 }
 
 static int
-nvme_ctrlr_detach_poll_async(struct spdk_nvme_ctrlr *ctrlr,
-			     struct nvme_ctrlr_detach_ctx *ctx)
+nvme_ctrlr_detach_poll_async(struct nvme_ctrlr_detach_ctx *ctx)
 {
 	int rc;
 
-	rc = nvme_ctrlr_destruct_poll_async(ctrlr, ctx);
+	rc = nvme_ctrlr_destruct_poll_async(ctx->ctrlr, ctx);
 	if (rc == -EAGAIN) {
 		return -EAGAIN;
 	}
@@ -148,7 +148,7 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
 	}
 
 	while (1) {
-		rc = nvme_ctrlr_detach_poll_async(ctrlr, ctx);
+		rc = nvme_ctrlr_detach_poll_async(ctx);
 		if (rc != -EAGAIN) {
 			break;
 		}
@@ -158,6 +158,82 @@ spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
 	return 0;
 }
 
+int
+spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr,
+		       struct spdk_nvme_detach_ctx **_detach_ctx)
+{
+	struct spdk_nvme_detach_ctx *detach_ctx;
+	struct nvme_ctrlr_detach_ctx *ctx = NULL;
+	int rc;
+
+	if (ctrlr == NULL || _detach_ctx == NULL) {
+		return -EINVAL;
+	}
+
+	/* Use a context header to poll detachment for multiple controllers.
+	 * Allocate a new one if not allocated yet, or use the passed one otherwise.
+	 */
+	detach_ctx = *_detach_ctx;
+	if (detach_ctx == NULL) {
+		detach_ctx = calloc(1, sizeof(*detach_ctx));
+		if (detach_ctx == NULL) {
+			return -ENOMEM;
+		}
+		TAILQ_INIT(&detach_ctx->head);
+	} else if (detach_ctx->polling_started) {
+		SPDK_ERRLOG("Busy at polling detachment now.\n");
+		return -EBUSY;
+	}
+
+	rc = nvme_ctrlr_detach_async(ctrlr, &ctx);
+	if (rc != 0 || ctx == NULL) {
+		/* If this detach failed and the context header is empty, it means we just
+		 * allocated the header and need to free it before returning.
+		 */
+		if (TAILQ_EMPTY(&detach_ctx->head)) {
+			free(detach_ctx);
+		}
+		return rc;
+	}
+
+	/* Append a context for this detachment to the context header. */
+	TAILQ_INSERT_TAIL(&detach_ctx->head, ctx, link);
+
+	*_detach_ctx = detach_ctx;
+
+	return 0;
+}
+
+int
+spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *detach_ctx)
+{
+	struct nvme_ctrlr_detach_ctx *ctx, *tmp_ctx;
+	int rc;
+
+	if (detach_ctx == NULL) {
+		return -EINVAL;
+	}
+
+	detach_ctx->polling_started = true;
+
+	TAILQ_FOREACH_SAFE(ctx, &detach_ctx->head, link, tmp_ctx) {
+		TAILQ_REMOVE(&detach_ctx->head, ctx, link);
+
+		rc = nvme_ctrlr_detach_poll_async(ctx);
+		if (rc == -EAGAIN) {
+			/* If not -EAGAIN, ctx was freed by nvme_ctrlr_detach_poll_async(). */
+			TAILQ_INSERT_HEAD(&detach_ctx->head, ctx, link);
+		}
+	}
+
+	if (!TAILQ_EMPTY(&detach_ctx->head)) {
+		return -EAGAIN;
+	}
+
+	free(detach_ctx);
+	return 0;
+}
+
 void
 nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
 {
lib/nvme/nvme_internal.h
@@ -846,10 +846,17 @@ struct spdk_nvme_probe_ctx {
 typedef void (*nvme_ctrlr_detach_cb)(struct spdk_nvme_ctrlr *ctrlr);
 
 struct nvme_ctrlr_detach_ctx {
-	nvme_ctrlr_detach_cb cb_fn;
-	uint64_t shutdown_start_tsc;
-	uint32_t shutdown_timeout_ms;
-	bool shutdown_complete;
+	struct spdk_nvme_ctrlr *ctrlr;
+	nvme_ctrlr_detach_cb cb_fn;
+	uint64_t shutdown_start_tsc;
+	uint32_t shutdown_timeout_ms;
+	bool shutdown_complete;
+	TAILQ_ENTRY(nvme_ctrlr_detach_ctx) link;
 };
+
+struct spdk_nvme_detach_ctx {
+	TAILQ_HEAD(, nvme_ctrlr_detach_ctx) head;
+	bool polling_started;
+};
 
 struct nvme_driver {
lib/nvme/spdk_nvme.map
@@ -24,6 +24,8 @@
 	spdk_nvme_probe_async;
 	spdk_nvme_probe_poll_async;
 	spdk_nvme_detach;
+	spdk_nvme_detach_async;
+	spdk_nvme_detach_poll_async;
 
 	spdk_nvme_ctrlr_is_discovery;
 	spdk_nvme_ctrlr_get_default_ctrlr_opts;