lib/blob: move finishing unload to _spdk_bs_unload_finish()

Moved the finishing of blobstore unload to a separate function,
_spdk_bs_unload_finish(), which is now called on every failure and
success path when unloading the blobstore.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I34539b78c5cc63a6fe5891014cba89b9eb62d4df
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/482009
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Author: Tomasz Zawadzki <tomasz.zawadzki@intel.com> 2020-01-17 09:56:37 -05:00
Commit: bb25821c7e
Parent: f7bd1e1eb9

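The diff below applies a common pattern for asynchronous completion
chains: instead of each completion callback duplicating cleanup and
user-callback invocation, every stage funnels both failure and success
into a single finish function. The following minimal sketch models that
pattern outside SPDK; the unload_ctx struct, the stage names, and the
synchronous driver in main() are hypothetical stand-ins for
illustration, not SPDK code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct spdk_bs_load_ctx. */
struct unload_ctx {
	void (*user_cb)(int bserrno);	/* caller's completion callback */
};

/* Single exit point: cleanup and the user callback live here, so no
 * stage can leak the context or complete the operation twice. */
static void
unload_finish(struct unload_ctx *ctx, int bserrno)
{
	ctx->user_cb(bserrno);
	free(ctx);
}

/* Final stage: always finishes, passing its error code through. */
static void
write_super_cpl(struct unload_ctx *ctx, int bserrno)
{
	unload_finish(ctx, bserrno);
}

/* Intermediate stage: on failure, bail out through the same finish
 * path; on success, hand off to the next stage. */
static void
write_used_clusters_cpl(struct unload_ctx *ctx, int bserrno)
{
	if (bserrno != 0) {
		unload_finish(ctx, bserrno);
		return;
	}
	write_super_cpl(ctx, 0);
}

static void
report(int bserrno)
{
	printf("unload completed with bserrno=%d\n", bserrno);
}

int
main(void)
{
	struct unload_ctx *ctx = malloc(sizeof(*ctx));

	if (ctx == NULL) {
		return 1;
	}
	ctx->user_cb = report;
	/* Drive the chain synchronously here; SPDK's real chain is
	 * driven by asynchronous I/O completions. */
	write_used_clusters_cpl(ctx, 0);
	return 0;
}
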

@@ -2666,11 +2666,6 @@ _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
 	struct spdk_bs_load_ctx *ctx = arg;
 	uint64_t mask_size, lba, lba_count;
 
-	if (seq->bserrno) {
-		_spdk_bs_load_ctx_fail(ctx, seq->bserrno);
-		return;
-	}
-
 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
@@ -4006,9 +4001,9 @@ spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
 /* START spdk_bs_unload */
 
 static void
-_spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
+_spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
 {
-	struct spdk_bs_load_ctx *ctx = cb_arg;
+	spdk_bs_sequence_t *seq = ctx->seq;
 
 	spdk_free(ctx->super);
@@ -4026,12 +4021,26 @@ _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	free(ctx);
 }
 
+static void
+_spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
+{
+	struct spdk_bs_load_ctx *ctx = cb_arg;
+
+	_spdk_bs_unload_finish(ctx, bserrno);
+}
+
 static void
 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
 	struct spdk_bs_load_ctx *ctx = cb_arg;
 
 	spdk_free(ctx->mask);
 
+	if (bserrno != 0) {
+		_spdk_bs_unload_finish(ctx, bserrno);
+		return;
+	}
+
 	ctx->super->clean = 1;
 
 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
@@ -4045,6 +4054,11 @@ _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	spdk_free(ctx->mask);
 	ctx->mask = NULL;
 
+	if (bserrno != 0) {
+		_spdk_bs_unload_finish(ctx, bserrno);
+		return;
+	}
+
 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl);
 }
@@ -4056,12 +4070,24 @@ _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	spdk_free(ctx->mask);
 	ctx->mask = NULL;
 
+	if (bserrno != 0) {
+		_spdk_bs_unload_finish(ctx, bserrno);
+		return;
+	}
+
 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl);
 }
 
 static void
 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
+	struct spdk_bs_load_ctx *ctx = cb_arg;
+
+	if (bserrno != 0) {
+		_spdk_bs_unload_finish(ctx, bserrno);
+		return;
+	}
+
 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
 }
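
From the caller's side, the visible effect is that the unload completion
callback now fires exactly once with the bserrno of whichever stage
failed, or 0 on success. A sketch of that view follows; unload_complete()
and do_unload() are hypothetical names, while spdk_bs_unload() and the
spdk_bs_op_complete callback type are SPDK's real public API.

#include "spdk/blob.h"
#include "spdk/log.h"

/* Matches spdk_bs_op_complete; invoked once by the unified finish path,
 * on both the success and every failure path of the unload sequence. */
static void
unload_complete(void *cb_arg, int bserrno)
{
	if (bserrno != 0) {
		SPDK_ERRLOG("blobstore unload failed: %d\n", bserrno);
		return;
	}
	SPDK_NOTICELOG("blobstore unloaded cleanly\n");
}

static void
do_unload(struct spdk_blob_store *bs)
{
	/* Whether reading the super block, writing any used-md mask, or
	 * writing the super block fails, the finish path still runs. */
	spdk_bs_unload(bs, unload_complete, NULL);
}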