lib/blob: save sequence immediately on bs_load/unload

Assigning the sequence to the load context was done very late in the
process. To keep later functions lean and free of a separate seq
argument, it is now assigned immediately after the sequence is started.

The only functions in the load path that still need a separate seq
argument are those passed directly to the read/write device operations.
The rest can simply use the sequence stored in spdk_bs_load_ctx.
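
To illustrate the pattern in isolation, here is a rough, self-contained
sketch. The struct and helper names below (struct sequence, struct
load_ctx, sequence_start(), read_super()) are made-up stand-ins for
spdk_bs_sequence_t, spdk_bs_load_ctx, spdk_bs_sequence_start() and the
load helpers, not the actual blobstore code:

/*
 * Hypothetical, simplified illustration only -- placeholder types and
 * names, not the real SPDK code.
 */
#include <stdio.h>
#include <stdlib.h>

struct sequence {               /* stands in for spdk_bs_sequence_t */
        int id;
};

struct load_ctx {               /* stands in for struct spdk_bs_load_ctx */
        struct sequence *seq;
};

static struct sequence *sequence_start(void)
{
        return calloc(1, sizeof(struct sequence));
}

/* Helpers no longer take a separate seq argument... */
static void read_super(struct load_ctx *ctx)
{
        /* ...they reach the sequence through the context instead. */
        printf("reading super block on sequence %d\n", ctx->seq->id);
}

int main(void)
{
        struct load_ctx *ctx = calloc(1, sizeof(*ctx));

        if (ctx == NULL) {
                return 1;
        }

        /* Assign the sequence to the context immediately after starting it. */
        ctx->seq = sequence_start();
        if (ctx->seq == NULL) {
                free(ctx);
                return 1;
        }

        read_super(ctx);

        free(ctx->seq);
        free(ctx);
        return 0;
}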

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I2bd610dc4c7b4a7b0c3de92391922475c514326a
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/481899
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
commit 7167f8d334
parent bbbe586b28
Author: Tomasz Zawadzki
Date:   2020-01-17 05:45:39 -05:00

@@ -2907,9 +2907,8 @@ _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
 }
 
 static void
-_spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
+_spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx, int bserrno)
 {
-	ctx->seq = seq;
 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
 }
 
@@ -2937,7 +2936,7 @@ _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrn
 		return;
 	}
 
-	_spdk_bs_load_complete(seq, ctx, bserrno);
+	_spdk_bs_load_complete(ctx, bserrno);
 }
 
 static void
@@ -3020,9 +3019,8 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 }
 
 static void
-_spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg)
+_spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
 {
-	struct spdk_bs_load_ctx *ctx = cb_arg;
 	uint64_t lba, lba_count, mask_size;
 
 	/* Read the used pages mask */
@@ -3030,13 +3028,13 @@ _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg)
 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 	if (!ctx->mask) {
-		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
+		_spdk_bs_load_ctx_fail(ctx->seq, ctx, -ENOMEM);
 		return;
 	}
 
 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
-	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
+	spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
				  _spdk_bs_load_used_pages_cpl, ctx);
 }
 
@@ -3118,14 +3116,14 @@ static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
 }
 
 static void
-_spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
+_spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
 
 static void
 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 {
 	struct spdk_bs_load_ctx *ctx = cb_arg;
 
-	_spdk_bs_load_complete(seq, ctx, bserrno);
+	_spdk_bs_load_complete(ctx, bserrno);
 }
 
 static void
@@ -3151,9 +3149,9 @@ _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bs
 }
 
 static void
-_spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
+_spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx, int bserrno)
 {
-	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl);
+	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
 }
 
 static void
@@ -3183,7 +3181,7 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
 				ctx->in_page_chain = true;
 				ctx->cur_page = ctx->page->next;
-				_spdk_bs_load_replay_cur_md_page(seq, ctx);
+				_spdk_bs_load_replay_cur_md_page(ctx);
 				return;
 			}
 		}
@@ -3197,7 +3195,7 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 
 	if (ctx->page_index < ctx->super->md_len) {
 		ctx->cur_page = ctx->page_index;
-		_spdk_bs_load_replay_cur_md_page(seq, ctx);
+		_spdk_bs_load_replay_cur_md_page(ctx);
 	} else {
 		/* Claim all of the clusters used by the metadata */
 		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
@@ -3205,65 +3203,61 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 			_spdk_bs_claim_cluster(ctx->bs, i);
 		}
 		spdk_free(ctx->page);
-		_spdk_bs_load_write_used_md(seq, ctx, bserrno);
+		_spdk_bs_load_write_used_md(ctx, bserrno);
 	}
 }
 
 static void
-_spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
+_spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
 {
-	struct spdk_bs_load_ctx *ctx = cb_arg;
 	uint64_t lba;
 
 	assert(ctx->cur_page < ctx->super->md_len);
 	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
-	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
+	spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba,
				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
				  _spdk_bs_load_replay_md_cpl, ctx);
 }
 
 static void
-_spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
+_spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
 {
-	struct spdk_bs_load_ctx *ctx = cb_arg;
-
 	ctx->page_index = 0;
 	ctx->cur_page = 0;
 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 	if (!ctx->page) {
-		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
+		_spdk_bs_load_ctx_fail(ctx->seq, ctx, -ENOMEM);
 		return;
 	}
-	_spdk_bs_load_replay_cur_md_page(seq, ctx);
+	_spdk_bs_load_replay_cur_md_page(ctx);
 }
 
 static void
-_spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg)
+_spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
 {
-	struct spdk_bs_load_ctx *ctx = cb_arg;
 	int rc;
 
 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
 	if (rc < 0) {
-		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
+		_spdk_bs_load_ctx_fail(ctx->seq, ctx, -ENOMEM);
 		return;
 	}
 
 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
 	if (rc < 0) {
-		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
+		_spdk_bs_load_ctx_fail(ctx->seq, ctx, -ENOMEM);
 		return;
 	}
 
 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
 	if (rc < 0) {
-		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
+		_spdk_bs_load_ctx_fail(ctx->seq, ctx, -ENOMEM);
		return;
 	}
 
 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
-	_spdk_bs_load_replay_md(seq, ctx);
+	_spdk_bs_load_replay_md(ctx);
 }
 
 static void
@@ -3338,9 +3332,9 @@ _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
 
 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
-		_spdk_bs_recover(seq, ctx);
+		_spdk_bs_recover(ctx);
 	} else {
-		_spdk_bs_load_read_used_pages(seq, ctx);
+		_spdk_bs_load_read_used_pages(ctx);
 	}
 }
 
@@ -3350,7 +3344,6 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 {
 	struct spdk_blob_store *bs;
 	struct spdk_bs_cpl cpl;
-	spdk_bs_sequence_t *seq;
 	struct spdk_bs_load_ctx *ctx;
 	struct spdk_bs_opts opts = {};
 	int err;
@@ -3409,8 +3402,8 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 	cpl.u.bs_handle.cb_arg = cb_arg;
 	cpl.u.bs_handle.bs = bs;
 
-	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
-	if (!seq) {
+	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
+	if (!ctx->seq) {
 		spdk_free(ctx->super);
 		free(ctx);
 		_spdk_bs_free(bs);
@@ -3419,7 +3412,7 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
 	}
 
 	/* Read the super block */
-	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
+	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
				  _spdk_bs_load_super_cpl, ctx);
 }
@@ -4051,7 +4044,6 @@ void
 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
 {
 	struct spdk_bs_cpl cpl;
-	spdk_bs_sequence_t *seq;
 	struct spdk_bs_load_ctx *ctx;
 
 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
@@ -4082,8 +4074,8 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
 	cpl.u.bs_basic.cb_fn = cb_fn;
 	cpl.u.bs_basic.cb_arg = cb_arg;
 
-	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
-	if (!seq) {
+	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
+	if (!ctx->seq) {
 		spdk_free(ctx->super);
 		free(ctx);
 		cb_fn(cb_arg, -ENOMEM);
@@ -4091,7 +4083,7 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
 	}
 
 	/* Read super block */
-	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
+	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
				  _spdk_bs_unload_read_super_cpl, ctx);
 }