blobstore: switch to spdk_*malloc().

spdk_dma_*malloc() is about to be deprecated.

Change-Id: I0da485ec68329ee9f819cf6dfdb8744c5993e6f2
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448171
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Darek Stojaczyk 2019-03-15 15:07:41 +01:00 committed by Jim Harris
parent 38902a5a27
commit 530f481259

View File

@@ -595,16 +595,14 @@ _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
if (*page_count == 0) {
assert(*pages == NULL);
*page_count = 1;
*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
SPDK_BS_PAGE_SIZE,
NULL);
*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
} else {
assert(*pages != NULL);
(*page_count)++;
*pages = spdk_dma_realloc(*pages,
*pages = spdk_realloc(*pages,
SPDK_BS_PAGE_SIZE * (*page_count),
SPDK_BS_PAGE_SIZE,
NULL);
SPDK_BS_PAGE_SIZE);
}
if (*pages == NULL) {
@@ -764,7 +762,7 @@ _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
&cur_page);
if (rc < 0) {
spdk_dma_free(*pages);
spdk_free(*pages);
*pages = NULL;
*page_count = 0;
return rc;
@@ -780,7 +778,7 @@ _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
&required_sz, internal);
if (rc < 0) {
spdk_dma_free(*pages);
spdk_free(*pages);
*pages = NULL;
*page_count = 0;
return rc;
@@ -897,7 +895,7 @@ _spdk_blob_load_final(void *cb_arg, int bserrno)
ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
/* Free the memory */
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
}
@@ -925,7 +923,7 @@ error:
SPDK_ERRLOG("Snapshot fail\n");
_spdk_blob_free(blob);
ctx->cb_fn(ctx->seq, NULL, bserrno);
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
}
@@ -944,7 +942,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
_spdk_blob_free(blob);
ctx->cb_fn(seq, NULL, bserrno);
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
return;
}
@@ -955,7 +953,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
_spdk_blob_free(blob);
ctx->cb_fn(seq, NULL, -EINVAL);
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
return;
}
@@ -969,8 +967,8 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
/* Read the next page */
ctx->num_pages++;
ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
sizeof(*page), NULL);
ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
sizeof(*page));
if (ctx->pages == NULL) {
ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
free(ctx);
@@ -989,7 +987,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
if (rc) {
_spdk_blob_free(blob);
ctx->cb_fn(seq, NULL, rc);
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
return;
}
@@ -1002,7 +1000,7 @@ _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
if (len != sizeof(spdk_blob_id)) {
_spdk_blob_free(blob);
ctx->cb_fn(seq, NULL, -EINVAL);
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
return;
}
@@ -1043,8 +1041,7 @@ _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
}
ctx->blob = blob;
ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
SPDK_BS_PAGE_SIZE, NULL);
ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
if (!ctx->pages) {
free(ctx);
cb_fn(seq, cb_arg, -ENOMEM);
@@ -1104,7 +1101,7 @@ _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
ctx->cb_fn(seq, ctx->cb_arg, bserrno);
/* Free the memory */
spdk_dma_free(ctx->pages);
spdk_free(ctx->pages);
free(ctx);
}
@@ -1474,7 +1471,7 @@ _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
ctx->blob->bs->clean = 0;
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
_spdk_blob_persist_start(ctx);
}
@@ -1523,7 +1520,8 @@ _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
ctx->cb_arg = cb_arg;
if (blob->bs->clean) {
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
cb_fn(seq, cb_arg, -ENOMEM);
free(ctx);
@@ -1567,7 +1565,7 @@ _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
}
}
spdk_dma_free(ctx->buf);
spdk_free(ctx->buf);
free(ctx);
}
@@ -1670,7 +1668,8 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
ctx->page = cluster_start_page;
if (blob->parent_id != SPDK_BLOBID_INVALID) {
ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL);
ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->buf) {
SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
blob->bs->cluster_sz);
@@ -1682,7 +1681,7 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false);
if (rc != 0) {
spdk_dma_free(ctx->buf);
spdk_free(ctx->buf);
free(ctx);
spdk_bs_user_op_abort(op);
return;
@@ -1695,7 +1694,7 @@ _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
if (!ctx->seq) {
_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
spdk_dma_free(ctx->buf);
spdk_free(ctx->buf);
free(ctx);
spdk_bs_user_op_abort(op);
return;
@@ -2535,7 +2534,7 @@ _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, in
{
assert(bserrno != 0);
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
spdk_bs_sequence_finish(seq, bserrno);
_spdk_bs_free(ctx->bs);
free(ctx);
@@ -2597,7 +2596,8 @@ _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequenc
/* Write out the used clusters mask */
mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2625,7 +2625,8 @@ _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl
}
mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2657,7 +2658,8 @@ _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence
}
mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2701,8 +2703,8 @@ _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
ctx->iter_cb_fn = NULL;
spdk_dma_free(ctx->super);
spdk_dma_free(ctx->mask);
spdk_free(ctx->super);
spdk_free(ctx->mask);
spdk_bs_sequence_finish(ctx->seq, bserrno);
free(ctx);
}
@@ -2733,7 +2735,7 @@ _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrn
rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask);
if (rc < 0) {
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
_spdk_bs_load_ctx_fail(seq, ctx, rc);
return;
}
@@ -2758,7 +2760,7 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask);
if (rc < 0) {
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
_spdk_bs_load_ctx_fail(seq, ctx, rc);
return;
}
@@ -2766,11 +2768,12 @@ _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters);
assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
/* Read the used blobids mask */
mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2798,16 +2801,17 @@ _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask);
if (rc < 0) {
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
_spdk_bs_load_ctx_fail(seq, ctx, rc);
return;
}
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
/* Read the used clusters mask */
mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2826,7 +2830,8 @@ _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg)
/* Read the used pages mask */
mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->mask) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -2930,7 +2935,7 @@ _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
ctx->mask = NULL;
_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl);
@@ -2941,7 +2946,7 @@ _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bs
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
ctx->mask = NULL;
_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl);
@@ -3001,7 +3006,7 @@ _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
for (i = 0; i < num_md_clusters; i++) {
_spdk_bs_claim_cluster(ctx->bs, i);
}
spdk_dma_free(ctx->page);
spdk_free(ctx->page);
_spdk_bs_load_write_used_md(seq, ctx, bserrno);
}
}
@@ -3026,9 +3031,8 @@ _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
ctx->page_index = 0;
ctx->cur_page = 0;
ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
SPDK_BS_PAGE_SIZE,
NULL);
ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->page) {
_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
return;
@@ -3193,7 +3197,8 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
ctx->iter_cb_arg = opts.iter_cb_arg;
/* Allocate memory for the super block */
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
free(ctx);
_spdk_bs_free(bs);
@@ -3208,7 +3213,7 @@ spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
_spdk_bs_free(bs);
cb_fn(cb_arg, NULL, -ENOMEM);
@@ -3239,7 +3244,7 @@ struct spdk_bs_dump_ctx {
static void
_spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
{
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
/*
* We need to defer calling spdk_bs_call_cpl() until after
@@ -3361,7 +3366,7 @@ _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrn
if (ctx->cur_page < ctx->super->md_len) {
_spdk_bs_dump_read_md_page(seq, cb_arg);
} else {
spdk_dma_free(ctx->page);
spdk_free(ctx->page);
_spdk_bs_dump_finish(seq, ctx, 0);
}
}
@@ -3415,9 +3420,8 @@ _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
ctx->cur_page = 0;
ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
SPDK_BS_PAGE_SIZE,
NULL);
ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->page) {
_spdk_bs_dump_finish(seq, ctx, -ENOMEM);
return;
@@ -3459,7 +3463,8 @@ spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_x
ctx->print_xattr_fn = print_xattr_fn;
/* Allocate memory for the super block */
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
free(ctx);
_spdk_bs_free(bs);
@@ -3473,7 +3478,7 @@ spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_x
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
_spdk_bs_free(bs);
cb_fn(cb_arg, -ENOMEM);
@@ -3500,7 +3505,7 @@ _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
{
struct spdk_bs_init_ctx *ctx = cb_arg;
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
spdk_bs_sequence_finish(seq, bserrno);
@@ -3597,7 +3602,8 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
ctx->bs = bs;
/* Allocate memory for the super block */
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
free(ctx);
_spdk_bs_free(bs);
@@ -3664,7 +3670,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
"please decrease number of pages reserved for metadata "
"or increase cluster size.\n");
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
_spdk_bs_free(bs);
cb_fn(cb_arg, NULL, -ENOMEM);
@@ -3684,7 +3690,7 @@ spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
_spdk_bs_free(bs);
cb_fn(cb_arg, NULL, -ENOMEM);
@@ -3782,7 +3788,7 @@ _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
/*
* We need to defer calling spdk_bs_call_cpl() until after
@@ -3803,7 +3809,7 @@ _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, i
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
ctx->super->clean = 1;
_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
@@ -3814,7 +3820,7 @@ _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, in
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
ctx->mask = NULL;
_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl);
@@ -3825,7 +3831,7 @@ _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int
{
struct spdk_bs_load_ctx *ctx = cb_arg;
spdk_dma_free(ctx->mask);
spdk_free(ctx->mask);
ctx->mask = NULL;
_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl);
@@ -3860,7 +3866,8 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
ctx->bs = bs;
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
free(ctx);
cb_fn(cb_arg, -ENOMEM);
@@ -3873,7 +3880,7 @@ spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_a
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
cb_fn(cb_arg, -ENOMEM);
return;
@@ -3903,7 +3910,7 @@ _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
SPDK_ERRLOG("Unable to write to super block of blobstore\n");
}
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
spdk_bs_sequence_finish(seq, bserrno);
@@ -3917,7 +3924,7 @@ _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
if (bserrno != 0) {
SPDK_ERRLOG("Unable to read super block of blobstore\n");
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
spdk_bs_sequence_finish(seq, bserrno);
free(ctx);
return;
@@ -3944,7 +3951,8 @@ spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
ctx->bs = bs;
ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
if (!ctx->super) {
free(ctx);
cb_fn(cb_arg, -ENOMEM);
@@ -3957,7 +3965,7 @@ spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
if (!seq) {
spdk_dma_free(ctx->super);
spdk_free(ctx->super);
free(ctx);
cb_fn(cb_arg, -ENOMEM);
return;