From 0b034da148688b7d00f820e65a996a5946329d67 Mon Sep 17 00:00:00 2001
From: Tomasz Zawadzki
Date: Mon, 28 Feb 2022 15:05:48 +0100
Subject: [PATCH] blob: add return codes to bs_user_op_abort

Prior to this patch bs_user_op_abort() always returned EIO
back to the bdev layer. This is not sufficient for ENOMEM cases
where the I/O should be resubmitted by the bdev layer.

ENOMEM for bs_sequence_start() in bs_allocate_and_copy_cluster()
specifically addresses issue #2306.

Signed-off-by: Tomasz Zawadzki
Change-Id: Icfb0ce9ca20e1c4dd1668ba77d121f7091acb044
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11764
Tested-by: SPDK CI Jenkins
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris
Reviewed-by: Ben Walker
---
 lib/blob/blobstore.c | 14 +++++++-------
 lib/blob/request.c   |  4 ++--
 lib/blob/request.h   |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/lib/blob/blobstore.c b/lib/blob/blobstore.c
index 5b56098ba..6ed950b6a 100644
--- a/lib/blob/blobstore.c
+++ b/lib/blob/blobstore.c
@@ -2349,7 +2349,7 @@ blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
 	if (bserrno == 0) {
 		bs_user_op_execute(op);
 	} else {
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, bserrno);
 	}
 }
 
@@ -2447,7 +2447,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 
 	ctx = calloc(1, sizeof(*ctx));
 	if (!ctx) {
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -ENOMEM);
 		return;
 	}
 
@@ -2463,7 +2463,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
 				    blob->bs->cluster_sz);
 			free(ctx);
-			bs_user_op_abort(op);
+			bs_user_op_abort(op, -ENOMEM);
 			return;
 		}
 	}
@@ -2475,7 +2475,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 	if (rc != 0) {
 		spdk_free(ctx->buf);
 		free(ctx);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, rc);
 		return;
 	}
 
@@ -2490,7 +2490,7 @@ bs_allocate_and_copy_cluster(struct spdk_blob *blob,
 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
 		spdk_free(ctx->buf);
 		free(ctx);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -ENOMEM);
 		return;
 	}
 
@@ -3141,13 +3141,13 @@ bs_channel_destroy(void *io_device, void *ctx_buf)
 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -EIO);
 	}
 
 	while (!TAILQ_EMPTY(&channel->queued_io)) {
 		op = TAILQ_FIRST(&channel->queued_io);
 		TAILQ_REMOVE(&channel->queued_io, op, link);
-		bs_user_op_abort(op);
+		bs_user_op_abort(op, -EIO);
 	}
 
 	free(channel->req_mem);
diff --git a/lib/blob/request.c b/lib/blob/request.c
index 6f1d69d9a..1c0b9391f 100644
--- a/lib/blob/request.c
+++ b/lib/blob/request.c
@@ -494,13 +494,13 @@ bs_user_op_execute(spdk_bs_user_op_t *op)
 }
 
 void
-bs_user_op_abort(spdk_bs_user_op_t *op)
+bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno)
 {
 	struct spdk_bs_request_set *set;
 
 	set = (struct spdk_bs_request_set *)op;
 
-	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, -EIO);
+	set->cpl.u.blob_basic.cb_fn(set->cpl.u.blob_basic.cb_arg, bserrno);
 
 	TAILQ_INSERT_TAIL(&set->channel->reqs, set, link);
 }
diff --git a/lib/blob/request.h b/lib/blob/request.h
index 619902501..0269e2291 100644
--- a/lib/blob/request.h
+++ b/lib/blob/request.h
@@ -210,6 +210,6 @@ spdk_bs_user_op_t *bs_user_op_alloc(struct spdk_io_channel *channel, struct spdk
 
 void bs_user_op_execute(spdk_bs_user_op_t *op);
 
-void bs_user_op_abort(spdk_bs_user_op_t *op);
+void bs_user_op_abort(spdk_bs_user_op_t *op, int bserrno);
 
 #endif