From 29f86a26304ef24c8b062923f59054ef7a281530 Mon Sep 17 00:00:00 2001
From: Shuhei Matsumoto
Date: Tue, 2 Jun 2020 14:35:13 +0900
Subject: [PATCH] lib/bdev: spdk_bdev_abort() supports aborting I/O queued for
 buffer allocation

Buffer allocation is done after redirection to the QoS thread. Hence add
a new helper function bdev_abort_buf_io() and call it in
bdev_io_do_submit() for both the need_buf_small and need_buf_large
queues.

For the zcopy API, buffer allocation is done before submission, but the
caller can get the bdev I/O object and can abort the I/O directly if
needed.

Signed-off-by: Shuhei Matsumoto
Change-Id: I2d6170de5ab2ba4d260df99db3e376c0e2c5ffaf
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2250
Tested-by: SPDK CI Jenkins
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Aleksey Marchuk
Reviewed-by: Jim Harris
Reviewed-by: Ben Walker
Reviewed-by: Michael Haeuptle
---
 lib/bdev/bdev.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index 56ff030b2..9a2f67ede 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -373,6 +373,7 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 static inline void bdev_io_complete(void *ctx);
 
 static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
+static bool bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort);
 
 void
 spdk_bdev_get_opts(struct spdk_bdev_opts *opts)
@@ -1679,9 +1680,12 @@ bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_i
 	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;
 
 	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
+		struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
 		struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;
 
-		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort)) {
+		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
+		    bdev_abort_buf_io(&mgmt_channel->need_buf_small, bio_to_abort) ||
+		    bdev_abort_buf_io(&mgmt_channel->need_buf_large, bio_to_abort)) {
 			_bdev_io_complete_in_submit(bdev_ch, bdev_io,
 						    SPDK_BDEV_IO_STATUS_SUCCESS);
 			return;
@@ -2676,6 +2680,22 @@ bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
 	return false;
 }
 
+static bool
+bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort)
+{
+	struct spdk_bdev_io *bdev_io;
+
+	STAILQ_FOREACH(bdev_io, queue, internal.buf_link) {
+		if (bdev_io == bio_to_abort) {
+			STAILQ_REMOVE(queue, bio_to_abort, spdk_bdev_io, internal.buf_link);
+			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void
 bdev_qos_channel_destroy(void *cb_arg)
 {
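
For reference, the new bdev_abort_buf_io() helper relies on the standard BSD
STAILQ scan-and-remove pattern: walk the queue, unlink the matching entry,
and report whether it was found. The standalone sketch below (not part of
the patch) illustrates that pattern with hypothetical demo_io and
demo_abort_queued_io names; it assumes a BSD-style <sys/queue.h> that
provides the STAILQ macros, which SPDK obtains through its own spdk/queue.h
wrapper. In the real code the element is struct spdk_bdev_io, the link field
is internal.buf_link, and the found element is completed with
SPDK_BDEV_IO_STATUS_ABORTED instead of being printed.

	#include <sys/queue.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct demo_io {
		int id;                         /* stands in for the bdev I/O payload */
		STAILQ_ENTRY(demo_io) buf_link; /* mirrors internal.buf_link */
	};

	STAILQ_HEAD(demo_io_stailq, demo_io);

	/* Return true and unlink io_to_abort if it is still waiting on this queue. */
	static bool
	demo_abort_queued_io(struct demo_io_stailq *queue, struct demo_io *io_to_abort)
	{
		struct demo_io *io;

		STAILQ_FOREACH(io, queue, buf_link) {
			if (io == io_to_abort) {
				STAILQ_REMOVE(queue, io_to_abort, demo_io, buf_link);
				/* SPDK would complete the I/O as ABORTED here. */
				printf("aborted queued io %d\n", io_to_abort->id);
				return true;
			}
		}

		return false;
	}

	int
	main(void)
	{
		struct demo_io_stailq queue = STAILQ_HEAD_INITIALIZER(queue);
		struct demo_io a = { .id = 1 }, b = { .id = 2 };

		STAILQ_INSERT_TAIL(&queue, &a, buf_link);
		STAILQ_INSERT_TAIL(&queue, &b, buf_link);

		/* The first call finds and removes the I/O; the second call misses. */
		printf("%d\n", demo_abort_queued_io(&queue, &b)); /* 1 */
		printf("%d\n", demo_abort_queued_io(&queue, &b)); /* 0 */

		return 0;
	}

The linear scan matches how the patch handles the need_buf_small and
need_buf_large queues: an abort is rare relative to I/O submission, so an
O(n) walk over the pending-buffer queue is an acceptable trade-off against
keeping the hot submission path free of extra bookkeeping.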