lib/bdev: spdk_bdev_abort supports the case when QoS is enabled

When the target I/O is controlled by QoS, the abort request is
submitted on the same channel on which the target I/O was submitted.

Relying on this behavior, add a helper function bdev_abort_queued_io()
and change _bdev_io_submit() so that, when QoS is enabled on the
corresponding channel and the submitted I/O is an abort, it calls
bdev_abort_queued_io() first; if that returns true, i.e. the target I/O
was found in the QoS queue and aborted there, complete the abort in
submission by calling _bdev_io_complete_in_submit() with success.
Add the necessary unit tests as well.

Update the unit tests accordingly: update stub_submit_request() to
abort the matching I/O, update io_during_io_done() because the exact
completion status is now needed rather than a boolean, and update
basic_qos() to reset the rate limit before testing the abort feature.
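
For illustration only, a minimal caller-side sketch of the flow this
patch enables (not part of the patch; read_done(), abort_done(),
read_then_abort(), desc, ch and buf are hypothetical names):

#include "spdk/bdev.h"

/* Completion callback for the original read. Its cb_arg also serves as
 * the key that spdk_bdev_abort() uses to find this I/O. */
static void
read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	/* If the abort wins the race, this runs with success == false
	 * (the I/O completes with SPDK_BDEV_IO_STATUS_ABORTED). */
	spdk_bdev_free_io(bdev_io);
}

static void
abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	/* success == true means the target I/O was found and aborted. */
	spdk_bdev_free_io(bdev_io);
}

/* Submit a read and immediately try to abort it on the same channel,
 * which is where the abort must be issued when the I/O is queued by QoS. */
static int
read_then_abort(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf)
{
	int rc;

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, read_done, buf);
	if (rc != 0) {
		return rc;
	}

	/* The third argument is the cb_arg of the I/O to abort. */
	return spdk_bdev_abort(desc, ch, buf, abort_done, NULL);
}

The unit test below targets its read the same way, passing &status (the
read's cb_arg) as the bio_cb_arg of spdk_bdev_abort().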

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I16ca21e7c32cabfdce5d0e5c27a8af8bb00f11c5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2230
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Shuhei Matsumoto 2020-05-12 03:10:28 +09:00 committed by Tomasz Zawadzki
parent 5771019fc0
commit f048f3d3d3
2 changed files with 92 additions and 6 deletions

lib/bdev/bdev.c

@@ -372,6 +372,8 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
 static inline void bdev_io_complete(void *ctx);
 
+static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
+
 void
 spdk_bdev_get_opts(struct spdk_bdev_opts *opts)
 {
@@ -2019,8 +2021,13 @@ _bdev_io_submit(void *ctx)
 	if (bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS) {
 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 	} else if (bdev_ch->flags & BDEV_CH_QOS_ENABLED) {
-		TAILQ_INSERT_TAIL(&bdev->internal.qos->queued, bdev_io, internal.link);
-		bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
+		if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) &&
+		    bdev_abort_queued_io(&bdev->internal.qos->queued, bdev_io->u.abort.bio_to_abort)) {
+			_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+		} else {
+			TAILQ_INSERT_TAIL(&bdev->internal.qos->queued, bdev_io, internal.link);
+			bdev_qos_io_submit(bdev_ch, bdev->internal.qos);
+		}
 	} else {
 		SPDK_ERRLOG("unknown bdev_ch flag %x found\n", bdev_ch->flags);
 		_bdev_io_complete_in_submit(bdev_ch, bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
@@ -2643,6 +2650,22 @@ bdev_abort_all_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_channel *ch)
 	}
 }
 
+static bool
+bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
+{
+	struct spdk_bdev_io *bdev_io;
+
+	TAILQ_FOREACH(bdev_io, queue, internal.link) {
+		if (bdev_io == bio_to_abort) {
+			TAILQ_REMOVE(queue, bio_to_abort, internal.link);
+			spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static void
 bdev_qos_channel_destroy(void *cb_arg)
 {

test/unit/lib/bdev/mt/bdev.c

@@ -134,10 +134,9 @@ static void
 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 {
 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+	struct spdk_bdev_io *io;
 
 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
-		struct spdk_bdev_io *io;
-
 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
 			io = TAILQ_FIRST(&ch->outstanding_io);
 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
@@ -145,6 +144,21 @@ stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
 			ch->avail_cnt++;
 		}
+	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
+		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
+			if (io == bdev_io->u.abort.bio_to_abort) {
+				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+				ch->outstanding_cnt--;
+				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
+				ch->avail_cnt++;
+
+				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+				return;
+			}
+		}
+
+		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
+		return;
 	}
 
 	if (ch->avail_cnt > 0) {
@@ -181,10 +195,17 @@ stub_complete_io(void *io_target, uint32_t num_to_complete)
 	return num_completed;
 }
 
+static bool
+stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
+{
+	return true;
+}
+
 static struct spdk_bdev_fn_table fn_table = {
 	.get_io_channel = stub_get_io_channel,
 	.destruct = stub_destruct,
 	.submit_request = stub_submit_request,
+	.io_type_supported = stub_io_type_supported,
 };
 
 struct spdk_bdev_module bdev_ut_if;
@@ -527,7 +548,7 @@ io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 {
 	enum spdk_bdev_io_status *status = cb_arg;
 
-	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+	*status = bdev_io->internal.status;
 
 	spdk_bdev_free_io(bdev_io);
 }
@@ -641,7 +662,7 @@ basic_qos(void)
 	struct spdk_io_channel *io_ch[2];
 	struct spdk_bdev_channel *bdev_ch[2];
 	struct spdk_bdev *bdev;
-	enum spdk_bdev_io_status status;
+	enum spdk_bdev_io_status status, abort_status;
 	int rc;
 
 	setup_test();
@@ -706,6 +727,48 @@ basic_qos(void)
 	poll_threads();
 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
 
+	/* Reset rate limit for the next test cases. */
+	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
+	poll_threads();
+
+	/*
+	 * Test abort request when QoS is enabled.
+	 */
+
+	/* Send an I/O on thread 0, which is where the QoS thread is running. */
+	set_thread(0);
+	status = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+
+	/* Send an abort to the I/O on the same thread. */
+	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+
+	poll_threads();
+	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+	/* Send an I/O on thread 1. The QoS thread is not running here. */
+	status = SPDK_BDEV_IO_STATUS_PENDING;
+	set_thread(1);
+	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+	poll_threads();
+
+	/* Send an abort to the I/O on the same thread. */
+	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
+	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
+	CU_ASSERT(rc == 0);
+	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
+
+	poll_threads();
+
+	/* Complete the I/O with failure and the abort with success on thread 1. */
+	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
+
+	set_thread(0);
+
 	/*
 	 * Close the descriptor only, which should stop the qos channel as
 	 * the last descriptor removed.