bdev: simplify completion callback deferring

We can do all of the completion work first, and then
make the decision at the very end on whether to
defer the callback or not.

This also removes the bdev_io defer_callback member -
we no longer need it since we now only defer the
callback itself, instead of deferring the full
execution of the spdk_bdev_io_complete() routine.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I3285e34d2fbce34d4254dca2119561ff825ee9e8

Reviewed-on: https://review.gerrithub.io/365711
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Jim Harris 2017-06-15 11:48:27 -07:00 committed by Daniel Verkamp
parent 356e2ab20f
commit dfa0618d29
2 changed files with 13 additions and 27 deletions

View File

@ -333,11 +333,6 @@ struct spdk_bdev_io {
*/
bool in_submit_request;
/**
* Set to true when a bdev_io's callback routine should be deferred after completion.
*/
bool defer_callback;
/** Member used for linking child I/Os together. */
TAILQ_ENTRY(spdk_bdev_io) link;

View File

@ -961,7 +961,6 @@ spdk_bdev_reset(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
bdev_io->ch = channel;
bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
bdev_io->defer_callback = true;
spdk_bdev_io_init(bdev_io, bdev, cb_arg, cb);
/* First, abort all I/O queued up waiting for buffers. */
@ -1073,13 +1072,12 @@ spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
}
static void
bdev_io_deferred_completion(void *ctx)
_spdk_bdev_io_complete(void *ctx)
{
struct spdk_bdev_io *bdev_io = ctx;
assert(bdev_io->in_submit_request == false);
spdk_bdev_io_complete(bdev_io, bdev_io->status);
assert(bdev_io->cb != NULL);
bdev_io->cb(bdev_io, bdev_io->status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->caller_ctx);
}
void
@ -1087,21 +1085,6 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
{
bdev_io->status = status;
if (bdev_io->in_submit_request) {
bdev_io->defer_callback = true;
}
if (bdev_io->defer_callback) {
/*
* Defer completion to avoid potential infinite recursion if the
* user's completion callback issues a new I/O.
*/
bdev_io->defer_callback = false;
spdk_thread_send_msg(spdk_io_channel_get_thread(bdev_io->ch->channel),
bdev_io_deferred_completion, bdev_io);
return;
}
assert(bdev_io->ch->io_outstanding > 0);
bdev_io->ch->io_outstanding--;
if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
@ -1156,8 +1139,16 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
}
#endif
assert(bdev_io->cb != NULL);
bdev_io->cb(bdev_io, status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->caller_ctx);
if (bdev_io->in_submit_request || bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
/*
* Defer completion to avoid potential infinite recursion if the
* user's completion callback issues a new I/O.
*/
spdk_thread_send_msg(spdk_io_channel_get_thread(bdev_io->ch->channel),
_spdk_bdev_io_complete, bdev_io);
} else {
_spdk_bdev_io_complete(bdev_io);
}
}
void