bdev: Factor out I/O stat update at completion into a helper function

The following patches will add max/min latencies and more optional
counters. Factoring this code out into a helper function improves readability.

In addition to the factoring, add spdk_likely() to the check of whether the
I/O completed successfully.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I57581ece2b73d486aa138f8d26a5afaf6953a322
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15480
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Shuhei Matsumoto 2022-11-17 12:26:41 +09:00 committed by Tomasz Zawadzki
parent fab3558f2e
commit 8985382b96

lib/bdev/bdev.c

@@ -6051,43 +6051,9 @@ spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
 }
 
 static inline void
-bdev_io_complete(void *ctx)
+bdev_io_update_io_stat(struct spdk_bdev_io *bdev_io, uint64_t tsc_diff)
 {
-	struct spdk_bdev_io *bdev_io = ctx;
-	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
-	uint64_t tsc, tsc_diff;
-
-	if (spdk_unlikely(bdev_io->internal.in_submit_request || bdev_io->internal.io_submit_ch)) {
-		/*
-		 * Send the completion to the thread that originally submitted the I/O,
-		 * which may not be the current thread in the case of QoS.
-		 */
-		if (bdev_io->internal.io_submit_ch) {
-			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
-			bdev_io->internal.io_submit_ch = NULL;
-		}
-
-		/*
-		 * Defer completion to avoid potential infinite recursion if the
-		 * user's completion callback issues a new I/O.
-		 */
-		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
-				     bdev_io_complete, bdev_io);
-		return;
-	}
-
-	tsc = spdk_get_ticks();
-	tsc_diff = tsc - bdev_io->internal.submit_tsc;
-	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io,
-			      bdev_io->internal.caller_ctx);
-
-	TAILQ_REMOVE(&bdev_ch->io_submitted, bdev_io, internal.ch_link);
-
-	if (bdev_io->internal.ch->histogram) {
-		spdk_histogram_data_tally(bdev_io->internal.ch->histogram, tsc_diff);
-	}
-
-	if (bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS) {
+	if (spdk_likely(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS)) {
 		switch (bdev_io->type) {
 		case SPDK_BDEV_IO_TYPE_READ:
 			bdev_io->internal.ch->stat->bytes_read += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
@@ -6152,6 +6118,46 @@ bdev_io_complete(void *ctx)
 		bdev_io->internal.ch->start_tsc = now_tsc;
 	}
 #endif
+}
+
+static inline void
+bdev_io_complete(void *ctx)
+{
+	struct spdk_bdev_io *bdev_io = ctx;
+	struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch;
+	uint64_t tsc, tsc_diff;
+
+	if (spdk_unlikely(bdev_io->internal.in_submit_request || bdev_io->internal.io_submit_ch)) {
+		/*
+		 * Send the completion to the thread that originally submitted the I/O,
+		 * which may not be the current thread in the case of QoS.
+		 */
+		if (bdev_io->internal.io_submit_ch) {
+			bdev_io->internal.ch = bdev_io->internal.io_submit_ch;
+			bdev_io->internal.io_submit_ch = NULL;
+		}
+
+		/*
+		 * Defer completion to avoid potential infinite recursion if the
+		 * user's completion callback issues a new I/O.
+		 */
+		spdk_thread_send_msg(spdk_bdev_io_get_thread(bdev_io),
+				     bdev_io_complete, bdev_io);
+		return;
+	}
+
+	tsc = spdk_get_ticks();
+	tsc_diff = tsc - bdev_io->internal.submit_tsc;
+	spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io,
+			      bdev_io->internal.caller_ctx);
+
+	TAILQ_REMOVE(&bdev_ch->io_submitted, bdev_io, internal.ch_link);
+
+	if (bdev_io->internal.ch->histogram) {
+		spdk_histogram_data_tally(bdev_io->internal.ch->histogram, tsc_diff);
+	}
+
+	bdev_io_update_io_stat(bdev_io, tsc_diff);
+ 
 	assert(bdev_io->internal.cb != NULL);
 	assert(spdk_get_thread() == spdk_bdev_io_get_thread(bdev_io));
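
The commit message notes that follow-up patches will add max/min latencies and
more optional counters. Purely as an illustration of why the factoring helps
(this is not part of this commit, and the min/max latency fields shown below
are hypothetical, not existing SPDK fields), such additions could stay confined
to the new helper:

/* Hypothetical sketch only: how bdev_io_update_io_stat() might later also
 * track per-channel min/max completion latency. The min_latency_ticks and
 * max_latency_ticks fields are assumptions for illustration.
 */
static inline void
bdev_io_update_io_stat(struct spdk_bdev_io *bdev_io, uint64_t tsc_diff)
{
	struct spdk_bdev_io_stat *stat = bdev_io->internal.ch->stat;

	if (tsc_diff > stat->max_latency_ticks) {
		stat->max_latency_ticks = tsc_diff;
	}
	if (tsc_diff < stat->min_latency_ticks) {
		stat->min_latency_ticks = tsc_diff;
	}

	/* ... existing success/failure counter updates from this commit ... */
}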