bdev: add tracking for time spent processing I/O.

Two extra fields are added to the iostat rpc.
1. io_time. The amount of time spent on I/O processing since queue
depth tracking was enabled for this bdev.
2. weighted_io_time. Incremented each time this bdev's queue depth is
polled by the amount of time spent processing I/O since the last polling
event times the measured queue depth.

Change-Id: Ie70489ec24dee83f3eeac8f4f813ec7074ff458f
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/419031
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Seth Howell 2018-07-11 15:06:17 -07:00 committed by Jim Harris
parent c9f4db3027
commit dd9cd4352d
4 changed files with 62 additions and 0 deletions

View File

@ -430,6 +430,40 @@ spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev);
*/
void spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period);
/**
 * Get the time spent processing I/O for this device.
 *
 * This value depends on the queue depth sampling period: it is
 * incremented at each sampling event by the sampling period, but only
 * when the measured queue depth is greater than 0.
 *
 * Disk utilization can be calculated with the following formula:
 * disk_util = (io_time_2 - io_time_1) / elapsed_time.
 * The caller is responsible for tracking the elapsed time between the
 * two measurements.
 *
 * \param bdev Block device to query.
 *
 * \return The io time for this device in microseconds.
 */
uint64_t spdk_bdev_get_io_time(const struct spdk_bdev *bdev);
/**
 * Get the weighted I/O processing time for this bdev.
 *
 * This value depends on the queue depth sampling period: at each
 * sampling event it grows by the sampling period multiplied by the
 * queue depth measured during that period.
 *
 * The average queue depth can be calculated with the following formula:
 * queue_depth = (weighted_io_time_2 - weighted_io_time_1) / elapsed_time.
 * The caller is responsible for tracking the elapsed time between the
 * two measurements.
 *
 * \param bdev Block device to query.
 *
 * \return The weighted io time for this device in microseconds.
 */
uint64_t spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev);
/**
* Obtain an I/O channel for the block device opened by the specified
* descriptor. I/O channels are bound to threads, so the resulting I/O

View File

@ -318,6 +318,12 @@ struct spdk_bdev {
/** queue depth as calculated the last time the telemetry poller checked. */
uint64_t measured_queue_depth;
/** most recent value of ticks spent performing I/O. Used to calculate the weighted time doing I/O */
uint64_t io_time;
/** weighted time performing I/O. Equal to measured_queue_depth * period */
uint64_t weighted_io_time;
} internal;
};

View File

@ -1641,12 +1641,28 @@ spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
return bdev->internal.period;
}
uint64_t
spdk_bdev_get_weighted_io_time(const struct spdk_bdev *bdev)
{
	/* Accumulated (queue depth * sampling period) total; see
	 * _calculate_measured_qd_cpl for how it is updated.
	 */
	const uint64_t weighted_time = bdev->internal.weighted_io_time;

	return weighted_time;
}
uint64_t
spdk_bdev_get_io_time(const struct spdk_bdev *bdev)
{
	/* Total ticks spent with at least one outstanding I/O,
	 * as tracked by the queue depth sampling poller.
	 */
	const uint64_t busy_time = bdev->internal.io_time;

	return busy_time;
}
/* Completion callback for the per-channel queue depth iteration.
 * Publishes the freshly summed queue depth and, when non-zero,
 * folds it into the weighted I/O time accumulator.
 */
static void
_calculate_measured_qd_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
	uint64_t qd = bdev->internal.temporary_queue_depth;

	bdev->internal.measured_queue_depth = qd;

	if (qd > 0) {
		/* weighted_io_time grows by queue_depth * sampling_period. */
		bdev->internal.weighted_io_time += qd * bdev->internal.period;
	}
}
static void

View File

@ -88,6 +88,12 @@ spdk_rpc_get_bdevs_iostat_cb(struct spdk_bdev *bdev,
spdk_json_write_name(w, "queue_depth");
spdk_json_write_uint64(w, spdk_bdev_get_qd(bdev));
spdk_json_write_name(w, "io_time");
spdk_json_write_uint64(w, spdk_bdev_get_io_time(bdev));
spdk_json_write_name(w, "weighted_io_time");
spdk_json_write_uint64(w, spdk_bdev_get_weighted_io_time(bdev));
}
spdk_json_write_object_end(w);