bdev: add unmap I/O in bdev_io_stat
There are some patches to the Linux kernel for separating out discard IOs that are written to /proc/diskstats and the various /sys/block/*/stat files. The following additional fields were added to these files: Field 12 -- # of discards completed Field 13 -- # of discards merged Field 14 -- # of sectors discarded Field 15 -- # of milliseconds spent discarding SPDK could provide this raw information to bdev_io_stat. Users can use this information to calculate further statistics for the block device. Signed-off-by: Yanbo Zhou <yanbo.zhou@intel.com> Change-Id: I517d67f0ff0159baf04e24732a8fd0ccefcb9c46 Reviewed-on: https://review.gerrithub.io/c/439057 Reviewed-by: wuzhouhui <wuzhouhui@kingsoft.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Reviewed-by: Ben Walker <benjamin.walker@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
This commit is contained in:
parent
212fd2196f
commit
814f633153
@ -617,8 +617,11 @@ Example response:
|
||||
"num_read_ops": 2,
|
||||
"bytes_written": 0,
|
||||
"num_write_ops": 0,
|
||||
"bytes_unmapped": 0,
|
||||
"num_unmap_ops": 0,
|
||||
"read_latency_ticks": 178904,
|
||||
"write_latency_ticks": 0,
|
||||
"unmap_latency_ticks": 0,
|
||||
"queue_depth_polling_period": 2,
|
||||
"queue_depth": 0,
|
||||
"io_time": 0,
|
||||
|
@ -133,8 +133,11 @@ struct spdk_bdev_io_stat {
|
||||
uint64_t num_read_ops;
|
||||
uint64_t bytes_written;
|
||||
uint64_t num_write_ops;
|
||||
uint64_t bytes_unmapped;
|
||||
uint64_t num_unmap_ops;
|
||||
uint64_t read_latency_ticks;
|
||||
uint64_t write_latency_ticks;
|
||||
uint64_t unmap_latency_ticks;
|
||||
uint64_t ticks_rate;
|
||||
};
|
||||
|
||||
|
@ -2010,8 +2010,11 @@ _spdk_bdev_io_stat_add(struct spdk_bdev_io_stat *total, struct spdk_bdev_io_stat
|
||||
total->num_read_ops += add->num_read_ops;
|
||||
total->bytes_written += add->bytes_written;
|
||||
total->num_write_ops += add->num_write_ops;
|
||||
total->bytes_unmapped += add->bytes_unmapped;
|
||||
total->num_unmap_ops += add->num_unmap_ops;
|
||||
total->read_latency_ticks += add->read_latency_ticks;
|
||||
total->write_latency_ticks += add->write_latency_ticks;
|
||||
total->unmap_latency_ticks += add->unmap_latency_ticks;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3067,6 +3070,10 @@ _spdk_bdev_io_complete(void *ctx)
|
||||
bdev_io->internal.ch->stat.num_write_ops++;
|
||||
bdev_io->internal.ch->stat.write_latency_ticks += tsc_diff;
|
||||
break;
|
||||
case SPDK_BDEV_IO_TYPE_UNMAP:
|
||||
bdev_io->internal.ch->stat.bytes_unmapped += bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen;
|
||||
bdev_io->internal.ch->stat.num_unmap_ops++;
|
||||
bdev_io->internal.ch->stat.unmap_latency_ticks += tsc_diff;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -76,12 +76,21 @@ spdk_rpc_get_bdevs_iostat_cb(struct spdk_bdev *bdev,
|
||||
spdk_json_write_name(w, "num_write_ops");
|
||||
spdk_json_write_uint64(w, stat->num_write_ops);
|
||||
|
||||
spdk_json_write_name(w, "bytes_unmapped");
|
||||
spdk_json_write_uint64(w, stat->bytes_unmapped);
|
||||
|
||||
spdk_json_write_name(w, "num_unmap_ops");
|
||||
spdk_json_write_uint64(w, stat->num_unmap_ops);
|
||||
|
||||
spdk_json_write_name(w, "read_latency_ticks");
|
||||
spdk_json_write_uint64(w, stat->read_latency_ticks);
|
||||
|
||||
spdk_json_write_name(w, "write_latency_ticks");
|
||||
spdk_json_write_uint64(w, stat->write_latency_ticks);
|
||||
|
||||
spdk_json_write_name(w, "unmap_latency_ticks");
|
||||
spdk_json_write_uint64(w, stat->unmap_latency_ticks);
|
||||
|
||||
if (spdk_bdev_get_qd_sampling_period(bdev)) {
|
||||
spdk_json_write_name(w, "queue_depth_polling_period");
|
||||
spdk_json_write_uint64(w, spdk_bdev_get_qd_sampling_period(bdev));
|
||||
|
Loading…
Reference in New Issue
Block a user