From af0057e6e1bed1c98a737a3b66bcf0bc3fddc464 Mon Sep 17 00:00:00 2001
From: Maciej Wawryk
Date: Wed, 3 Jun 2020 10:20:16 +0200
Subject: [PATCH] scripts/perf: Add effective QD counting

During perf tests we found that the global qd setting is split across
the number of disks in a fio filename section. Setting iodepth in each
filename section instead, multiplied by the number of disks in that
section and divided by numjobs, gives the expected effective queue
depth per disk.

Signed-off-by: Maciej Wawryk
Change-Id: I89a44e48f10da131e2e9128f3ff2fbfb197076ad
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2749
Reviewed-by: Karol Latecki
Reviewed-by: Darek Stojaczyk
Reviewed-by: Tomasz Zawadzki
Reviewed-by: Shuhei Matsumoto
Tested-by: SPDK CI Jenkins
Community-CI: Mellanox Build Bot
---
 scripts/perf/nvmf/README.md   |  2 +-
 scripts/perf/nvmf/run_nvmf.py | 19 +++++++++++--------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/scripts/perf/nvmf/README.md b/scripts/perf/nvmf/README.md
index 69e6acd91..2973040fd 100644
--- a/scripts/perf/nvmf/README.md
+++ b/scripts/perf/nvmf/README.md
@@ -104,7 +104,7 @@ other than -t, -s, -n and -a.
 Fio job parameters.
 
 - bs: block size
-- qd: io depth
+- qd: io depth - Per connected fio filename target
 - rw: workload mode
 - rwmixread: percentage of reads in readwrite workloads
 - run_time: time (in seconds) to run workload
diff --git a/scripts/perf/nvmf/run_nvmf.py b/scripts/perf/nvmf/run_nvmf.py
index ab89930e2..145e78a8f 100755
--- a/scripts/perf/nvmf/run_nvmf.py
+++ b/scripts/perf/nvmf/run_nvmf.py
@@ -351,7 +351,6 @@ norandommap=1
 rw={rw}
 rwmixread={rwmixread}
 bs={block_size}
-iodepth={io_depth}
 time_based=1
 ramp_time={ramp_time}
 runtime={run_time}
@@ -388,13 +387,13 @@ runtime={run_time}
         threads = range(0, len(subsystems))
 
         if "spdk" in self.mode:
-            filename_section = self.gen_fio_filename_conf(subsystems, threads)
+            filename_section = self.gen_fio_filename_conf(subsystems, threads, io_depth, num_jobs)
         else:
-            filename_section = self.gen_fio_filename_conf(threads)
+            filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
 
         fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
                                               rw=rw, rwmixread=rwmixread, block_size=block_size,
-                                              io_depth=io_depth, ramp_time=ramp_time, run_time=run_time)
+                                              ramp_time=ramp_time, run_time=run_time)
         if num_jobs:
             fio_config = fio_config + "numjobs=%s \n" % num_jobs
         if self.cpus_allowed is not None:
@@ -727,7 +726,7 @@ class KernelInitiator(Initiator):
             self.remote_call("sudo %s disconnect -n %s" % (self.nvmecli_bin, subsystem[1]))
             time.sleep(1)
 
-    def gen_fio_filename_conf(self, threads):
+    def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
         out, err = self.remote_call("lsblk -o NAME -nlp")
         nvme_list = [x for x in out.split("\n") if "nvme" in x]
 
@@ -747,7 +746,9 @@ class KernelInitiator(Initiator):
         for i, r in enumerate(result):
             header = "[filename%s]" % i
             disks = "\n".join(["filename=/dev/%s" % x for x in r])
-            filename_section = "\n".join([filename_section, header, disks])
+            job_section_qd = round((io_depth * len(r)) / num_jobs)
+            iodepth = "iodepth=%s" % job_section_qd
+            filename_section = "\n".join([filename_section, header, disks, iodepth])
 
         return filename_section
 
@@ -788,7 +789,7 @@ class SPDKInitiator(Initiator):
             bdev_section = "\n".join([header, bdev_rows])
         return bdev_section
 
-    def gen_fio_filename_conf(self, subsystems, threads):
+    def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
         filename_section = ""
         if len(threads) >= len(subsystems):
             threads = range(0, len(subsystems))
@@ -807,7 +808,9 @@ class SPDKInitiator(Initiator):
         for i, r in enumerate(result):
             header = "[filename%s]" % i
             disks = "\n".join(["filename=%s" % x for x in r])
-            filename_section = "\n".join([filename_section, header, disks])
+            job_section_qd = round((io_depth * len(r)) / num_jobs)
+            iodepth = "iodepth=%s" % job_section_qd
+            filename_section = "\n".join([filename_section, header, disks, iodepth])
 
         return filename_section
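
Note: below is a minimal, standalone Python sketch of the per-section
iodepth math this patch introduces; the function name
make_filename_section and the example disk paths are illustrative only,
not part of run_nvmf.py. The idea: fio divides one job's iodepth across
all of its filename= targets, while numjobs multiplies the number of
parallel job clones, so each section's iodepth is scaled up by the disk
count and down by numjobs to keep the effective queue depth per disk at
the requested qd.

    def make_filename_section(disks, io_depth, num_jobs=1, section_id=0):
        # Scale iodepth so each filename= target still sees roughly
        # io_depth outstanding I/Os after fio splits the job's queue
        # depth across its files and numjobs clones the job.
        job_section_qd = round((io_depth * len(disks)) / num_jobs)
        lines = ["[filename%s]" % section_id]
        lines += ["filename=%s" % d for d in disks]
        lines.append("iodepth=%s" % job_section_qd)
        return "\n".join(lines)

    # Example: 4 disks, target qd=128 per disk, numjobs=2 -> iodepth=256,
    # i.e. 2 jobs x 256 outstanding I/Os / 4 disks = 128 per disk.
    print(make_filename_section(["/dev/nvme0n1", "/dev/nvme1n1",
                                 "/dev/nvme2n1", "/dev/nvme3n1"],
                                io_depth=128, num_jobs=2))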