scripts/nvmf_perf: add workload offset option

Use fio "offset", "offset_increment" and "size" to
split target file into chunks, so that each fio
job clone (specified by "numjobs") gets it's own
part of the disk to work with.

This is especially helpful for sequential
workloads run with numjobs > 1, as it keeps the
workload as close to sequential as possible.
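
For illustration only (the device size and numjobs value below are
assumptions, not part of this change), this is the region each fio job
clone ends up with under the default even split:

```python
# Minimal sketch (assumed values): with numjobs=4 on a 100 GiB device and the
# default even split, fio places each job clone at offset + clone * offset_increment
# and limits it to size, so every clone works on its own 25% chunk.
dev_gib = 100
num_jobs = 4
chunk_pct = 100 // num_jobs                  # 25 -> size=25%, offset_increment=25%
chunk_gib = dev_gib * chunk_pct // 100
for clone in range(num_jobs):
    start_gib = clone * chunk_gib            # offset=0% plus clone * offset_increment
    print("clone %d -> [%d GiB, %d GiB)" % (clone, start_gib, start_gib + chunk_gib))
```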

Change-Id: I1ca88ae56136d22cf396f464d78b05eff5c07a2a
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13515
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Karol Latecki authored 2022-06-30 17:04:44 +00:00, committed by Tomasz Zawadzki
parent 9633d482a7
commit 23e767c4a5
2 changed files with 50 additions and 8 deletions


@ -255,6 +255,8 @@ Optional, SPDK Initiator only:
"rwmixread": 100,
"rate_iops": 10000,
"num_jobs": 2,
"offset": true,
"offset_inc": 10,
"run_time": 30,
"ramp_time": 30,
"run_num": 3
@ -277,6 +279,16 @@ Required:
Optional:
- rate_iops - limit IOPS to this number
- offset - bool; enable offsetting of the IO into the file. When this option is
enabled the file is "split" into a number of chunks equal to the "num_jobs"
parameter value, and each fio thread gets its own chunk to
work with.
For more details see "offset", "offset_increment" and "size" in the fio man
pages. Default: false.
- offset_inc - int; percentage value determining the offset, size and
offset_increment when the "offset" option is enabled. By default, if "offset"
is enabled, the fio file gets split evenly between the fio threads doing the
IO. "offset_inc" can be used to specify a custom value.
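
A minimal sketch of how these two options translate into fio parameters,
mirroring the gen_fio_offset_section helper added below (the sample values
are assumptions):

```python
def offset_section(offset_inc, num_jobs):
    # offset_inc == 0 means "not set"; fall back to an even split per fio thread.
    offset_inc = 100 // num_jobs if offset_inc == 0 else offset_inc
    return "\n".join(["size=%s%%" % offset_inc,
                      "offset=0%",
                      "offset_increment=%s%%" % offset_inc])

print(offset_section(0, 4))    # default: size=25%, offset=0%, offset_increment=25%
print(offset_section(10, 4))   # custom:  size=10%, offset=0%, offset_increment=10%
```
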
#### Test Combinations


@ -865,7 +865,17 @@ class Initiator(Server):
# Logic implemented in SPDKInitiator and KernelInitiator classes
pass
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
@staticmethod
def gen_fio_offset_section(offset_inc, num_jobs):
offset_inc = 100 // num_jobs if offset_inc == 0 else offset_inc
fio_size = "size=%s%%" % offset_inc
fio_offset = "offset=0%"
fio_offset_inc = "offset_increment=%s%%" % offset_inc
return "\n".join([fio_size, fio_offset, fio_offset_inc])
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no,
num_jobs=None, ramp_time=0, run_time=10, rate_iops=0,
offset=False, offset_inc=0):
fio_conf_template = """
[global]
ioengine={ioengine}
@ -918,9 +928,11 @@ rate_iops={rate_iops}
threads = range(0, len(subsystems))
if "spdk" in self.mode:
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs,
offset, offset_inc)
else:
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs,
offset, offset_inc)
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
@ -1366,7 +1378,7 @@ class KernelInitiator(Initiator):
self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
time.sleep(1)
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1, offset=False, offset_inc=0):
nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
filename_section = ""
@ -1388,7 +1400,12 @@ class KernelInitiator(Initiator):
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
offset_section = ""
if offset:
offset_section = self.gen_fio_offset_section(offset_inc, num_jobs)
filename_section = "\n".join([filename_section, header, disks, iodepth, offset_section, ""])
return filename_section
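
Roughly what one generated job section looks like with offset enabled; the
section header and device path are placeholders (they come from code outside
this hunk), and only the offset lines are produced by this change:

```python
# Hypothetical assembly of a single job section (numjobs=2, even split).
header = "[filename0]"                  # placeholder section header
disks = "filename=/dev/nvme0n1"         # placeholder device path
iodepth = "iodepth=64"
offset_section = "size=50%\noffset=0%\noffset_increment=50%"
print("\n".join(["", header, disks, iodepth, offset_section, ""]))
```
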
@ -1453,7 +1470,7 @@ class SPDKInitiator(Initiator):
return json.dumps(bdev_cfg_section, indent=2)
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1, offset=False, offset_inc=0):
filename_section = ""
if len(threads) >= len(subsystems):
threads = range(0, len(subsystems))
@ -1476,7 +1493,12 @@ class SPDKInitiator(Initiator):
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
offset_section = ""
if offset:
offset_section = self.gen_fio_offset_section(offset_inc, num_jobs)
filename_section = "\n".join([filename_section, header, disks, iodepth, offset_section, ""])
return filename_section
@ -1533,6 +1555,13 @@ if __name__ == "__main__":
fio_rate_iops = 0
if "rate_iops" in data[k]:
fio_rate_iops = data[k]["rate_iops"]
fio_offset = False
if "offset" in data[k]:
fio_offset = data[k]["offset"]
fio_offset_inc = 0
if "offset_inc" in data[k]:
fio_offset_inc = data[k]["offset_inc"]
else:
continue
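
The new keys are read with the same explicit "in" checks the script already
uses for the other optional parameters; an equivalent, more compact form
(a sketch, not the committed code) would be:

```python
# Sketch only: dict.get() with defaults instead of explicit "in" checks.
workload = {"rw": "read", "offset": True, "offset_inc": 10}   # sample config entry
fio_offset = workload.get("offset", False)
fio_offset_inc = workload.get("offset_inc", 0)
```
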
@ -1567,7 +1596,8 @@ if __name__ == "__main__":
i.kernel_init_connect()
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops,
fio_offset, fio_offset_inc)
configs.append(cfg)
for i, cfg in zip(initiators, configs):