2017-04-28 17:18:24 +00:00
|
|
|
# -*- mode: ruby -*-
|
|
|
|
# vi: set ft=ruby :
|
|
|
|
|
2020-02-18 07:44:56 +00:00
|
|
|
require 'open3'
|
2021-11-05 09:52:24 +00:00
|
|
|
# Resolve a distro alias (e.g. "fedora35") to the Vagrant box that should
# be used for it.
#
# A locally built "spdk/<distro>" box always takes precedence over the
# public boxes. For unknown aliases the function aborts, unless
# force_distro is truthy, in which case the alias is returned verbatim so
# arbitrary box names can be used.
#
# @param distro [String] distro alias or raw box name
# @param force_distro [Boolean] allow aliases not present in the map
# @return [String] Vagrant box name
def get_box_type(distro, force_distro)
  spdk_distro = 'spdk/' + distro
  # Only stdout matters here; stderr/status of "vagrant box list" are ignored.
  localboxes, _stderr, _status = Open3.capture3("vagrant box list")
  return spdk_distro if localboxes.include?(spdk_distro)

  distro_to_type = {
    'centos7' => 'centos/7',
    'centos8' => 'centos/8',
    'ubuntu1604' => 'peru/ubuntu-16.04-server-amd64',
    'ubuntu1804' => 'peru/ubuntu-18.04-server-amd64',
    'ubuntu2004' => 'peru/ubuntu-20.04-server-amd64',
    'ubuntu2204' => 'generic/ubuntu2204',
    'fedora33' => 'generic/fedora33',
    'fedora34' => 'generic/fedora34',
    'fedora35' => 'generic/fedora35',
    'fedora36' => 'generic/fedora36',
    'arch' => 'generic/arch',
    'freebsd12' => 'generic/freebsd12',
    'freebsd13' => 'generic/freebsd13',
    'rocky8' => 'rockylinux/8'
  }

  abort("Invalid argument! #{distro}") unless distro_to_type.key?(distro) || force_distro

  # Unknown alias can only reach this point with force_distro set:
  # fall back to the raw name (fetch avoids the double hash lookup).
  distro_to_type.fetch(distro, distro)
end
|
|
|
|
|
2020-08-10 13:35:18 +00:00
|
|
|
# Propagate the host's proxy settings into the guest VM.
#
# No-op unless the http_proxy environment variable is set on the host.
# When the vagrant-proxyconf plugin is installed, it is used to configure
# http/https/no_proxy in the guest; FreeBSD guests additionally get a
# manual shell-provisioned setup (see comment below).
#
# @param config Vagrant configuration object (config.vm / config.proxy)
# @param distro [String] distro alias, used only to detect FreeBSD guests
def setup_proxy(config,distro)
return unless ENV['http_proxy']

if Vagrant.has_plugin?("vagrant-proxyconf")
config.proxy.http = ENV['http_proxy']
config.proxy.https = ENV['https_proxy']
config.proxy.no_proxy = "localhost,127.0.0.1"
end

# Proxyconf does not seem to support FreeBSD boxes or at least it's
# docs do not mention that. Set up proxy configuration manually.
if distro.include?("freebsd")
# NOTE(review): in the script below https_proxy is exported from
# ENV['http_proxy'] (not ENV['https_proxy']) — presumably intentional
# (reuse the same proxy endpoint for both), but confirm.
$freebsd_proxy = <<-SCRIPT
sudo -s
echo "export http_proxy=#{ENV['http_proxy']}" >> /etc/profile
echo "export https_proxy=#{ENV['http_proxy']}" >> /etc/profile
echo "pkg_env: {http_proxy: #{ENV['http_proxy']}}" > /usr/local/etc/pkg.conf
chown root:wheel /usr/local/etc/pkg.conf
chmod 644 /usr/local/etc/pkg.conf
SCRIPT
config.vm.provision "shell", inline: $freebsd_proxy
end
end
|
|
|
|
|
2020-08-10 13:43:38 +00:00
|
|
|
# Ship the host's ~/.gitconfig into the guest's home directory, when the
# host actually has one; otherwise do nothing.
def copy_gitconfig(config)
host_gitconfig = '~/.gitconfig'
return unless File.file?(File.expand_path(host_gitconfig))

config.vm.provision "file", source: host_gitconfig, destination: ".gitconfig"
end
|
|
|
|
|
|
|
|
# Copy the host's /etc/tsocks.conf into the guest and install it under
# /etc with root ownership. No-op when the host has no tsocks config.
def copy_tsocks(config)
conf_name = 'tsocks.conf'
conf_path = '/etc/' + conf_name

return unless File.file?(conf_path)

# The file provisioner drops the config in the vagrant user's home;
# the shell step then moves it into place and fixes ownership.
$tsocks_copy_cmd = <<-SCRIPT
sudo -s
mv -f "#{conf_name}" "#{conf_path}"
chown root "#{conf_path}"
chmod 644 "#{conf_path}"
SCRIPT

config.vm.provision "file", source: conf_path, destination: conf_name
config.vm.provision "shell", inline: $tsocks_copy_cmd
end
|
|
|
|
|
|
|
|
# Sync an optional ~/vagrant_tools directory from the host into the
# guest at /home/vagrant/tools. No-op when the directory is absent.
def copy_vagrant_tools(config,files_sync_backend)
tools_dir = '~/vagrant_tools'
return unless File.directory?(File.expand_path(tools_dir))

config.vm.synced_folder tools_dir, "/home/vagrant/tools", files_sync_backend
end
|
|
|
|
|
2023-04-19 13:39:11 +00:00
|
|
|
# Sync the SPDK source tree (and optional sibling repos spdk-abi/dpdk)
# from the host into the guest under /home/vagrant/spdk_repo.
#
# Enabled only when COPY_SPDK_DIR=1 and SPDK_DIR point at the sources.
# Sibling directories are synced only when present next to SPDK_DIR.
#
# @param config Vagrant configuration object (config.vm)
# @param files_sync_backend sync-backend options for synced_folder
def copy_sources_dirs(config, files_sync_backend)
return unless ENV['COPY_SPDK_DIR'] == "1"
return unless ENV['SPDK_DIR']

repo_prefix = '/home/vagrant/spdk_repo'
config.vm.synced_folder ENV['SPDK_DIR'], "#{repo_prefix}/spdk", files_sync_backend

# Optional directories. Use each instead of `for`, which leaks its
# loop variable into the enclosing scope.
['spdk-abi', 'dpdk'].each do |dir|
src_path = "#{ENV['SPDK_DIR']}/../#{dir}"
next unless File.directory?(File.expand_path(src_path))

config.vm.synced_folder src_path, "#{repo_prefix}/#{dir}", files_sync_backend
end
end
|
|
|
|
|
|
|
|
# Mirror test artifacts between <VAGRANTFILE_DIR>/output on the host and
# /home/vagrant/spdk_repo/output in the guest.
# Enabled only when COPY_SPDK_ARTIFACTS=1.
def copy_spdk_artifacts(config, plugins_sync_backend)
return unless ENV['COPY_SPDK_ARTIFACTS'] == "1"

base_dir = ENV['VAGRANTFILE_DIR'] || "none"
config.vm.synced_folder "#{base_dir}/output", "/home/vagrant/spdk_repo/output", plugins_sync_backend
end
|
|
|
|
|
2020-08-10 13:48:21 +00:00
|
|
|
# Replace the NFS-mounted spdk repo inside the guest with a plain local
# copy owned by the vagrant user (copy aside, unmount, move back).
# NOTE: the distro argument is accepted for signature compatibility with
# the other setup helpers but is not used here.
def make_spdk_local_copy_of_nfs(config,distro)
owner = 'vagrant:vagrant'

repo_dir = '/home/vagrant/spdk_repo/spdk'
staging_dir = '/tmp/spdk'
$spdk_repo_cmd = <<-SCRIPT
sudo -s
cp -R '#{repo_dir}' '#{staging_dir}'
umount '#{repo_dir}' && rm -rf '#{repo_dir}'
mv '#{staging_dir}' '#{repo_dir}'
chown -R #{owner} '#{repo_dir}'
SCRIPT

config.vm.provision "shell", inline: $spdk_repo_cmd
end
|
2020-08-10 13:43:38 +00:00
|
|
|
|
2020-08-10 13:58:05 +00:00
|
|
|
# Resolve the backing image path for the nvme controller number +index+.
#
# NVME_FILE may hold a comma-separated list of image paths, one per
# controller. When it is unset — or has no entry for +index+ — the
# default libvirt image location is used. A warning is printed when the
# chosen image does not exist yet (unless the special value "none" was
# requested, meaning a controller with no namespace).
#
# NOTE: the +disk+ argument is unused here; it is kept for signature
# compatibility with setup_nvme_disk.
#
# @param disk [String] disk identifier (unused)
# @param index [Integer] controller index into the NVME_FILE list
# @return [String] image path, or "none"
def get_nvme_disk(disk, index)
default_disk = '/var/lib/libvirt/images/nvme_disk.img'
if ENV['NVME_FILE']
nvme_disk = ENV['NVME_FILE'].split(',')[index]
# Fall back to the default image when NVME_FILE lists fewer entries
# than there are disks; previously this crashed on File.exist?(nil).
nvme_disk ||= default_disk
else
nvme_disk = default_disk
end

unless nvme_disk == "none" || File.exist?(nvme_disk)
puts 'If run with libvirt provider please execute create_nvme_img.sh'
end

return nvme_disk
end
|
|
|
|
|
|
|
|
def setup_nvme_disk(libvirt, disk, index)
|
|
|
|
nvme_disk_id = disk + '-' + index.to_s
|
|
|
|
nvme_disk = get_nvme_disk(disk, index)
|
|
|
|
|
|
|
|
nvme_namespaces=(ENV['NVME_DISKS_NAMESPACES'] || "").split(',')
|
|
|
|
nvme_cmbs=(ENV['NVME_CMB'] || "").split(',')
|
2021-03-23 19:09:50 +00:00
|
|
|
nvme_pmrs=(ENV['NVME_PMR'] || "").split(',')
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
nvme_zns=(ENV['NVME_ZNS'] || "").split(',')
|
2022-08-12 09:49:27 +00:00
|
|
|
nvme_ms=(ENV['NVME_MS'] || "").split(',')
|
2023-04-25 13:11:56 +00:00
|
|
|
nvme_fdp=(ENV['NVME_FDP'] || "").split(',')
|
2020-08-10 13:58:05 +00:00
|
|
|
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
namespace_disks = []
|
2021-06-09 09:18:59 +00:00
|
|
|
pmr_cmdline = ""
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
nvme_controller = ""
|
2023-04-25 13:11:56 +00:00
|
|
|
fdp_subsys = ""
|
|
|
|
fdp_subsys_id = ""
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
|
|
|
|
# Define controller
|
|
|
|
nvme_controller = "nvme,id=#{nvme_disk_id},serial=1234#{index}"
|
|
|
|
|
2023-04-25 13:11:56 +00:00
|
|
|
# For the FDP, we need to hook our nvme into a dedicated subsystem
|
|
|
|
if !nvme_fdp[index].nil? && nvme_fdp[index] != ""
|
|
|
|
fdp_subsys_id = "fdp-subsys#{index}"
|
|
|
|
fdp = nvme_fdp[index].split(':')[0..3]
|
|
|
|
fdp_ruhs = (nvme_fdp[index].split(':')[4..]) # fdp.ruhs per ns
|
|
|
|
|
|
|
|
# Put some defaults in place if needed
|
|
|
|
fdp_enable = "#{fdp[0] != nil && fdp[0] != '' ? fdp[0] : 'off'}"
|
|
|
|
fdp_runs = "#{fdp[1] != nil && fdp[1] != '' ? fdp[1] : '96M'}"
|
|
|
|
fdp_nrg = "#{fdp[2] != nil && fdp[2] != '' ? fdp[2] : 2}"
|
|
|
|
fdp_nruh = "#{fdp[3] != nil && fdp[3] != '' ? fdp[3] : 8}"
|
|
|
|
|
|
|
|
fdp_subsys = "nvme-subsys,id=#{fdp_subsys_id},fdp=#{fdp_enable}"
|
|
|
|
fdp_subsys << ",fdp.runs=#{fdp_runs},fdp.nrg=#{fdp_nrg},fdp.nruh=#{fdp_nruh}"
|
|
|
|
|
|
|
|
nvme_controller << ",subsys=#{fdp_subsys_id}"
|
|
|
|
|
|
|
|
libvirt.qemuargs :value => "-device"
|
|
|
|
libvirt.qemuargs :value => fdp_subsys
|
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
# Gather all drives - each namespace requires separate drive
|
|
|
|
if nvme_namespaces[index].nil?
|
|
|
|
namespace_disks = namespace_disks + nvme_disk.split()
|
|
|
|
elsif !nvme_namespaces[index].nil? && !nvme_namespaces[index].match(/^[0-9]+$/)
|
|
|
|
namespace_disks = namespace_disks + nvme_disk.split() + nvme_namespaces[index].split(':')
|
2023-04-25 10:38:18 +00:00
|
|
|
elsif !nvme_namespaces[index].nil? && nvme_namespaces[index] == "1"
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
libvirt.qemuargs :value => "-drive"
|
|
|
|
libvirt.qemuargs :value => "format=raw,file=#{nvme_disk},if=none,id=#{nvme_disk_id}"
|
|
|
|
nvme_controller <<",drive=#{nvme_disk_id}"
|
|
|
|
end
|
2021-03-23 19:09:50 +00:00
|
|
|
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
if !nvme_cmbs[index].nil? && nvme_cmbs[index] != ""
|
2020-08-10 13:58:05 +00:00
|
|
|
# Fix the size of the buffer to 128M
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
nvme_controller << ",cmb_size_mb=128"
|
2021-03-23 19:09:50 +00:00
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
|
|
|
|
if !nvme_pmrs[index].nil? && nvme_pmrs[index] != ""
|
2021-03-23 19:09:50 +00:00
|
|
|
pmr_path, pmr_size = nvme_pmrs[index].split(':')
|
|
|
|
if pmr_size.nil?
|
|
|
|
pmr_size = "16M"
|
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
nvme_controller << ",pmrdev=pmr#{index}"
|
2021-03-23 19:09:50 +00:00
|
|
|
pmr_cmdline = "memory-backend-file,id=pmr#{index},share=on,mem-path=#{pmr_path},size=#{pmr_size}"
|
2020-08-10 13:58:05 +00:00
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
|
|
|
|
libvirt.qemuargs :value => "-device"
|
|
|
|
libvirt.qemuargs :value => nvme_controller
|
|
|
|
|
2021-06-09 09:18:59 +00:00
|
|
|
if pmr_cmdline != ""
|
2021-03-23 19:09:50 +00:00
|
|
|
libvirt.qemuargs :value => "-object"
|
|
|
|
libvirt.qemuargs :value => pmr_cmdline
|
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requested, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imitate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
|
|
|
|
# Define all namespaces
|
|
|
|
namespace_disks.each_with_index { |disk, nsid|
|
|
|
|
if disk == "none"
|
|
|
|
next
|
|
|
|
end
|
|
|
|
zoned = nvme_zns[index].nil? ? "false" : "true"
|
2022-08-12 09:49:27 +00:00
|
|
|
ms = nvme_ms[index].nil? ? "" : ",ms=64"
|
2023-04-25 13:11:56 +00:00
|
|
|
ns = "nvme-ns,drive=#{nvme_disk_id}-drive#{nsid},bus=#{nvme_disk_id},nsid=#{nsid + 1},zoned=#{zoned},logical_block_size=4096,physical_block_size=4096#{ms}"
|
|
|
|
if !fdp_ruhs.nil? && !fdp_ruhs[nsid].nil? && fdp_ruhs[nsid] != ""
|
|
|
|
ns << ",fdp.ruhs=#{fdp_ruhs[nsid]}"
|
|
|
|
end
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
libvirt.qemuargs :value => "-drive"
|
|
|
|
libvirt.qemuargs :value => "format=raw,file=#{disk},if=none,id=#{nvme_disk_id}-drive#{nsid}"
|
|
|
|
libvirt.qemuargs :value => "-device"
|
2023-04-25 13:11:56 +00:00
|
|
|
libvirt.qemuargs :value => ns
|
scripts/vagrant: Drop OCSSD awareness from functional tests
This also translates into switching fully to upstream QEMU for the
vagrant setup.
This is done in order to move away from OCSSD and SPDK's qemu fork
and align with what upstream QEMU supports. Main changes touch the
way how nvme namespaces are configured. With >= 5.2.0 it's possible
now to configure multiple namespace under single nvme device. Each
namespace requires a separate disk image to work with. This:
-b foo.img,nvme,1...
-b foo.img
-b foo.img,,..
Will still configure nvme controller with a single namespace attached
to foo.img.
This:
-b foo.img,,foo-ns1.img:foo-ns2.img
Will configure nvme controller with three namespaces.
Configuring nvme controller with no namespaces is possible via:
-b none ...
Note that this still allows to define other options specific to nvme
controller, like CMB and PMR. E.g:
-b none,nvme,,true
This will create nvme controller with no namespaces but with CMB
enabled.
It's possible now to also request for given controller to be zoned.
Currently if requsted, all namespaces under the target controller
will be zoned with no limit set as to max open|active zones.
All nvme devices have block size fixed to 4KB to imititate behavior
of the SPDK's qemu fork.
Compatibility with spdk-5.0.0 fork is preserved in context of setting
up namespaces so this:
-b foo.img,nvme,2
is valid as long as the emulator is set to that of spdk-5.0.0's.
Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: Ib5d53cb5c330c1f84b57e0bf877ea0e2d0312ddd
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8421
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
2021-06-17 16:44:43 +00:00
|
|
|
}
|
|
|
|
|
2020-08-10 13:58:05 +00:00
|
|
|
end
|
|
|
|
|
2020-08-10 14:02:15 +00:00
|
|
|
# Configure ssh access to the guest: agent and X11 forwarding are always
# enabled. When VAGRANT_PASSWORD_AUTH=1, switch to password authentication
# (vagrant/vagrant) and clear any private key path so vagrant falls back to
# its own key handling instead of an interactive password prompt.
def setup_ssh(config)
  ssh = config.ssh
  ssh.forward_agent = true
  ssh.forward_x11 = true

  return unless ENV['VAGRANT_PASSWORD_AUTH'] == "1"

  ssh.username = "vagrant"
  ssh.password = "vagrant"
  # Unset the key path so boxes shipping their own embedded keys do not
  # override the password-based flow.
  ssh.private_key_path = nil
end
|
|
|
|
|
2021-03-05 09:30:33 +00:00
|
|
|
# Provision the VM with the spdk dependencies needed to run the autorun.sh
# tests. This is a no-op unless DEPLOY_TEST_VM=1, COPY_SPDK_DIR=1 and
# SPDK_DIR are all set in the environment.
def deploy_test_vm(config, distro, plugins_sync_backend)
  return if ENV['DEPLOY_TEST_VM'] != "1" || ENV['COPY_SPDK_DIR'] != "1" || !ENV['SPDK_DIR']

  # Use http proxy if available.
  setup_proxy(config, distro)

  # Copy the tsocks configuration file for use when installing some spdk
  # test pool dependencies.
  copy_tsocks(config)

  # freebsd boxes sync spdk sources from the host over NFS mounted "ro" to
  # prevent changes on the host filesystem. To make the sources usable in
  # the guest VM they must be unmounted and replaced with a local copy.
  make_spdk_local_copy_of_nfs(config, distro) if plugins_sync_backend[:type] == :nfs

  config.vm.provision "shell" do |setup|
    setup.inline = "/home/vagrant/spdk_repo/spdk/test/common/config/vm_setup.sh"
    setup.privileged = false
    setup.args = ["-u", "-i"]
  end
end
|
|
|
|
|
2020-08-10 14:09:54 +00:00
|
|
|
# VirtualBox provider settings: enable IOAPIC, size RAM/CPUs, create and
# attach an emulated NVMe disk (only when the image file does not already
# exist) and set the SSE4.x extradata flags some VirtualBox versions need.
def setup_virtualbox(config, vmcpu, vmram)
  config.vm.provider "virtualbox" do |vbox|
    vbox.customize ["modifyvm", :id, "--ioapic", "on"]
    vbox.memory = vmram
    vbox.cpus = vmcpu

    disk_path = ENV['NVME_FILE'] || "nvme_disk.img"
    unless File.exist?(disk_path)
      # First run: create a fixed-size 1024MB image, add an NVMe
      # controller and attach the image to its first port.
      vbox.customize ["createhd", "--filename", disk_path, "--variant", "Fixed", "--size", "1024"]
      vbox.customize ["storagectl", :id, "--name", "nvme", "--add", "pcie", "--controller", "NVMe", "--portcount", "1", "--bootable", "off"]
      vbox.customize ["storageattach", :id, "--storagectl", "nvme", "--type", "hdd", "--medium", disk_path, "--port", "0"]
    end

    # Support for the SSE4.x instructions is required in some versions of VB.
    ["VBoxInternal/CPUM/SSE4.1", "VBoxInternal/CPUM/SSE4.2"].each do |key|
      vbox.customize ["setextradata", :id, key, "1"]
    end
  end
end
|
|
|
|
|
2020-08-10 14:12:11 +00:00
|
|
|
# Libvirt/KVM provider settings: base VM sizing, per-distro disk bus
# selection, optional custom QEMU emulator, emulated NVMe disks, optional
# openstack network interface and hugepage-backed memory.
def setup_libvirt(config, vmcpu, vmram, distro)
  nvme_types = (ENV['NVME_DISKS_TYPE'] || "nvme").split(',')

  config.vm.provider "libvirt" do |libvirt, override|
    libvirt.random_hostname = "1"
    libvirt.driver = "kvm"
    libvirt.graphics_type = "vnc"
    libvirt.memory = vmram
    libvirt.cpus = vmcpu
    libvirt.video_type = "cirrus"

    # generic/freebsd boxes need to be explicitly run with the SCSI bus and
    # generic/arch boxes with the IDE bus, otherwise the boot process fails
    # on mounting the disk; everything else uses virtio.
    libvirt.disk_bus =
      if distro.include?("freebsd")
        "scsi"
      elsif distro.include?("arch")
        "ide"
      else
        "virtio"
      end

    if ENV['SPDK_QEMU_EMULATOR']
      libvirt.emulator_path = ENV['SPDK_QEMU_EMULATOR']
      libvirt.machine_type = "pc"
    end

    # We put nvme_disk inside the default pool to eliminate libvirt/SELinux
    # permission problems and to be able to run vagrant from the user's
    # $HOME directory.
    # Loop to create the whole emulated disk set.
    nvme_types.each_with_index do |disk, index|
      setup_nvme_disk(libvirt, disk, index)
    end

    # Add a network interface for openstack tests.
    if ENV['SPDK_OPENSTACK_NETWORK'] == "1"
      ["-device", "virtio-net,netdev=openstack.0", "-netdev", "user,id=openstack.0"].each do |arg|
        libvirt.qemuargs :value => arg
      end
    end

    libvirt.memorybacking :hugepages if ENV['VAGRANT_HUGE_MEM'] == "1"

    # Optional field if we want to use storage pools other than the default:
    # libvirt.storage_pool_name = "vm"
  end
end
|
|
|
|
|
2020-08-10 14:18:55 +00:00
|
|
|
#################################################################################################
# Pick the right distro and bootstrap, default is fedora33
distro = (ENV['SPDK_VAGRANT_DISTRO'] || "fedora33")
provider = (ENV['SPDK_VAGRANT_PROVIDER'] || "virtualbox")

# Get all variables for creating the vm
vmcpu = (ENV['SPDK_VAGRANT_VMCPU'] || 2)
vmram = (ENV['SPDK_VAGRANT_VMRAM'] || 4096)

# FORCE_DISTRO=true lets a box name unknown to get_box_type() pass through.
# (plain comparison instead of the redundant `? true : false` ternary)
force_distro = ENV['FORCE_DISTRO'] == "true"

distro_to_use = get_box_type(distro, force_distro)

# Remove --copy-links from the default rsync cmdline since we do want to sync
# actual symlinks as well. Also, since the copy is made between the host and
# its local VM we don't need to worry about saturating the local link, so
# skip the compression to speed up the whole transfer.
files_sync_backend = {type: "rsync", rsync__auto: false, rsync__args: ["--archive", "--verbose", "--delete"]}

# Prefer sshfs for plugin-managed syncs; fall back to NFSv4 over TCP when the
# plugin is missing or NFS4_BACKEND is explicitly requested.
# Use `||`/`!` instead of `or`/`not` to avoid low-precedence surprises.
if ENV['NFS4_BACKEND'] || !Vagrant.has_plugin?("vagrant-sshfs")
  plugins_sync_backend = {type: :nfs, nfs_udp: false, nfs_version: 4}
else
  plugins_sync_backend = {type: :sshfs}
end
|
2020-08-10 14:18:55 +00:00
|
|
|
|
|
|
|
Vagrant.configure(2) do |config|
  config.vm.box = distro_to_use
  config.vm.box_check_update = false
  config.vm.synced_folder '.', '/vagrant', disabled: true
  config.vm.box_version = ENV['VAGRANT_BOX_VERSION'] if ENV['VAGRANT_BOX_VERSION']

  # Copy in the .gitconfig if it exists
  copy_gitconfig(config)

  # Copy in the user's tools if they exist
  copy_vagrant_tools(config, files_sync_backend)

  # Sync the spdk sources into the guest
  copy_sources_dirs(config, files_sync_backend)

  # rsync artifacts from build
  copy_spdk_artifacts(config, plugins_sync_backend)

  # Setup SSH
  setup_ssh(config)

  # Virtualbox configuration
  setup_virtualbox(config, vmcpu, vmram)

  # Libvirt configuration
  setup_libvirt(config, vmcpu, vmram, distro)

  # Provision the vm with all of the necessary spdk dependencies for running
  # the autorun.sh tests
  deploy_test_vm(config, distro, plugins_sync_backend)
end
|
2022-08-09 11:07:51 +00:00
|
|
|
|
|
|
|
# Optionally layer extra, user-provided Vagrantfiles on top of this one.
# EXTRA_VAGRANTFILES is a comma-separated list of paths; entries that do not
# exist are silently skipped.
if ENV['EXTRA_VAGRANTFILES']
  loaders = ENV['EXTRA_VAGRANTFILES'].split(',')
  loaders.each do |loader|
    # File.exists? is deprecated and was removed in Ruby 3.2; File.exist?
    # is the supported spelling.
    load loader if File.exist?(loader)
  end
end
|