# spdk/scripts/vagrant/Vagrantfile
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'open3'

def get_box_type(distro, force_distro)
  spdk_distro = 'spdk/' + distro
  localboxes, stderr, status = Open3.capture3("vagrant box list")
  return spdk_distro if localboxes.include?(spdk_distro)

  distro_to_type = {
    'centos7' => 'centos/7',
    'centos8' => 'centos/8',
    'ubuntu1604' => 'peru/ubuntu-16.04-server-amd64',
    'ubuntu1804' => 'peru/ubuntu-18.04-server-amd64',
    'ubuntu2004' => 'peru/ubuntu-20.04-server-amd64',
    'ubuntu2204' => 'generic/ubuntu2204',
    'fedora33' => 'generic/fedora33',
    'fedora34' => 'generic/fedora34',
    'fedora35' => 'generic/fedora35',
    'fedora36' => 'generic/fedora36',
    'arch' => 'generic/arch',
    'freebsd12' => 'generic/freebsd12',
    'freebsd13' => 'generic/freebsd13',
    'rocky8' => 'rockylinux/8'
  }
  abort("Invalid argument! #{distro}") unless distro_to_type.key?(distro) || force_distro

  return distro_to_type[distro] ? distro_to_type[distro] : distro
end
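
# Usage sketch (values are illustrative): a locally imported "spdk/<distro>" box
# takes precedence; otherwise the short name is mapped through the table above.
# With FORCE_DISTRO=true an unmapped name is passed straight through as the box
# name.
#   SPDK_VAGRANT_DISTRO=fedora36 vagrant up   # resolves to generic/fedora36
#   FORCE_DISTRO=true SPDK_VAGRANT_DISTRO=generic/debian11 vagrant up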

def setup_proxy(config, distro)
  return unless ENV['http_proxy']

  if Vagrant.has_plugin?("vagrant-proxyconf")
    config.proxy.http = ENV['http_proxy']
    config.proxy.https = ENV['https_proxy']
    config.proxy.no_proxy = "localhost,127.0.0.1"
  end

  # Proxyconf does not seem to support FreeBSD boxes, or at least its docs
  # do not mention it. Set up the proxy configuration manually instead.
  if distro.include?("freebsd")
    $freebsd_proxy = <<-SCRIPT
      sudo -s
      echo "export http_proxy=#{ENV['http_proxy']}" >> /etc/profile
      echo "export https_proxy=#{ENV['http_proxy']}" >> /etc/profile
      echo "pkg_env: {http_proxy: #{ENV['http_proxy']}}" > /usr/local/etc/pkg.conf
      chown root:wheel /usr/local/etc/pkg.conf
      chmod 644 /usr/local/etc/pkg.conf
    SCRIPT
    config.vm.provision "shell", inline: $freebsd_proxy
  end
end
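
# Usage sketch (proxy URL is illustrative): the proxy settings are picked up
# from the regular environment variables at "vagrant up" time.
#   http_proxy=http://proxy.example.com:3128 https_proxy=http://proxy.example.com:3128 vagrant up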

def copy_gitconfig(config)
  src_path = '~/.gitconfig'
  return unless File.file?(File.expand_path(src_path))

  config.vm.provision "file", source: src_path, destination: ".gitconfig"
end

def copy_tsocks(config)
  tsocks_file = 'tsocks.conf'
  tsocks_file_path = '/etc/' + tsocks_file
  return unless File.file?(tsocks_file_path)

  $tsocks_copy_cmd = <<-SCRIPT
    sudo -s
    mv -f "#{tsocks_file}" "#{tsocks_file_path}"
    chown root "#{tsocks_file_path}"
    chmod 644 "#{tsocks_file_path}"
  SCRIPT

  config.vm.provision "file", source: tsocks_file_path, destination: tsocks_file
  config.vm.provision "shell", inline: $tsocks_copy_cmd
end

def copy_vagrant_tools(config, files_sync_backend)
  src_path = '~/vagrant_tools'
  return unless File.directory?(File.expand_path(src_path))

  config.vm.synced_folder src_path, "/home/vagrant/tools", files_sync_backend
end

def copy_sources_dirs(config, files_sync_backend)
  return unless ENV['COPY_SPDK_DIR'] == "1"
  return unless ENV['SPDK_DIR']

  repo_prefix = '/home/vagrant/spdk_repo'
  config.vm.synced_folder ENV['SPDK_DIR'], "#{repo_prefix}/spdk", files_sync_backend

  # Optional directories
  for dir in ['spdk-abi', 'dpdk']
    src_path = "#{ENV['SPDK_DIR']}/../#{dir}"
    next unless File.directory?(File.expand_path(src_path))

    config.vm.synced_folder src_path, "#{repo_prefix}/#{dir}", files_sync_backend
  end
end

def copy_spdk_artifacts(config, plugins_sync_backend)
  return unless ENV['COPY_SPDK_ARTIFACTS'] == "1"

  vagrantfile_dir = (ENV['VAGRANTFILE_DIR'] || "none")
  config.vm.synced_folder "#{vagrantfile_dir}/output", "/home/vagrant/spdk_repo/output", plugins_sync_backend
end
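
# Usage sketch (path is illustrative): VAGRANTFILE_DIR must point at a host
# directory that contains an "output" subdirectory.
#   COPY_SPDK_ARTIFACTS=1 VAGRANTFILE_DIR=$PWD vagrant up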

def make_spdk_local_copy_of_nfs(config, distro)
  user_group = 'vagrant:vagrant'
  spdk_path = '/home/vagrant/spdk_repo/spdk'
  spdk_tmp_path = '/tmp/spdk'

  $spdk_repo_cmd = <<-SCRIPT
    sudo -s
    cp -R '#{spdk_path}' '#{spdk_tmp_path}'
    umount '#{spdk_path}' && rm -rf '#{spdk_path}'
    mv '#{spdk_tmp_path}' '#{spdk_path}'
    chown -R #{user_group} '#{spdk_path}'
  SCRIPT

  config.vm.provision "shell", inline: $spdk_repo_cmd
end

def get_nvme_disk(disk, index)
  if ENV['NVME_FILE']
    nvme_file = ENV['NVME_FILE'].split(',')
    nvme_disk = nvme_file[index]
  else
    nvme_disk = '/var/lib/libvirt/images/nvme_disk.img'
  end

  unless nvme_disk == "none" || File.exist?(nvme_disk)
    puts 'If run with the libvirt provider, please execute create_nvme_img.sh'
  end

  return nvme_disk
end
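
# Usage sketch (paths are illustrative): NVME_FILE lists one backing image per
# emulated controller, comma-separated; "none" yields a controller with no
# namespaces attached.
#   NVME_FILE=/var/lib/libvirt/images/nvme0.img,none vagrant up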

def setup_nvme_disk(libvirt, disk, index)
  nvme_disk_id = disk + '-' + index.to_s
  nvme_disk = get_nvme_disk(disk, index)
  nvme_namespaces = (ENV['NVME_DISKS_NAMESPACES'] || "").split(',')
  nvme_cmbs = (ENV['NVME_CMB'] || "").split(',')
  nvme_pmrs = (ENV['NVME_PMR'] || "").split(',')
  nvme_zns = (ENV['NVME_ZNS'] || "").split(',')
  nvme_ms = (ENV['NVME_MS'] || "").split(',')
  nvme_fdp = (ENV['NVME_FDP'] || "").split(',')

  namespace_disks = []
  pmr_cmdline = ""
  nvme_controller = ""
  fdp_subsys = ""
  fdp_subsys_id = ""

  # Define the controller
  nvme_controller = "nvme,id=#{nvme_disk_id},serial=1234#{index}"

  # For FDP, the nvme controller needs to be hooked into a dedicated subsystem
  if !nvme_fdp[index].nil? && nvme_fdp[index] != ""
    fdp_subsys_id = "fdp-subsys#{index}"
    fdp = nvme_fdp[index].split(':')[0..3]
    fdp_ruhs = (nvme_fdp[index].split(':')[4..]) # fdp.ruhs per ns
    # Put some defaults in place if needed
    fdp_enable = "#{fdp[0] != nil && fdp[0] != '' ? fdp[0] : 'off'}"
    fdp_runs = "#{fdp[1] != nil && fdp[1] != '' ? fdp[1] : '96M'}"
    fdp_nrg = "#{fdp[2] != nil && fdp[2] != '' ? fdp[2] : 2}"
    fdp_nruh = "#{fdp[3] != nil && fdp[3] != '' ? fdp[3] : 8}"
    fdp_subsys = "nvme-subsys,id=#{fdp_subsys_id},fdp=#{fdp_enable}"
    fdp_subsys << ",fdp.runs=#{fdp_runs},fdp.nrg=#{fdp_nrg},fdp.nruh=#{fdp_nruh}"
    nvme_controller << ",subsys=#{fdp_subsys_id}"
    libvirt.qemuargs :value => "-device"
    libvirt.qemuargs :value => fdp_subsys
  end
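
  # Usage sketch (values are illustrative): each comma-separated NVME_FDP entry
  # is "enable:runs:nrg:nruh" followed by optional per-namespace fdp.ruhs
  # fields; empty fields fall back to the defaults above.
  #   NVME_FDP=on:96M:2:8:1:1   # fdp enabled, fdp.ruhs=1 for the first two namespaces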

  # Gather all the drives - each namespace requires a separate drive
  if nvme_namespaces[index].nil?
    namespace_disks = namespace_disks + nvme_disk.split()
  elsif !nvme_namespaces[index].nil? && !nvme_namespaces[index].match(/^[0-9]+$/)
    namespace_disks = namespace_disks + nvme_disk.split() + nvme_namespaces[index].split(':')
  elsif !nvme_namespaces[index].nil? && nvme_namespaces[index] == "1"
    libvirt.qemuargs :value => "-drive"
    libvirt.qemuargs :value => "format=raw,file=#{nvme_disk},if=none,id=#{nvme_disk_id}"
    nvme_controller << ",drive=#{nvme_disk_id}"
  end
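
  # Usage sketch (image names are illustrative): a colon-separated entry adds
  # extra namespace images on top of the controller's base disk, while "1"
  # keeps the legacy single-drive layout used by the spdk-5.0.0 qemu fork.
  #   NVME_DISKS_NAMESPACES=ns1.img:ns2.img   # base disk + 2 extra namespaces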

  if !nvme_cmbs[index].nil? && nvme_cmbs[index] != ""
    # Fix the size of the buffer to 128M
    nvme_controller << ",cmb_size_mb=128"
  end

  if !nvme_pmrs[index].nil? && nvme_pmrs[index] != ""
    pmr_path, pmr_size = nvme_pmrs[index].split(':')
    if pmr_size.nil?
      pmr_size = "16M"
    end
    nvme_controller << ",pmrdev=pmr#{index}"
    pmr_cmdline = "memory-backend-file,id=pmr#{index},share=on,mem-path=#{pmr_path},size=#{pmr_size}"
  end
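
  # Usage sketch (path and size are illustrative): one entry per controller;
  # any non-empty NVME_CMB entry enables a 128M CMB, and an NVME_PMR entry is
  # "path[:size]" with the 16M default applied when the size is omitted.
  #   NVME_CMB=true NVME_PMR=/tmp/pmr0.img:32M vagrant up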

  libvirt.qemuargs :value => "-device"
  libvirt.qemuargs :value => nvme_controller
  if pmr_cmdline != ""
    libvirt.qemuargs :value => "-object"
    libvirt.qemuargs :value => pmr_cmdline
  end

  # Define all namespaces
  namespace_disks.each_with_index { |disk, nsid|
    if disk == "none"
      next
    end

    zoned = nvme_zns[index].nil? ? "false" : "true"
    ms = nvme_ms[index].nil? ? "" : ",ms=64"
    ns = "nvme-ns,drive=#{nvme_disk_id}-drive#{nsid},bus=#{nvme_disk_id},nsid=#{nsid + 1},zoned=#{zoned},logical_block_size=4096,physical_block_size=4096#{ms}"
    if !fdp_ruhs.nil? && !fdp_ruhs[nsid].nil? && fdp_ruhs[nsid] != ""
      ns << ",fdp.ruhs=#{fdp_ruhs[nsid]}"
    end

    libvirt.qemuargs :value => "-drive"
    libvirt.qemuargs :value => "format=raw,file=#{disk},if=none,id=#{nvme_disk_id}-drive#{nsid}"
    libvirt.qemuargs :value => "-device"
    libvirt.qemuargs :value => ns
  }
end
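
# End-to-end sketch (all values illustrative): two controllers, the first one
# zoned with three namespaces, the second one with a CMB and no namespaces.
#   NVME_FILE=/images/nvme0.img,none \
#   NVME_DISKS_TYPE=nvme,nvme \
#   NVME_DISKS_NAMESPACES=ns1.img:ns2.img \
#   NVME_ZNS=true \
#   NVME_CMB=,true vagrant up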

def setup_ssh(config)
  config.ssh.forward_agent = true
  config.ssh.forward_x11 = true
  if ENV['VAGRANT_PASSWORD_AUTH'] == "1"
    config.ssh.username = "vagrant"
    config.ssh.password = "vagrant"
    config.ssh.private_key_path = nil
  end
end

def deploy_test_vm(config, distro, plugins_sync_backend)
  return unless ENV['DEPLOY_TEST_VM'] == "1"
  return unless ENV['COPY_SPDK_DIR'] == "1"
  return unless ENV['SPDK_DIR']

  # Use a http proxy if one is available
  setup_proxy(config, distro)

  # Copy the tsocks configuration file, used when installing some spdk test pool dependencies
  copy_tsocks(config)

  # To have the spdk sources synced properly from the host, freebsd boxes use
  # NFS with the "ro" option enabled to prevent changes to the host filesystem.
  # To make the sources usable inside the guest VM we need to unmount them and
  # work on a local copy.
  make_spdk_local_copy_of_nfs(config, distro) if plugins_sync_backend[:type] == :nfs

  config.vm.provision "shell" do |setup|
    setup.inline = "/home/vagrant/spdk_repo/spdk/test/common/config/vm_setup.sh"
    setup.privileged = false
    setup.args = ["-u", "-i"]
  end
end

def setup_virtualbox(config, vmcpu, vmram)
  config.vm.provider "virtualbox" do |vb|
    vb.customize ["modifyvm", :id, "--ioapic", "on"]
    vb.memory = vmram
    vb.cpus = vmcpu

    nvme_disk = (ENV['NVME_FILE'] || "nvme_disk.img")
    unless File.exist?(nvme_disk)
      vb.customize ["createhd", "--filename", nvme_disk, "--variant", "Fixed", "--size", "1024"]
      vb.customize ["storagectl", :id, "--name", "nvme", "--add", "pcie", "--controller", "NVMe", "--portcount", "1", "--bootable", "off"]
      vb.customize ["storageattach", :id, "--storagectl", "nvme", "--type", "hdd", "--medium", nvme_disk, "--port", "0"]
    end

    # Support for the SSE4.x instructions is required in some versions of VB.
    vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.1", "1"]
    vb.customize ["setextradata", :id, "VBoxInternal/CPUM/SSE4.2", "1"]
  end
end

def setup_libvirt(config, vmcpu, vmram, distro)
  emulated_nvme_types = (ENV['NVME_DISKS_TYPE'] || "nvme").split(',')

  config.vm.provider "libvirt" do |libvirt, override|
    libvirt.random_hostname = "1"
    libvirt.driver = "kvm"
    libvirt.graphics_type = "vnc"
    libvirt.memory = vmram
    libvirt.cpus = vmcpu
    libvirt.video_type = "cirrus"

    if distro.include?("freebsd")
      # generic/freebsd boxes need to be run explicitly with a SCSI bus,
      # otherwise the boot process fails on mounting the disk
      libvirt.disk_bus = "scsi"
    elsif distro.include?("arch")
      # generic/arch boxes need to be run explicitly with an IDE bus,
      # otherwise the boot process fails on mounting the disk
      libvirt.disk_bus = "ide"
    else
      libvirt.disk_bus = "virtio"
    end

    if ENV['SPDK_QEMU_EMULATOR']
      libvirt.emulator_path = ENV['SPDK_QEMU_EMULATOR']
      libvirt.machine_type = "pc"
    end

    # We put the nvme_disk inside the default pool to avoid libvirt/SELinux
    # permission problems and to be able to run vagrant from the user's $HOME
    # directory.
    # Loop to create the whole set of emulated disks.
    emulated_nvme_types.each_with_index { |disk, index|
      setup_nvme_disk(libvirt, disk, index)
    }

    # Add a network interface for the openstack tests
    if ENV['SPDK_OPENSTACK_NETWORK'] == "1"
      libvirt.qemuargs :value => "-device"
      libvirt.qemuargs :value => "virtio-net,netdev=openstack.0"
      libvirt.qemuargs :value => "-netdev"
      libvirt.qemuargs :value => "user,id=openstack.0"
    end

    if ENV['VAGRANT_HUGE_MEM'] == "1"
      libvirt.memorybacking :hugepages
    end

    # Optional field if we want to use storage pools other than the default one
    # libvirt.storage_pool_name = "vm"
  end
end
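
# Usage sketch (emulator path is illustrative): the custom QEMU binary, the
# extra openstack NIC and hugepage-backed memory are all opt-in.
#   SPDK_QEMU_EMULATOR=/usr/local/bin/qemu-system-x86_64 \
#   SPDK_OPENSTACK_NETWORK=1 VAGRANT_HUGE_MEM=1 vagrant up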

#################################################################################################
# Pick the right distro and bootstrap it; the default is fedora33
distro = (ENV['SPDK_VAGRANT_DISTRO'] || "fedora33")
provider = (ENV['SPDK_VAGRANT_PROVIDER'] || "virtualbox")

# Get all the variables needed for creating the vm
vmcpu = (ENV['SPDK_VAGRANT_VMCPU'] || 2)
vmram = (ENV['SPDK_VAGRANT_VMRAM'] || 4096)
force_distro = ENV['FORCE_DISTRO'] == "true" ? true : false
distro_to_use = get_box_type(distro, force_distro)

# Remove --copy-links from the default rsync cmdline since we do want to sync
# actual symlinks as well. Also, since the copy is made between the host and
# its local VM, we don't need to worry about saturating the local link, so
# skip the compression to speed up the whole transfer.
files_sync_backend = {type: "rsync", rsync__auto: false, rsync__args: ["--archive", "--verbose", "--delete"]}

if ENV['NFS4_BACKEND'] or not Vagrant.has_plugin?("vagrant-sshfs")
  plugins_sync_backend = {type: :nfs, nfs_udp: false, nfs_version: 4}
else
  plugins_sync_backend = {type: :sshfs}
end
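
# Usage sketch: force the NFSv4 backend even when vagrant-sshfs is installed.
#   NFS4_BACKEND=1 vagrant up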

Vagrant.configure(2) do |config|
  config.vm.box = distro_to_use
  config.vm.box_check_update = false
  config.vm.synced_folder '.', '/vagrant', disabled: true
  if ENV['VAGRANT_BOX_VERSION']
    config.vm.box_version = ENV['VAGRANT_BOX_VERSION']
  end

  # Copy in the user's .gitconfig if it exists
  copy_gitconfig(config)

  # Copy in the user's tools if they exist
  copy_vagrant_tools(config, files_sync_backend)
  copy_sources_dirs(config, files_sync_backend)

  # rsync the artifacts from the build
  copy_spdk_artifacts(config, plugins_sync_backend)

  # Set up SSH
  setup_ssh(config)

  # Virtualbox configuration
  setup_virtualbox(config, vmcpu, vmram)

  setup_libvirt(config, vmcpu, vmram, distro)

  # Provision the VM with all of the necessary spdk dependencies for running the autorun.sh tests
  deploy_test_vm(config, distro, plugins_sync_backend)
end

if ENV['EXTRA_VAGRANTFILES']
  loaders = (ENV['EXTRA_VAGRANTFILES'].split(','))
  loaders.each { |loader|
    load loader if File.exist?(loader)
  }
end
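
# Usage sketch (file name is illustrative): extra snippets are plain Ruby files
# loaded on top of this Vagrantfile and silently skipped when missing.
#   EXTRA_VAGRANTFILES=Vagrantfile.local vagrant up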