Compare commits
13 Commits
| SHA1 |
|---|
| 87036cef6f |
| ea938b9a88 |
| 18f65e5a8a |
| 2a280f2fdc |
| 466eb99e9f |
| 8760280eb3 |
| 867063e463 |
| e5cfae172d |
| 1050ace333 |
| 34ccb2d7d9 |
| 5ac01ab53e |
| 3bc1b71100 |
| 73fee9c732 |
CHANGELOG.md (+21 lines)

@@ -1,5 +1,26 @@
 # Changelog

+## v18.01.1: vhost CVE fixes, NVMe getpid() caching
+
+This release contains the following fixes. All users of SPDK v18.01 are strongly
+recommended to upgrade.
+
+### Fixes for DPDK CVE-2018-1059
+
+The SPDK vhost-scsi and vhost-blk applications now have fixes to address the DPDK rte_vhost
+vulnerability [CVE-2018-1059](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1059).
+Please see this [security advisory](https://access.redhat.com/security/cve/cve-2018-1059)
+for additional information on this DPDK vulnerability.
+
+### NVMe driver getpid() caching
+
+The SPDK NVMe driver now caches the pid in a global variable rather than calling getpid() on
+every request. The SPDK NVMe driver associates each request with the pid of its submitting
+process to enable multi-process support. glibc 2.25 eliminated pid caching, resulting in a
+system call on every getpid() invocation, which degraded SPDK NVMe driver efficiency. glibc
+eliminated pid caching for use cases (such as forking) that are not supported by SPDK, so pid
+caching is an acceptable solution to eliminate this degradation.
+
 ## v18.01: Blobstore Thin Provisioning

 ### Build System
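The getpid() caching described in the changelog entry above amounts to reading the pid once at driver initialization and reusing the cached value on the I/O path. A minimal standalone sketch of the pattern (the names here are illustrative, not SPDK's actual symbols):

```c
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* On glibc >= 2.25 every getpid() call is a real system call, so a
 * per-request call adds syscall overhead to the submission path. */
static pid_t g_cached_pid;

struct request {
	pid_t pid;	/* submitting process, used for multi-process support */
};

static void driver_init(void)
{
	/* Safe to cache because fork() after init is not a supported model. */
	g_cached_pid = getpid();
}

static void request_init(struct request *req)
{
	req->pid = g_cached_pid;	/* no syscall on the hot path */
}

int main(void)
{
	struct request req;

	driver_init();
	request_init(&req);
	printf("request submitted by pid %d\n", (int)req.pid);
	return 0;
}
```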
@@ -92,7 +92,10 @@ fi
 timing_enter lib

 if [ $SPDK_TEST_BLOCKDEV -eq 1 ]; then
-	run_test test/lib/bdev/blockdev.sh
+	# NOTE: disabled on SPDK v18.01.x branch when ASAN is enabled
+	if [ $SPDK_RUN_ASAN -eq 0 ]; then
+		run_test test/lib/bdev/blockdev.sh
+	fi
 fi

 if [ $SPDK_TEST_EVENT -eq 1 ]; then
examples/ioat/kperf/kmod/.gitignore (vendored, +1 line)

@@ -1,3 +1,4 @@
+.cache.mk
 .tmp_versions
 dmaperf.mod.c
 modules.order
@@ -54,12 +54,12 @@
  * Patch level is incremented on maintenance branch releases and reset to 0 for each
  * new major.minor release.
  */
-#define SPDK_VERSION_PATCH 0
+#define SPDK_VERSION_PATCH 2

 /**
  * Version string suffix.
  */
-#define SPDK_VERSION_SUFFIX ""
+#define SPDK_VERSION_SUFFIX "-pre"

 /**
  * Single numeric value representing a version number for compile-time comparisons.
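The comment above mentions compile-time comparisons. As a hedged illustration of consuming these macros, using only the individual components (the exact packing of SPDK_VERSION_NUM is not shown in this hunk, so the sketch avoids assuming it):

```c
#include "spdk/version.h"

/* Hypothetical feature guard: true on SPDK v18.01 and newer. */
#if SPDK_VERSION_MAJOR > 18 || \
	(SPDK_VERSION_MAJOR == 18 && SPDK_VERSION_MINOR >= 1)
#define HAVE_V18_01_FEATURES 1	/* illustrative macro, not from SPDK */
#endif
```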
@@ -39,6 +39,7 @@
 #define SPDK_NVME_DRIVER_NAME "spdk_nvme_driver"

 struct nvme_driver *g_spdk_nvme_driver;
+static pid_t g_pid;

 int32_t spdk_nvme_retry_count;

@@ -128,7 +129,7 @@ nvme_allocate_request(struct spdk_nvme_qpair *qpair,
 	req->payload = *payload;
 	req->payload_size = payload_size;
 	req->qpair = qpair;
-	req->pid = getpid();
+	req->pid = g_pid;

 	return req;
 }
@@ -259,6 +260,9 @@ nvme_driver_init(void)
 	/* Any socket ID */
 	int socket_id = -1;

+	/* Each process needs its own pid. */
+	g_pid = getpid();
+
 	/*
 	 * Only one thread from one process will do this driver init work.
 	 * The primary process will reserve the shared memory and do the
@@ -143,6 +143,46 @@ rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
 	return 0;
 }

+/**
+ * Convert guest physical address to host virtual address safely
+ *
+ * This variant of rte_vhost_gpa_to_vva() takes care all the
+ * requested length is mapped and contiguous in process address
+ * space.
+ *
+ * @param mem
+ *  the guest memory regions
+ * @param gpa
+ *  the guest physical address for querying
+ * @param len
+ *  the size of the requested area to map,
+ *  updated with actual size mapped
+ * @return
+ *  the host virtual address on success, 0 on failure */
+static inline uint64_t
+rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
+			   uint64_t gpa, uint64_t *len)
+{
+	struct rte_vhost_mem_region *r;
+	uint32_t i;
+
+	for (i = 0; i < mem->nregions; i++) {
+		r = &mem->regions[i];
+		if (gpa >= r->guest_phys_addr &&
+		    gpa < r->guest_phys_addr + r->size) {
+
+			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
+				*len = r->guest_phys_addr + r->size - gpa;
+
+			return gpa - r->guest_phys_addr +
+			       r->host_user_addr;
+		}
+	}
+	*len = 0;
+
+	return 0;
+}
+
 #define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))

 /**
@@ -306,34 +346,6 @@ int rte_vhost_get_mtu(int vid, uint16_t *mtu);
  */
 int rte_vhost_get_numa_node(int vid);

-/**
- * @deprecated
- * Get the number of queues the device supports.
- *
- * Note this function is deprecated, as it returns a queue pair number,
- * which is vhost specific. Instead, rte_vhost_get_vring_num should
- * be used.
- *
- * @param vid
- *  vhost device ID
- *
- * @return
- *  The number of queues, 0 on failure
- */
-__rte_deprecated
-uint32_t rte_vhost_get_queue_num(int vid);
-
-/**
- * Get the number of vrings the device supports.
- *
- * @param vid
- *  vhost device ID
- *
- * @return
- *  The number of vrings, 0 on failure
- */
-uint16_t rte_vhost_get_vring_num(int vid);
-
 /**
  * Get the virtio net device's ifname, which is the vhost-user socket
  * file path.
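The new rte_vhost_va_from_guest_pa() added above shifts a validation burden onto callers: it may shrink *len when the requested range crosses the end of a guest memory region, and a shortened mapping must be treated as a failure. A sketch of the expected caller-side check (map_guest_buffer is a hypothetical helper, mirroring what SPDK's spdk_vhost_gpa_to_vva() wrapper does later in this diff):

```c
#include <stdint.h>
#include <stddef.h>
#include "rte_vhost.h"	/* the bundled header patched above */

static void *map_guest_buffer(struct rte_vhost_memory *mem,
			      uint64_t gpa, uint64_t len)
{
	uint64_t mapped = len;
	uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &mapped);

	/* Reject unmapped addresses and ranges that are not contiguous
	 * for the full requested length (the core of the CVE fix). */
	if (vva == 0 || mapped != len) {
		return NULL;
	}
	return (void *)(uintptr_t)vva;
}
```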
@@ -307,28 +307,6 @@ rte_vhost_get_numa_node(int vid)
 #endif
 }

-uint32_t
-rte_vhost_get_queue_num(int vid)
-{
-	struct virtio_net *dev = get_device(vid);
-
-	if (dev == NULL)
-		return 0;
-
-	return dev->nr_vring / 2;
-}
-
-uint16_t
-rte_vhost_get_vring_num(int vid)
-{
-	struct virtio_net *dev = get_device(vid);
-
-	if (dev == NULL)
-		return 0;
-
-	return dev->nr_vring;
-}
-
 int
 rte_vhost_get_ifname(int vid, char *buf, size_t len)
 {
@@ -323,7 +323,7 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
  * used to convert the ring addresses to our address space.
  */
 static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qva)
+qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
 {
 	struct rte_vhost_mem_region *reg;
 	uint32_t i;
@@ -334,6 +334,10 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)

 		if (qva >= reg->guest_user_addr &&
 		    qva < reg->guest_user_addr + reg->size) {
+
+			if (unlikely(*len > reg->guest_user_addr + reg->size - qva))
+				*len = reg->guest_user_addr + reg->size - qva;
+
 			return qva - reg->guest_user_addr +
 			       reg->host_user_addr;
 		}
@@ -352,6 +356,7 @@ static int
 vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq;
+	uint64_t len;

 	if (dev->has_new_mem_table) {
 		vhost_setup_mem_table(dev);
@@ -372,11 +377,12 @@ vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
 	vq = dev->virtqueue[msg->payload.addr.index];

 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
+	len = sizeof(struct vring_desc) * vq->size;
 	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
-			msg->payload.addr.desc_user_addr);
-	if (vq->desc == 0) {
+			msg->payload.addr.desc_user_addr, &len);
+	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
-			"(%d) failed to find desc ring address.\n",
+			"(%d) failed to map desc ring.\n",
 			dev->vid);
 		return -1;
 	}
@@ -384,18 +390,25 @@ vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
 	dev = numa_realloc(dev, msg->payload.addr.index);
 	vq = dev->virtqueue[msg->payload.addr.index];

+	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
 	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
-			msg->payload.addr.avail_user_addr);
-	if (vq->avail == 0) {
+			msg->payload.addr.avail_user_addr, &len);
+	if (vq->avail == 0 ||
+			len != sizeof(struct vring_avail)
+			+ sizeof(uint16_t) * vq->size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find avail ring address.\n",
 			dev->vid);
 		return -1;
 	}

+	len = sizeof(struct vring_used) +
+		sizeof(struct vring_used_elem) * vq->size;
 	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
-			msg->payload.addr.used_user_addr);
-	if (vq->used == 0) {
+			msg->payload.addr.used_user_addr, &len);
+	if (vq->used == 0 || len != sizeof(struct vring_used) +
+			sizeof(struct vring_used_elem) * vq->size) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find used ring address.\n",
 			dev->vid);
@@ -689,13 +702,14 @@ virtio_is_ready(struct virtio_net *dev)
 	for (i = 0; i < dev->nr_vring; i++) {
 		vq = dev->virtqueue[i];

-		if (!vq_is_ready(vq))
-			return 0;
-	}
-
-	RTE_LOG(INFO, VHOST_CONFIG,
-		"virtio is now ready for processing.\n");
+		if (vq_is_ready(vq)) {
+			RTE_LOG(INFO, VHOST_CONFIG,
+				"virtio is now ready for processing.\n");
+			return 1;
+		}
+	}

-	return 1;
+	return 0;
 }

 static void
@@ -86,9 +86,19 @@ const struct vhost_device_ops g_spdk_vhost_ops = {
 static struct spdk_vhost_dev *g_spdk_vhost_devices[MAX_VHOST_DEVICES];
 static pthread_mutex_t g_spdk_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;

-void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr)
+void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len)
 {
-	return (void *)rte_vhost_gpa_to_vva(vdev->mem, addr);
+	void *vva;
+	uint64_t newlen;
+
+	newlen = len;
+	vva = (void *)rte_vhost_va_from_guest_pa(vdev->mem, addr, &newlen);
+	if (newlen != len) {
+		return NULL;
+	}
+
+	return vva;
+
 }

 static void
@@ -213,8 +223,9 @@ spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue

 	if (spdk_vhost_vring_desc_is_indirect(*desc)) {
 		assert(spdk_vhost_dev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC));
-		*desc_table = spdk_vhost_gpa_to_vva(vdev, (*desc)->addr);
 		*desc_table_size = (*desc)->len / sizeof(**desc);
+		*desc_table = spdk_vhost_gpa_to_vva(vdev, (*desc)->addr,
+						    sizeof(**desc) * *desc_table_size);
 		*desc = *desc_table;
 		if (*desc == NULL) {
 			return -1;
@@ -262,7 +273,7 @@ check_dev_io_stats(struct spdk_vhost_dev *vdev, uint64_t now)
 	}

 	vdev->next_stats_check_time = now + vdev->stats_check_interval;
-	for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
+	for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
 		virtqueue = &vdev->virtqueue[q_idx];

 		req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
@@ -286,10 +297,11 @@ spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
 	uint16_t q_idx;

 	if (vdev->coalescing_delay_time_base == 0) {
-		for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
+		for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
 			virtqueue = &vdev->virtqueue[q_idx];

-			if (virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
+			if (virtqueue->vring.desc == NULL ||
+			    (virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
 				continue;
 			}

@@ -299,7 +311,7 @@ spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
 	now = spdk_get_ticks();
 	check_dev_io_stats(vdev, now);

-	for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
+	for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
 		virtqueue = &vdev->virtqueue[q_idx];

 		/* No need for event right now */
@@ -429,7 +441,7 @@ spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
 		SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
 		return -1;
 	}
-	vva = (uintptr_t)spdk_vhost_gpa_to_vva(vdev, payload);
+	vva = (uintptr_t)rte_vhost_gpa_to_vva(vdev->mem, payload);
 	if (vva == 0) {
 		SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
 		return -1;
@@ -448,7 +460,7 @@ spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
 	 */
 	len = to_boundary;
 	while (len < remaining) {
-		if (vva + len != (uintptr_t)spdk_vhost_gpa_to_vva(vdev, payload + len)) {
+		if (vva + len != (uintptr_t)rte_vhost_gpa_to_vva(vdev->mem, payload + len)) {
 			break;
 		}
 		len += spdk_min(remaining - len, 0x200000);
@@ -983,8 +995,11 @@ stop_device(int vid)
 		return;
 	}

-	for (i = 0; i < vdev->num_queues; i++) {
+	for (i = 0; i < vdev->max_queues; i++) {
 		q = &vdev->virtqueue[i].vring;
+		if (q->desc == NULL) {
+			continue;
+		}
 		rte_vhost_set_vhost_vring_last_idx(vdev->vid, i, q->last_avail_idx, q->last_used_idx);
 	}

@@ -999,11 +1014,9 @@ start_device(int vid)
 {
 	struct spdk_vhost_dev *vdev;
 	int rc = -1;
-	uint16_t num_queues;
 	uint16_t i;

 	pthread_mutex_lock(&g_spdk_vhost_mutex);
-	num_queues = rte_vhost_get_vring_num(vid);

 	vdev = spdk_vhost_dev_find_by_vid(vid);
 	if (vdev == NULL) {
@@ -1016,22 +1029,16 @@ start_device(int vid)
 		goto out;
 	}

-	if (num_queues > SPDK_VHOST_MAX_VQUEUES) {
-		SPDK_ERRLOG("vhost device %d: Too many queues (%"PRIu16"). Max %"PRIu16"\n", vid, num_queues,
-			    SPDK_VHOST_MAX_VQUEUES);
-		goto out;
-	}
-
+	vdev->max_queues = 0;
 	memset(vdev->virtqueue, 0, sizeof(vdev->virtqueue));
-	for (i = 0; i < num_queues; i++) {
+	for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
 		if (rte_vhost_get_vhost_vring(vid, i, &vdev->virtqueue[i].vring)) {
-			SPDK_ERRLOG("vhost device %d: Failed to get information of queue %"PRIu16"\n", vid, i);
-			goto out;
+			continue;
 		}

-		if (vdev->virtqueue[i].vring.size == 0) {
-			SPDK_ERRLOG("vhost device %d: Queue %"PRIu16" has size 0.\n", vid, i);
-			goto out;
+		if (vdev->virtqueue[i].vring.desc == NULL ||
+		    vdev->virtqueue[i].vring.size == 0) {
+			continue;
 		}

 		/* Disable notifications. */
@@ -1039,9 +1046,9 @@ start_device(int vid)
 			SPDK_ERRLOG("vhost device %d: Failed to disable guest notification on queue %"PRIu16"\n", vid, i);
 			goto out;
 		}
-	}

-	vdev->num_queues = num_queues;
+		vdev->max_queues = i + 1;
+	}

 	if (rte_vhost_get_negotiated_features(vid, &vdev->negotiated_features) != 0) {
 		SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
@@ -1062,7 +1069,7 @@ start_device(int vid)
 	 *
 	 * Tested on QEMU 2.10.91 and 2.11.50.
 	 */
-	for (i = 0; i < num_queues; i++) {
+	for (i = 0; i < vdev->max_queues; i++) {
 		if (vdev->virtqueue[i].vring.callfd != -1) {
 			eventfd_write(vdev->virtqueue[i].vring.callfd, (eventfd_t)1);
 		}
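The start_device() rework above replaces the dense 0..num_queues range with a sparse scan: every possible queue slot is probed, unset queues are skipped rather than treated as fatal, and max_queues records a watermark one past the highest populated index so the pollers can bound their loops. A minimal sketch of that watermark pattern, with illustrative names:

```c
#include <stddef.h>

#define MAX_VQUEUES 16	/* illustrative bound, not SPDK's constant value */

struct vring_state {
	void *desc;	/* NULL when the guest never set this queue up */
};

static unsigned int
discover_queues(struct vring_state *vq, unsigned int *max_queues)
{
	unsigned int i, found = 0;

	*max_queues = 0;
	for (i = 0; i < MAX_VQUEUES; i++) {
		if (vq[i].desc == NULL) {
			continue;	/* hole: skip it, don't fail */
		}
		found++;
		*max_queues = i + 1;	/* watermark covers holes below it */
	}
	return found;
}
```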
@@ -330,7 +330,7 @@ vdev_worker(void *arg)
 	struct spdk_vhost_blk_dev *bvdev = arg;
 	uint16_t q_idx;

-	for (q_idx = 0; q_idx < bvdev->vdev.num_queues; q_idx++) {
+	for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
 		process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
 	}

@@ -363,7 +363,7 @@ no_bdev_vdev_worker(void *arg)
 	struct spdk_vhost_blk_dev *bvdev = arg;
 	uint16_t q_idx;

-	for (q_idx = 0; q_idx < bvdev->vdev.num_queues; q_idx++) {
+	for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
 		no_bdev_process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
 	}

@@ -427,7 +427,7 @@ free_task_pool(struct spdk_vhost_blk_dev *bvdev)
 	struct spdk_vhost_virtqueue *vq;
 	uint16_t i;

-	for (i = 0; i < bvdev->vdev.num_queues; i++) {
+	for (i = 0; i < bvdev->vdev.max_queues; i++) {
 		vq = &bvdev->vdev.virtqueue[i];
 		if (vq->tasks == NULL) {
 			continue;
@@ -447,8 +447,12 @@ alloc_task_pool(struct spdk_vhost_blk_dev *bvdev)
 	uint16_t i;
 	uint32_t j;

-	for (i = 0; i < bvdev->vdev.num_queues; i++) {
+	for (i = 0; i < bvdev->vdev.max_queues; i++) {
 		vq = &bvdev->vdev.virtqueue[i];
+		if (vq->vring.desc == NULL) {
+			continue;
+		}
+
 		task_cnt = vq->vring.size;
 		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
 			/* sanity check */
@@ -486,7 +490,7 @@ static int
 spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
 {
 	struct spdk_vhost_blk_dev *bvdev;
-	int rc = 0;
+	int i, rc = 0;

 	bvdev = to_blk_dev(vdev);
 	if (bvdev == NULL) {
@@ -495,6 +499,15 @@ spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
 		goto out;
 	}

+	/* validate all I/O queues are in a contiguous index range */
+	for (i = 0; i < vdev->max_queues; i++) {
+		if (vdev->virtqueue[i].vring.desc == NULL) {
+			SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
+			rc = -1;
+			goto out;
+		}
+	}
+
 	rc = alloc_task_pool(bvdev);
 	if (rc != 0) {
 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", bvdev->vdev.name);
@@ -537,7 +550,7 @@ destroy_device_poller_cb(void *arg)
 		return;
 	}

-	for (i = 0; i < bvdev->vdev.num_queues; i++) {
+	for (i = 0; i < bvdev->vdev.max_queues; i++) {
 		bvdev->vdev.virtqueue[i].next_event_time = 0;
 		spdk_vhost_vq_used_signal(&bvdev->vdev, &bvdev->vdev.virtqueue[i]);
 	}
@@ -161,7 +161,7 @@ struct spdk_vhost_dev {
 	/* Interval used for event coalescing checking. */
 	uint64_t stats_check_interval;

-	uint16_t num_queues;
+	uint16_t max_queues;

 	uint64_t negotiated_features;

@@ -172,7 +172,7 @@ struct spdk_vhost_dev *spdk_vhost_dev_find(const char *ctrlr_name);
 void spdk_vhost_dev_mem_register(struct spdk_vhost_dev *vdev);
 void spdk_vhost_dev_mem_unregister(struct spdk_vhost_dev *vdev);

-void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr);
+void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len);

 uint16_t spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
 				      uint16_t reqs_len);
@@ -185,7 +185,7 @@ eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_
 		goto out;
 	}

-	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr);
+	desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr, sizeof(*desc_ev));
 	if (desc_ev == NULL) {
 		SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
 			    svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
@@ -319,7 +319,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
 		goto out;
 	}

-	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+	ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*ctrl_req));

 	SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI_QUEUE,
 		      "Processing controlq descriptor: desc %d/%p, desc_addr %p, len %d, flags %d, last_used_idx %d; kickfd %d; size %d\n",
@@ -340,7 +340,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
 	/* Process the TMF request */
 	switch (ctrl_req->type) {
 	case VIRTIO_SCSI_T_TMF:
-		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+		task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->tmf_resp));
 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
 			SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
 				    vdev->name, task->req_idx);
@@ -369,7 +369,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
 		break;
 	case VIRTIO_SCSI_T_AN_QUERY:
 	case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
-		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+		an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*an_resp));
 		if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
 			SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
 				     vdev->name);
@@ -418,7 +418,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
 		goto invalid_task;
 	}

-	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+	*req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(**req));
 	if (spdk_unlikely(*req == NULL)) {
 		SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
 			     vdev->name, task->req_idx);
@@ -440,7 +440,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
 	/*
 	 * FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
 	 */
-	task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+	task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
 	if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
 		SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
 			     vdev->name, task->req_idx);
@@ -510,7 +510,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
 		}
 	}

-	task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr);
+	task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
 	if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
 		SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
 			     vdev->name, task->req_idx);
@@ -662,7 +662,7 @@ vdev_worker(void *arg)
 	struct spdk_vhost_scsi_dev *svdev = arg;
 	uint32_t q_idx;

-	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
+	for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
 		process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
 	}

@@ -990,7 +990,7 @@ free_task_pool(struct spdk_vhost_scsi_dev *svdev)
 	struct spdk_vhost_virtqueue *vq;
 	uint16_t i;

-	for (i = 0; i < svdev->vdev.num_queues; i++) {
+	for (i = 0; i < svdev->vdev.max_queues; i++) {
 		vq = &svdev->vdev.virtqueue[i];
 		if (vq->tasks == NULL) {
 			continue;
@@ -1010,8 +1010,12 @@ alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
 	uint16_t i;
 	uint32_t j;

-	for (i = 0; i < svdev->vdev.num_queues; i++) {
+	for (i = 0; i < svdev->vdev.max_queues; i++) {
 		vq = &svdev->vdev.virtqueue[i];
+		if (vq->vring.desc == NULL) {
+			continue;
+		}
+
 		task_cnt = vq->vring.size;
 		if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
 			/* sanity check */
@@ -1058,6 +1062,15 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
 		goto out;
 	}

+	/* validate all I/O queues are in a contiguous index range */
+	for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) {
+		if (vdev->virtqueue[i].vring.desc == NULL) {
+			SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
+			rc = -1;
+			goto out;
+		}
+	}
+
 	rc = alloc_task_pool(svdev);
 	if (rc != 0) {
 		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
@@ -1075,8 +1088,11 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
 	spdk_vhost_dev_mem_register(vdev);

 	svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
-	svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
-						  MGMT_POLL_PERIOD_US);
+	if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
+	    vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
+		svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
+							  MGMT_POLL_PERIOD_US);
+	}
 out:
 	spdk_vhost_dev_backend_event_done(event_ctx, rc);
 	return rc;
@@ -1100,7 +1116,7 @@ destroy_device_poller_cb(void *arg)
 	}


-	for (i = 0; i < svdev->vdev.num_queues; i++) {
+	for (i = 0; i < svdev->vdev.max_queues; i++) {
 		spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
 	}

|
@ -13,7 +13,7 @@ run_step() {
|
||||
echo "--spdk_cache_size=$CACHE_SIZE" >> "$1"_flags.txt
|
||||
|
||||
echo -n Start $1 test phase...
|
||||
/usr/bin/time taskset 0xFFF $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
/usr/bin/time taskset 0xFF $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
echo done.
|
||||
}
|
||||
|
||||
|
@ -140,9 +140,9 @@ run_step() {
|
||||
echo -n Start $1 test phase...
|
||||
if [ "$USE_PERF" = "1" ]
|
||||
then
|
||||
sudo /usr/bin/time taskset 0xFFF perf record $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
sudo /usr/bin/time taskset 0xFF perf record $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
else
|
||||
sudo /usr/bin/time taskset 0xFFF $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
sudo /usr/bin/time taskset 0xFF $DB_BENCH --flagfile="$1"_flags.txt &> "$1"_db_bench.txt
|
||||
fi
|
||||
echo done.
|
||||
|
||||
|
@@ -13,5 +13,5 @@ if [ -z "$ISCSI_APP" ]; then
 fi

 if [ -z "$ISCSI_TEST_CORE_MASK" ]; then
-	ISCSI_TEST_CORE_MASK=0xFFFF
+	ISCSI_TEST_CORE_MASK=0xFF
 fi
@@ -25,7 +25,7 @@ function nbd_function_test() {
 	local rpc_server=/var/tmp/spdk-nbd.sock
 	local conf=$1
 	local nbd_num=6
-	local nbd_all=(`ls /dev/nbd*`)
+	local nbd_all=(`ls /dev/nbd* | grep -v p`)
 	local bdev_all=($bdevs_name)
 	local nbd_list=(${nbd_all[@]:0:$nbd_num})
 	local bdev_list=(${bdev_all[@]:0:$nbd_num})
@@ -9,7 +9,7 @@ if [ -z "$NVMF_APP" ]; then
 fi

 if [ -z "$NVMF_TEST_CORE_MASK" ]; then
-	NVMF_TEST_CORE_MASK=0xFFFF
+	NVMF_TEST_CORE_MASK=0xFF
 fi

 function load_ib_rdma_modules()
@@ -87,7 +87,8 @@ DEFINE_STUB_V(spdk_vhost_call_external_event, (const char *ctrlr_name, spdk_vhos
 DEFINE_STUB(spdk_vhost_dev_has_feature, bool, (struct spdk_vhost_dev *vdev, unsigned feature_id),
 	    false);
 DEFINE_STUB(spdk_vhost_vring_desc_has_next, bool, (struct vring_desc *cur_desc), false);
-DEFINE_STUB_VP(spdk_vhost_gpa_to_vva, (struct spdk_vhost_dev *vdev, uint64_t addr), {0});
+DEFINE_STUB_VP(spdk_vhost_gpa_to_vva, (struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len),
+	       {0});
 DEFINE_STUB(spdk_scsi_dev_get_id, int, (const struct spdk_scsi_dev *dev), {0});
 DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
 DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
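The DEFINE_STUB_VP change above only extends the stubbed signature with the new uint64_t len parameter. For readers unfamiliar with the convention, here is a simplified sketch of what such a stub-generator macro expands to (SPDK's real unit-test macros also support per-test return-value overrides; this is only an illustration):

```c
/* Simplified stand-in for SPDK's unit-test stub generator. */
#define DEFINE_STUB(fn, ret, dargs, val) \
	ret fn dargs { return val; }

struct spdk_json_write_ctx;

/* Expands to:
 * int spdk_json_write_null(struct spdk_json_write_ctx *w) { return 0; }
 */
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0)
```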