vhost: defer setting up new mem table

The first step is to not destroy an existing device in
vhost_user_set_mem_table().  We may still be processing
I/O via INT13 while QEMU is setting up the mem tables
for OS boot.

The primary part of this patch, though, is to defer
using the new mem table until after we receive the
first SET_VRING_ADDR message.  QEMU sends SET_VRING_ADDR
when the guest OS virtio-scsi driver starts
initialization, at which point it is safe to invalidate
the old mem tables because there will be no more
INT13 I/O.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I45fb5910f45e7fd2cf4a325341ad105a57d8ea40
Author: Jim Harris <james.r.harris@intel.com>
Date:   2017-03-28 13:30:40 -07:00
Commit: f325e71c9d (parent 7fa7f91ee3)
2 changed files with 37 additions and 8 deletions
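
The deferral itself is a small state machine; here is a minimal standalone
sketch of the pattern (types are simplified, and remap_guest_memory() is a
hypothetical stand-in for the mmap work that vhost_setup_mem_table() does in
the diff below):

struct mem_table {
	unsigned int nregions;
	/* region descriptors, fds, ... */
};

struct dev {
	int has_new_mem_table;		/* staged, but not yet live */
	struct mem_table staged;	/* copy of the message payload */
};

static void remap_guest_memory(struct dev *d, const struct mem_table *t);

/* SET_MEM_TABLE: stage only.  The old mappings stay live, so any
 * in-flight INT13 (BIOS) I/O keeps working against the old table. */
static void
on_set_mem_table(struct dev *d, const struct mem_table *t)
{
	d->staged = *t;
	d->has_new_mem_table = 1;
}

/* SET_VRING_ADDR: the guest driver is initializing, so the old
 * table is idle and can be torn down and replaced safely. */
static void
on_set_vring_addr(struct dev *d)
{
	if (d->has_new_mem_table) {
		remap_guest_memory(d, &d->staged);
		d->has_new_mem_table = 0;
	}
	/* ...then translate the ring addresses as usual. */
}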


@@ -43,6 +43,7 @@
 #include <rte_log.h>
 
 #include "rte_virtio_net.h"
+#include "vhost_user.h"
 
 /* Used to indicate that the device is running on a data core */
 #define VIRTIO_DEV_RUNNING 1
@@ -171,6 +172,9 @@ struct virtio_net {
 	uint32_t		nr_guest_pages;
 	uint32_t		max_guest_pages;
 	struct guest_page	*guest_pages;
+	int			has_new_mem_table;
+	struct VhostUserMemory	mem_table;
+	int			mem_table_fds[VHOST_MEMORY_MAX_NREGIONS];
 } __rte_cache_aligned;
 
 /**

@@ -317,6 +317,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
 	return 0;
 }
 
+static int vhost_setup_mem_table(struct virtio_net *dev);
+
 /*
  * The virtio device sends us the desc, used and avail ring addresses.
  * This function then converts these to our address space.
@@ -326,6 +328,12 @@ vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq;
 
+	if (dev->has_new_mem_table) {
+		vhost_setup_mem_table(dev);
+		dev->has_new_mem_table = 0;
+	}
+
 	if (dev->mem == NULL)
 		return -1;
 
@@ -492,7 +500,30 @@ dump_guest_pages(struct virtio_net *dev)
 static int
 vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
-	struct VhostUserMemory memory = pmsg->payload.memory;
+	uint32_t i;
+
+	if (dev->has_new_mem_table) {
+		/*
+		 * The previous mem table was not consumed, so close the
+		 * file descriptors from that mem table before copying
+		 * the new one.
+		 */
+		for (i = 0; i < dev->mem_table.nregions; i++) {
+			close(dev->mem_table_fds[i]);
+		}
+	}
+
+	memcpy(&dev->mem_table, &pmsg->payload.memory, sizeof(dev->mem_table));
+	memcpy(dev->mem_table_fds, pmsg->fds, sizeof(dev->mem_table_fds));
+	dev->has_new_mem_table = 1;
+
+	return 0;
+}
+
+static int
+vhost_setup_mem_table(struct virtio_net *dev)
+{
+	struct VhostUserMemory memory = dev->mem_table;
 	struct virtio_memory_region *reg;
 	void *mmap_addr;
 	uint64_t mmap_size;
@@ -501,12 +532,6 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	uint32_t i;
 	int fd;
 
-	/* Remove from the data plane. */
-	if (dev->flags & VIRTIO_DEV_RUNNING) {
-		dev->flags &= ~VIRTIO_DEV_RUNNING;
-		notify_ops->destroy_device(dev->vid);
-	}
-
 	if (dev->mem) {
 		free_mem_region(dev);
 		rte_free(dev->mem);
@@ -531,7 +556,7 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	dev->mem->nregions = memory.nregions;
 
 	for (i = 0; i < memory.nregions; i++) {
-		fd = pmsg->fds[i];
+		fd = dev->mem_table_fds[i];
 		reg = &dev->mem->regions[i];
 
 		reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
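
A side effect of staging the table is file descriptor ownership: each
SET_MEM_TABLE message carries one fd per memory region, and the target
owns them once received.  If a second SET_MEM_TABLE arrives before any
SET_VRING_ADDR has consumed the staged one, the staged fds would leak
unless closed first, which is what the loop at the top of
vhost_user_set_mem_table() above does.  The same rule in isolation
(simplified types; stage_table() is a hypothetical reduction of the
handler above):

#include <unistd.h>

#define MAX_REGIONS 8

struct staged {
	int has_new;			/* staged but not yet consumed */
	unsigned int nregions;
	int fds[MAX_REGIONS];		/* one fd per memory region */
};

static void
stage_table(struct staged *st, unsigned int nregions, const int *fds)
{
	unsigned int i;

	if (st->has_new) {
		/* Previous table was never consumed: release its fds
		 * before overwriting them, or they leak. */
		for (i = 0; i < st->nregions; i++)
			close(st->fds[i]);
	}

	st->nregions = nregions;
	for (i = 0; i < nregions; i++)
		st->fds[i] = fds[i];
	st->has_new = 1;
}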