rte_virtio: add virtio_dev struct

Previously, virtio_hw managed both VirtIO PCI and
vhost-user devices. Now there's virtio_dev, a common
part for both backends; virtio_hw is used only for PCI.

Note that this patch does not introduce another
abstraction layer. It only unifies an already existing
one. Previously virtio_user_dev was built on top of
virtio_hw, with most PCI fields just hanging there
unused. Now both virtio_user_dev and virtio_hw are
built on top of virtio_dev.
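
In other words, the new layout looks roughly like this
(a simplified sketch of the structs in this diff, with
fields abridged):

    #include <stddef.h>     /* offsetof() */
    #include <stdint.h>

    struct virtqueue;       /* opaque here */
    struct rte_pci_device;

    /* the common part, embedded by both backends */
    struct virtio_dev {
            struct virtqueue **vqs;
            uint64_t guest_features;
            int is_hw;      /* 1 for PCI, 0 for vhost-user */
            /* ... */
    };

    struct virtio_hw {      /* PCI backend */
            struct virtio_dev vdev;
            struct rte_pci_device *pci_dev;
            /* ... */
    };

    struct virtio_user_dev {        /* vhost-user backend */
            struct virtio_dev vdev;
            int vhostfd;
            /* ... */
    };

    /* Backend callbacks take a struct virtio_dev * and recover
     * their private struct with an offsetof()-based downcast: */
    #define virtio_dev_get_hw(dev) \
            ((struct virtio_hw *)((uintptr_t)(dev) - \
                    offsetof(struct virtio_hw, vdev)))

The vhost-user side uses the analogous
virtio_dev_get_user_dev() for the same purpose.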

Change-Id: Ida25defc0063055a81cf4039c9b85470b9880bc3
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/376966
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Dariusz Stojaczyk authored on 2017-09-01 19:33:53 +02:00; committed by Daniel Verkamp
parent 72ebd59086
commit 09af33b6a4
10 changed files with 268 additions and 238 deletions

View File

@@ -68,7 +68,7 @@ struct virtio_scsi_io_ctx {
};
struct virtio_scsi_scan_base {
struct virtio_hw *hw;
struct virtio_dev *vdev;
struct spdk_bdev_poller *scan_poller;
/* Currently queried target */
@@ -84,14 +84,14 @@ struct virtio_scsi_scan_base {
struct virtio_scsi_disk {
struct spdk_bdev bdev;
struct virtio_hw *hw;
struct virtio_dev *vdev;
uint64_t num_blocks;
uint32_t block_size;
TAILQ_ENTRY(virtio_scsi_disk) link;
};
struct bdev_virtio_io_channel {
struct virtio_hw *hw;
struct virtio_dev *vdev;
struct spdk_bdev_poller *poller;
};
@@ -148,7 +148,7 @@ bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
to_be16(&req->cdb[7], bdev_io->u.write.num_blocks);
}
virtio_xmit_pkts(disk->hw->vqs[2], vreq);
virtio_xmit_pkts(disk->vdev->vqs[2], vreq);
}
static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
@@ -199,7 +199,7 @@ bdev_virtio_get_io_channel(void *ctx)
{
struct virtio_scsi_disk *disk = ctx;
return spdk_get_io_channel(&disk->hw);
return spdk_get_io_channel(&disk->vdev);
}
static int
@@ -260,7 +260,7 @@ bdev_virtio_poll(void *arg)
struct virtio_req *req[32];
uint16_t i, cnt;
cnt = virtio_recv_pkts(ch->hw->vqs[2], req, SPDK_COUNTOF(req));
cnt = virtio_recv_pkts(ch->vdev->vqs[2], req, SPDK_COUNTOF(req));
for (i = 0; i < cnt; ++i) {
bdev_virtio_io_cpl(req[i]);
}
@@ -269,10 +269,10 @@ bdev_virtio_poll(void *arg)
static int
bdev_virtio_create_cb(void *io_device, void *ctx_buf)
{
struct virtio_hw **hw = io_device;
struct virtio_dev **vdev = io_device;
struct bdev_virtio_io_channel *ch = ctx_buf;
ch->hw = *hw;
ch->vdev = *vdev;
spdk_bdev_poller_start(&ch->poller, bdev_virtio_poll, ch,
spdk_env_get_current_core(), 0);
return 0;
@@ -301,7 +301,7 @@ scan_target_finish(struct virtio_scsi_scan_base *base)
while ((disk = TAILQ_FIRST(&base->found_disks))) {
TAILQ_REMOVE(&base->found_disks, disk, link);
spdk_io_device_register(&disk->hw, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
spdk_io_device_register(&disk->vdev, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
sizeof(struct bdev_virtio_io_channel));
spdk_bdev_register(&disk->bdev);
}
@@ -334,7 +334,7 @@ process_scan_inquiry(struct virtio_scsi_scan_base *base, struct virtio_req *vreq
iov[0].iov_len = 32;
to_be32(&req->cdb[10], iov[0].iov_len);
virtio_xmit_pkts(base->hw->vqs[2], vreq);
virtio_xmit_pkts(base->vdev->vqs[2], vreq);
return 0;
}
@@ -360,7 +360,7 @@ process_read_cap(struct virtio_scsi_scan_base *base, struct virtio_req *vreq)
disk->num_blocks = from_be64((uint64_t *)(vreq->iov[0].iov_base)) + 1;
disk->block_size = from_be32((uint32_t *)(vreq->iov[0].iov_base + 8));
disk->hw = base->hw;
disk->vdev = base->vdev;
bdev = &disk->bdev;
bdev->name = spdk_sprintf_alloc("Virtio0");
@@ -416,7 +416,7 @@ bdev_scan_poll(void *arg)
struct virtio_req *req;
uint16_t cnt;
cnt = virtio_recv_pkts(base->hw->vqs[2], &req, 1);
cnt = virtio_recv_pkts(base->vdev->vqs[2], &req, 1);
if (cnt > 0) {
process_scan_resp(base, req);
}
@@ -456,7 +456,7 @@ scan_target(struct virtio_scsi_scan_base *base)
cdb->opcode = SPDK_SPC_INQUIRY;
cdb->alloc_len[1] = 255;
virtio_xmit_pkts(base->hw->vqs[2], vreq);
virtio_xmit_pkts(base->vdev->vqs[2], vreq);
}
static int
@@ -464,7 +464,7 @@ bdev_virtio_initialize(void)
{
struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Virtio");
struct virtio_scsi_scan_base *base;
struct virtio_hw *hw = NULL;
struct virtio_dev *vdev = NULL;
char *type, *path;
uint32_t i;
int rc = 0;
@@ -485,16 +485,16 @@ bdev_virtio_initialize(void)
SPDK_ERRLOG("No path specified for index %d\n", i);
continue;
}
hw = virtio_user_dev_init(path, 1, 512);
vdev = virtio_user_dev_init(path, 1, 512);
} else if (!strcmp("Pci", type)) {
hw = get_pci_virtio_hw();
vdev = get_pci_virtio_hw();
} else {
SPDK_ERRLOG("Invalid type %s specified for index %d\n", type, i);
continue;
}
}
if (hw == NULL) {
if (vdev == NULL) {
goto out;
}
@@ -506,10 +506,10 @@ bdev_virtio_initialize(void)
}
/* TODO check rc, add virtio_dev_deinit() */
virtio_init_device(hw, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
virtio_dev_start(hw);
virtio_init_device(vdev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
virtio_dev_start(vdev);
base->hw = hw;
base->vdev = vdev;
TAILQ_INIT(&base->found_disks);
spdk_bdev_poller_start(&base->scan_poller, bdev_scan_poll, base,

View File

@@ -67,9 +67,9 @@ static const struct rte_pci_id pci_id_virtio_map[] = {
};
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
virtio_get_nr_vq(struct virtio_dev *dev)
{
return hw->max_queues;
return dev->max_queues;
}
static void
@@ -102,7 +102,7 @@ virtio_init_vring(struct virtqueue *vq)
}
static int
virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
const struct rte_memzone *mz = NULL;
@@ -116,7 +116,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
* Read the virtqueue size from the Queue Size field
* Always a power of 2; if 0, the virtqueue does not exist
*/
vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
vq_size = VTPCI_OPS(dev)->get_queue_num(dev, vtpci_queue_idx);
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
@@ -129,7 +129,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
}
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
hw->port_id, vtpci_queue_idx);
dev->port_id, vtpci_queue_idx);
size = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
@@ -141,9 +141,9 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
}
hw->vqs[vtpci_queue_idx] = vq;
dev->vqs[vtpci_queue_idx] = vq;
vq->hw = hw;
vq->vdev = dev;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
@@ -180,7 +180,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
vq->mz = mz;
if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
if (VTPCI_OPS(dev)->setup_queue(dev, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
return -EINVAL;
}
@@ -195,47 +195,47 @@ fail_q_alloc:
}
static void
virtio_free_queues(struct virtio_hw *hw)
virtio_free_queues(struct virtio_dev *dev)
{
uint16_t nr_vq = virtio_get_nr_vq(hw);
uint16_t nr_vq = virtio_get_nr_vq(dev);
struct virtqueue *vq;
uint16_t i;
if (hw->vqs == NULL)
if (dev->vqs == NULL)
return;
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
vq = dev->vqs[i];
if (!vq)
continue;
rte_memzone_free(vq->mz);
rte_free(vq);
hw->vqs[i] = NULL;
dev->vqs[i] = NULL;
}
rte_free(hw->vqs);
hw->vqs = NULL;
rte_free(dev->vqs);
dev->vqs = NULL;
}
static int
virtio_alloc_queues(struct virtio_hw *hw)
virtio_alloc_queues(struct virtio_dev *dev)
{
uint16_t nr_vq = virtio_get_nr_vq(hw);
uint16_t nr_vq = virtio_get_nr_vq(dev);
uint16_t i;
int ret;
hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
if (!hw->vqs) {
dev->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
if (!dev->vqs) {
PMD_INIT_LOG(ERR, "failed to allocate vqs");
return -ENOMEM;
}
for (i = 0; i < nr_vq; i++) {
ret = virtio_init_queue(hw, i);
ret = virtio_init_queue(dev, i);
if (ret < 0) {
virtio_free_queues(hw);
virtio_free_queues(dev);
return ret;
}
}
@@ -244,7 +244,7 @@ virtio_alloc_queues(struct virtio_hw *hw)
}
static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
{
uint64_t host_features;
@@ -253,7 +253,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
req_features);
/* Read device(host) feature bits */
host_features = VTPCI_OPS(hw)->get_features(hw);
host_features = VTPCI_OPS(dev)->get_features(dev);
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
@@ -261,66 +261,66 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
* Negotiate features: a subset of the device feature bits is written back
* as guest feature bits.
*/
hw->guest_features = req_features;
hw->guest_features = vtpci_negotiate_features(hw, host_features);
dev->guest_features = req_features;
dev->guest_features = vtpci_negotiate_features(dev, host_features);
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
dev->guest_features);
if (hw->modern) {
if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
if (dev->modern) {
if (!vtpci_with_feature(dev, VIRTIO_F_VERSION_1)) {
PMD_INIT_LOG(ERR,
"VIRTIO_F_VERSION_1 features is not enabled.");
return -1;
}
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FEATURES_OK);
if (!(vtpci_get_status(dev) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
PMD_INIT_LOG(ERR,
"failed to set FEATURES_OK status!");
return -1;
}
}
hw->req_guest_features = req_features;
dev->req_guest_features = req_features;
return 0;
}
/* reset device and renegotiate features if needed */
int
virtio_init_device(struct virtio_hw *hw, uint64_t req_features)
virtio_init_device(struct virtio_dev *dev, uint64_t req_features)
{
int ret;
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
vtpci_reset(dev);
/* Tell the host we've noticed this device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
/* Tell the host we know how to drive the device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
if (virtio_negotiate_features(hw, req_features) < 0)
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
if (virtio_negotiate_features(dev, req_features) < 0)
return -1;
vtpci_read_dev_config(hw, offsetof(struct virtio_scsi_config, num_queues),
&hw->max_queues, sizeof(hw->max_queues));
vtpci_read_dev_config(dev, offsetof(struct virtio_scsi_config, num_queues),
&dev->max_queues, sizeof(dev->max_queues));
/* FIXME
* Hardcode num_queues to 3 until we add proper
* multi-queue support. This value should be limited
* by the number of cores assigned to SPDK
*/
hw->max_queues = 3;
dev->max_queues = 3;
ret = virtio_alloc_queues(hw);
ret = virtio_alloc_queues(dev);
if (ret < 0)
return ret;
vtpci_reinit_complete(hw);
vtpci_reinit_complete(dev);
return 0;
}
int
virtio_dev_start(struct virtio_hw *hw)
virtio_dev_start(struct virtio_dev *vdev)
{
struct virtnet_tx *txvq __rte_unused;
@@ -343,14 +343,14 @@ virtio_dev_start(struct virtio_hw *hw)
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
hw->started = 1;
vdev->started = 1;
return 0;
}
static struct virtio_hw *g_pci_hw = NULL;
struct virtio_hw *
struct virtio_dev *
get_pci_virtio_hw(void)
{
int ret;
@@ -361,11 +361,11 @@ get_pci_virtio_hw(void)
return NULL;
}
ret = vtpci_init(g_pci_hw->pci_dev, g_pci_hw);
ret = vtpci_init(g_pci_hw->pci_dev, &g_pci_hw->vdev);
if (ret)
return NULL;
return g_pci_hw;
return &g_pci_hw->vdev;
}
static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
@@ -374,6 +374,7 @@ static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct virtio_hw *hw;
hw = calloc(1, sizeof(*hw));
hw->vdev.is_hw = 1;
hw->pci_dev = pci_dev;
g_pci_hw = hw;

View File

@@ -37,12 +37,21 @@
#include <stdint.h>
#include <sys/uio.h>
#include "virtio_pci.h"
#define VIRTIO_MAX_RX_QUEUES 128U
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MIN_RX_BUFSIZE 64
struct virtio_dev {
struct virtqueue **vqs;
uint16_t started;
uint32_t max_queues;
uint8_t port_id;
uint64_t req_guest_features;
uint64_t guest_features;
int is_hw;
uint8_t modern;
};
struct virtio_req {
struct iovec *iov;
struct iovec iov_req;
@@ -66,9 +66,9 @@ uint16_t virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs,
uint16_t virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req);
int virtio_init_device(struct virtio_hw *hw, uint64_t req_features);
int virtio_dev_start(struct virtio_hw *hw);
struct virtio_hw *get_pci_virtio_hw(void);
int virtio_init_device(struct virtio_dev *hw, uint64_t req_features);
int virtio_dev_start(struct virtio_dev *hw);
struct virtio_dev *get_pci_virtio_hw(void);
void virtio_interrupt_handler(void *param);

View File

@@ -43,7 +43,7 @@
#include "virtio_logs.h"
#include "virtio_queue.h"
struct virtio_hw_internal virtio_hw_internal[128];
struct vtpci_internal virtio_hw_internal[128];
/*
* Following macros are derived from linux/pci_regs.h, however,
@@ -60,6 +60,9 @@ struct virtio_hw_internal virtio_hw_internal[128];
*/
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
#define virtio_dev_get_hw(hw) \
((struct virtio_hw *)((uintptr_t)(hw) - offsetof(struct virtio_hw, vdev)))
static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
@@ -88,27 +91,28 @@ check_vq_phys_addr_ok(struct virtqueue *vq)
* enforces this for the virtio-net stuff.
*/
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
legacy_read_dev_config(struct virtio_dev *dev, size_t offset,
void *dst, int length)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
#ifdef RTE_ARCH_PPC_64
int size;
while (length > 0) {
if (length >= 4) {
size = 4;
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
VIRTIO_PCI_CONFIG(dev) + offset);
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
} else if (length >= 2) {
size = 2;
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
VIRTIO_PCI_CONFIG(dev) + offset);
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
} else {
size = 1;
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
VIRTIO_PCI_CONFIG(dev) + offset);
}
dst = (char *)dst + size;
@@ -116,15 +120,16 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
rte_pci_ioport_read(VTPCI_IO(dev), dst, length,
VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
legacy_write_dev_config(struct virtio_dev *dev, size_t offset,
const void *src, int length)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
#ifdef RTE_ARCH_PPC_64
union {
uint32_t u32;
@@ -154,125 +159,125 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
rte_pci_ioport_write(VTPCI_IO(hw), src, length,
rte_pci_ioport_write(VTPCI_IO(dev), src, length,
VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
static uint64_t
legacy_get_features(struct virtio_hw *hw)
legacy_get_features(struct virtio_dev *dev)
{
uint32_t dst;
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
return dst;
}
static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
legacy_set_features(struct virtio_dev *dev, uint64_t features)
{
if ((features >> 32) != 0) {
PMD_DRV_LOG(ERR,
"only 32 bit features are allowed for legacy virtio!");
return;
}
rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
rte_pci_ioport_write(VTPCI_IO(dev), &features, 4,
VIRTIO_PCI_GUEST_FEATURES);
}
static uint8_t
legacy_get_status(struct virtio_hw *hw)
legacy_get_status(struct virtio_dev *dev)
{
uint8_t dst;
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_STATUS);
return dst;
}
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
legacy_set_status(struct virtio_dev *dev, uint8_t status)
{
rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
rte_pci_ioport_write(VTPCI_IO(dev), &status, 1, VIRTIO_PCI_STATUS);
}
static void
legacy_reset(struct virtio_hw *hw)
legacy_reset(struct virtio_dev *dev)
{
legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
legacy_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
}
static uint8_t
legacy_get_isr(struct virtio_hw *hw)
legacy_get_isr(struct virtio_dev *dev)
{
uint8_t dst;
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_ISR);
return dst;
}
/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
legacy_set_config_irq(struct virtio_dev *dev, uint16_t vec)
{
uint16_t dst;
rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
return dst;
}
static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
legacy_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
{
uint16_t dst;
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
return dst;
}
static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
legacy_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
{
uint16_t dst;
rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
rte_pci_ioport_write(VTPCI_IO(dev), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
return dst;
}
static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
uint32_t src;
if (!check_vq_phys_addr_ok(vq))
return -1;
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);
return 0;
}
static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
uint32_t src = 0;
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_NOTIFY);
}
@@ -301,9 +306,10 @@ io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
}
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
void *dst, int length)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
int i;
uint8_t *p;
uint8_t old_gen, new_gen;
@@ -320,9 +326,10 @@ modern_read_dev_config(struct virtio_hw *hw, size_t offset,
}
static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
const void *src, int length)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
int i;
const uint8_t *p = src;
@@ -331,8 +338,9 @@ modern_write_dev_config(struct virtio_hw *hw, size_t offset,
}
static uint64_t
modern_get_features(struct virtio_hw *hw)
modern_get_features(struct virtio_dev *dev)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
uint32_t features_lo, features_hi;
rte_write32(0, &hw->common_cfg->device_feature_select);
@@ -345,8 +353,10 @@ modern_get_features(struct virtio_hw *hw)
}
static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
modern_set_features(struct virtio_dev *dev, uint64_t features)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write32(0, &hw->common_cfg->guest_feature_select);
rte_write32(features & ((1ULL << 32) - 1),
&hw->common_cfg->guest_feature);
@@ -357,55 +367,68 @@ modern_set_features(struct virtio_hw *hw, uint64_t features)
}
static uint8_t
modern_get_status(struct virtio_hw *hw)
modern_get_status(struct virtio_dev *dev)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
return rte_read8(&hw->common_cfg->device_status);
}
static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
modern_set_status(struct virtio_dev *dev, uint8_t status)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write8(status, &hw->common_cfg->device_status);
}
static void
modern_reset(struct virtio_hw *hw)
modern_reset(struct virtio_dev *dev)
{
modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
modern_get_status(hw);
modern_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
modern_get_status(dev);
}
static uint8_t
modern_get_isr(struct virtio_hw *hw)
modern_get_isr(struct virtio_dev *dev)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
return rte_read8(hw->isr);
}
static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
modern_set_config_irq(struct virtio_dev *dev, uint16_t vec)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write16(vec, &hw->common_cfg->msix_config);
return rte_read16(&hw->common_cfg->msix_config);
}
static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
modern_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
rte_write16(vec, &hw->common_cfg->queue_msix_vector);
return rte_read16(&hw->common_cfg->queue_msix_vector);
}
static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
modern_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write16(queue_id, &hw->common_cfg->queue_select);
return rte_read16(&hw->common_cfg->queue_size);
}
static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
uint64_t desc_addr, avail_addr, used_addr;
uint16_t notify_off;
@@ -444,8 +467,10 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
}
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
struct virtio_hw *hw = virtio_dev_get_hw(dev);
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
@@ -459,7 +484,7 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
}
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
modern_notify_queue(struct virtio_dev *dev __rte_unused, struct virtqueue *vq)
{
rte_write16(vq->vq_queue_index, vq->notify_addr);
}
@@ -483,21 +508,21 @@ const struct virtio_pci_ops modern_ops = {
void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
vtpci_read_dev_config(struct virtio_dev *dev, size_t offset,
void *dst, int length)
{
VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
VTPCI_OPS(dev)->read_dev_cfg(dev, offset, dst, length);
}
void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
vtpci_write_dev_config(struct virtio_dev *dev, size_t offset,
const void *src, int length)
{
VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
VTPCI_OPS(dev)->write_dev_cfg(dev, offset, src, length);
}
uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
vtpci_negotiate_features(struct virtio_dev *dev, uint64_t host_features)
{
uint64_t features;
@@ -505,45 +530,45 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
* Limit negotiated features to what the driver, virtqueue, and
* host all support.
*/
features = host_features & hw->guest_features;
VTPCI_OPS(hw)->set_features(hw, features);
features = host_features & dev->guest_features;
VTPCI_OPS(dev)->set_features(dev, features);
return features;
}
void
vtpci_reset(struct virtio_hw *hw)
vtpci_reset(struct virtio_dev *dev)
{
VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
VTPCI_OPS(dev)->set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
/* flush status write */
VTPCI_OPS(hw)->get_status(hw);
VTPCI_OPS(dev)->get_status(dev);
}
void
vtpci_reinit_complete(struct virtio_hw *hw)
vtpci_reinit_complete(struct virtio_dev *dev)
{
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}
void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
vtpci_set_status(struct virtio_dev *dev, uint8_t status)
{
if (status != VIRTIO_CONFIG_STATUS_RESET)
status |= VTPCI_OPS(hw)->get_status(hw);
status |= VTPCI_OPS(dev)->get_status(dev);
VTPCI_OPS(hw)->set_status(hw, status);
VTPCI_OPS(dev)->set_status(dev, status);
}
uint8_t
vtpci_get_status(struct virtio_hw *hw)
vtpci_get_status(struct virtio_dev *dev)
{
return VTPCI_OPS(hw)->get_status(hw);
return VTPCI_OPS(dev)->get_status(dev);
}
uint8_t
vtpci_isr(struct virtio_hw *hw)
vtpci_isr(struct virtio_dev *dev)
{
return VTPCI_OPS(hw)->get_isr(hw);
return VTPCI_OPS(dev)->get_isr(dev);
}
static void *
@@ -668,8 +693,10 @@ next:
* Return 0 on success.
*/
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
vtpci_init(struct rte_pci_device *dev, struct virtio_dev *vdev)
{
struct virtio_hw *hw = virtio_dev_get_hw(vdev);
/*
* Try to read the virtio pci caps, which exist
* only on modern pci devices. If that fails, we fall back to legacy
@@ -677,8 +704,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
*/
if (virtio_read_caps(dev, hw) == 0) {
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
hw->modern = 1;
virtio_hw_internal[vdev->port_id].vtpci_ops = &modern_ops;
vdev->modern = 1;
return 0;
}
@@ -697,8 +724,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
}
#endif
virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
hw->modern = 0;
virtio_hw_internal[vdev->port_id].vtpci_ops = &legacy_ops;
vdev->modern = 0;
return 0;
}

View File

@@ -38,6 +38,8 @@
#include <rte_pci.h>
#include "virtio_dev.h"
struct virtqueue;
/* VirtIO PCI vendor/device ID. */
@@ -183,68 +185,57 @@ struct virtio_pci_common_cfg {
uint32_t queue_used_hi; /* read-write */
};
struct virtio_hw;
struct virtio_pci_ops {
void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset,
void (*read_dev_cfg)(struct virtio_dev *hw, size_t offset,
void *dst, int len);
void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset,
void (*write_dev_cfg)(struct virtio_dev *hw, size_t offset,
const void *src, int len);
void (*reset)(struct virtio_hw *hw);
void (*reset)(struct virtio_dev *hw);
uint8_t (*get_status)(struct virtio_hw *hw);
void (*set_status)(struct virtio_hw *hw, uint8_t status);
uint8_t (*get_status)(struct virtio_dev *hw);
void (*set_status)(struct virtio_dev *hw, uint8_t status);
uint64_t (*get_features)(struct virtio_hw *hw);
void (*set_features)(struct virtio_hw *hw, uint64_t features);
uint64_t (*get_features)(struct virtio_dev *hw);
void (*set_features)(struct virtio_dev *hw, uint64_t features);
uint8_t (*get_isr)(struct virtio_hw *hw);
uint8_t (*get_isr)(struct virtio_dev *hw);
uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
uint16_t (*set_config_irq)(struct virtio_dev *hw, uint16_t vec);
uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
uint16_t (*set_queue_irq)(struct virtio_dev *hw, struct virtqueue *vq,
uint16_t vec);
uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
uint16_t (*get_queue_num)(struct virtio_dev *hw, uint16_t queue_id);
int (*setup_queue)(struct virtio_dev *hw, struct virtqueue *vq);
void (*del_queue)(struct virtio_dev *hw, struct virtqueue *vq);
void (*notify_queue)(struct virtio_dev *hw, struct virtqueue *vq);
};
struct virtio_hw {
uint64_t req_guest_features;
uint64_t guest_features;
uint32_t max_queues;
uint16_t started;
struct virtio_dev vdev;
uint8_t use_msix;
uint8_t modern;
uint8_t port_id;
uint32_t notify_off_multiplier;
uint8_t *isr;
uint16_t *notify_base;
struct virtio_pci_common_cfg *common_cfg;
struct rte_pci_device *pci_dev;
struct virtio_scsi_config *dev_cfg;
void *virtio_user_dev;
struct virtqueue **vqs;
};
/*
* While virtio_hw is stored in shared memory, this structure stores
* some information that may vary locally in the multi-process model.
* For example, the vtpci_ops pointer.
*/
struct virtio_hw_internal {
struct vtpci_internal {
const struct virtio_pci_ops *vtpci_ops;
struct rte_pci_ioport io;
};
#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->port_id].vtpci_ops)
#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->port_id].io)
#define VTPCI_OPS(dev) (virtio_hw_internal[(dev)->port_id].vtpci_ops)
#define VTPCI_IO(dev) (&virtio_hw_internal[(dev)->port_id].io)
extern struct virtio_hw_internal virtio_hw_internal[128];
extern struct vtpci_internal virtio_hw_internal[128];
/*
* How many bits to shift physical queue address written to QUEUE_PFN.
@@ -256,29 +247,29 @@ extern struct virtio_hw_internal virtio_hw_internal[128];
#define VIRTIO_PCI_VRING_ALIGN 4096
static inline int
vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
vtpci_with_feature(struct virtio_dev *dev, uint64_t bit)
{
return (hw->guest_features & (1ULL << bit)) != 0;
return (dev->guest_features & (1ULL << bit)) != 0;
}
/*
* Function declaration from virtio_pci.c
*/
int vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw);
void vtpci_reset(struct virtio_hw *);
int vtpci_init(struct rte_pci_device *dev, struct virtio_dev *vdev);
void vtpci_reset(struct virtio_dev *);
void vtpci_reinit_complete(struct virtio_hw *);
void vtpci_reinit_complete(struct virtio_dev *);
uint8_t vtpci_get_status(struct virtio_hw *);
void vtpci_set_status(struct virtio_hw *, uint8_t);
uint8_t vtpci_get_status(struct virtio_dev *);
void vtpci_set_status(struct virtio_dev *, uint8_t);
uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
uint64_t vtpci_negotiate_features(struct virtio_dev *, uint64_t);
void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
void vtpci_write_dev_config(struct virtio_dev *, size_t, const void *, int);
void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
void vtpci_read_dev_config(struct virtio_dev *, size_t, void *, int);
uint8_t vtpci_isr(struct virtio_hw *);
uint8_t vtpci_isr(struct virtio_dev *);
extern const struct virtio_pci_ops virtio_user_ops;

View File

@@ -119,7 +119,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct virtio_req **rx_pkts,
static inline void
virtqueue_iov_to_desc(struct virtqueue *vq, uint16_t desc_idx, struct iovec *iov)
{
if (vq->hw->virtio_user_dev) {
if (!vq->vdev->is_hw) {
vq->vq_ring.desc[desc_idx].addr = (uintptr_t)iov->iov_base;
} else {
vq->vq_ring.desc[desc_idx].addr = spdk_vtophys(iov->iov_base);
@@ -193,7 +193,7 @@ virtqueue_enqueue_xmit(struct virtqueue *vq, struct virtio_req *req)
uint16_t
virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkts)
{
struct virtio_hw *hw = vq->hw;
struct virtio_dev *vdev = vq->vdev;
struct virtio_req *rxm;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -201,7 +201,7 @@ virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkt
uint32_t i;
nb_rx = 0;
if (unlikely(hw->started == 0))
if (unlikely(vdev->started == 0))
return nb_rx;
nb_used = VIRTQUEUE_NUSED(vq);
@@ -232,9 +232,9 @@ virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkt
uint16_t
virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req)
{
struct virtio_hw *hw = vq->hw;
struct virtio_dev *vdev = vq->vdev;
if (unlikely(hw->started == 0))
if (unlikely(vdev->started == 0))
return 0;
virtio_rmb();

View File

@@ -42,7 +42,7 @@
#include <rte_memzone.h>
#include <rte_mempool.h>
#include "virtio_pci.h"
#include "virtio_dev.h"
#include "virtio_logs.h"
/*
@@ -72,7 +72,7 @@ struct vq_desc_extra {
};
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct virtio_dev *vdev; /**< owner of this virtqueue */
struct vring vq_ring; /**< vring keeping desc, used and avail */
/**
* Last consumed descriptor in the used table,
@@ -175,7 +175,7 @@ virtqueue_notify(struct virtqueue *vq)
* For virtio on IA, the notification is through io port operation
* which is a serialization instruction itself.
*/
VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
VTPCI_OPS(vq->vdev)->notify_queue(vq->vdev, vq);
}
#endif /* _VIRTQUEUE_H_ */

View File

@@ -50,74 +50,74 @@
#include "virtio_queue.h"
#include "virtio_user/virtio_user_dev.h"
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
#define virtio_dev_get_user_dev(dev) \
((struct virtio_user_dev *)((uintptr_t)(dev) - offsetof(struct virtio_user_dev, vdev)))
static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
virtio_user_read_dev_config(struct virtio_dev *vdev, size_t offset,
void *dst, int length)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
if (offset == offsetof(struct virtio_scsi_config, num_queues))
*(uint16_t *)dst = dev->max_queues;
}
static void
virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
virtio_user_write_dev_config(struct virtio_dev *vdev, size_t offset,
const void *src, int length)
{
PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d", offset, length);
}
static void
virtio_user_reset(struct virtio_hw *hw)
virtio_user_reset(struct virtio_dev *vdev)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
virtio_user_stop_device(dev);
}
static void
virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
virtio_user_set_status(struct virtio_dev *vdev, uint8_t status)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
virtio_user_start_device(dev);
else if (status == VIRTIO_CONFIG_STATUS_RESET)
virtio_user_reset(hw);
virtio_user_reset(vdev);
dev->status = status;
}
static uint8_t
virtio_user_get_status(struct virtio_hw *hw)
virtio_user_get_status(struct virtio_dev *vdev)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
return dev->status;
}
static uint64_t
virtio_user_get_features(struct virtio_hw *hw)
virtio_user_get_features(struct virtio_dev *vdev)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
/* unmask feature bits defined in vhost user protocol */
return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}
static void
virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
virtio_user_set_features(struct virtio_dev *vdev, uint64_t features)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
dev->features = features & dev->device_features;
}
static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
virtio_user_get_isr(struct virtio_dev *vdev __rte_unused)
{
/* rxq interrupts and config interrupt are separated in virtio-user,
* here we only report config change.
@@ -126,14 +126,14 @@ virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
}
static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
virtio_user_set_config_irq(struct virtio_dev *vdev __rte_unused,
uint16_t vec __rte_unused)
{
return 0;
}
static uint16_t
virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
virtio_user_set_queue_irq(struct virtio_dev *vdev __rte_unused,
struct virtqueue *vq __rte_unused,
uint16_t vec)
{
@@ -146,18 +146,18 @@ virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
* max supported queues.
*/
static uint16_t
virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
virtio_user_get_queue_num(struct virtio_dev *vdev, uint16_t queue_id __rte_unused)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
/* Currently, each queue has the same queue size */
return dev->queue_size;
}
static int
virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
virtio_user_setup_queue(struct virtio_dev *vdev, struct virtqueue *vq)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
uint16_t queue_idx = vq->vq_queue_index;
uint64_t desc_addr, avail_addr, used_addr;
@@ -176,7 +176,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
}
static void
virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
virtio_user_del_queue(struct virtio_dev *vdev, struct virtqueue *vq)
{
/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
* correspondingly stops the ioeventfds, and resets the status of
@@ -187,17 +187,17 @@ virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
* Here we just care about what information to deliver to vhost-user
* or vhost-kernel. So we just close ioeventfd for now.
*/
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
close(dev->callfds[vq->vq_queue_index]);
close(dev->kickfds[vq->vq_queue_index]);
}
static void
virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
virtio_user_notify_queue(struct virtio_dev *vdev, struct virtqueue *vq)
{
uint64_t buf = 1;
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
PMD_DRV_LOG(ERR, "failed to kick backend: %s",

View File

@@ -233,18 +233,18 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
(1ULL << VIRTIO_SCSI_F_INOUT | \
1ULL << VIRTIO_F_VERSION_1)
struct virtio_hw *
struct virtio_dev *
virtio_user_dev_init(char *path, int queues, int queue_size)
{
struct virtio_hw *hw;
struct virtio_dev *vdev;
struct virtio_user_dev *dev;
uint64_t max_queues;
hw = calloc(1, sizeof(*hw));
dev = calloc(1, sizeof(struct virtio_user_dev));
hw->virtio_user_dev = dev;
dev = calloc(1, sizeof(*dev));
vdev = &dev->vdev;
vdev->is_hw = 0;
virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
virtio_hw_internal[0].vtpci_ops = &virtio_user_ops;
snprintf(dev->path, PATH_MAX, "%s", path);
/* Account for control and event queue. */
@@ -279,10 +279,9 @@ virtio_user_dev_init(char *path, int queues, int queue_size)
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
return hw;
return vdev;
err:
free(hw);
free(dev);
return NULL;
}

View File

@@ -37,12 +37,15 @@
#include <linux/virtio_ring.h>
#include <limits.h>
#include "../virtio_pci.h"
#include "vhost.h"
#include "../virtio_dev.h"
#define VIRTIO_MAX_VIRTQUEUES 0x100
struct virtio_user_dev {
struct virtio_dev vdev;
/* for vhost_user backend */
int vhostfd;
@@ -71,7 +74,7 @@ struct virtio_user_dev {
int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
struct virtio_hw *virtio_user_dev_init(char *path, int queues, int queue_size);
struct virtio_dev *virtio_user_dev_init(char *path, int queues, int queue_size);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
#endif