rte_virtio: added virtio_dev struct
Previously virtio_hw was managing both VirtIO PCI and vhost-user devices. There is now virtio_dev, a common part for both backends; virtio_hw is only used for PCI. Note that this patch does not introduce another abstraction layer - it only unifies an already existing one. Previously virtio_user_dev was built on top of virtio_hw, with most PCI fields just hanging there unused. Now both virtio_user_dev and virtio_hw are built on top of virtio_dev.

Change-Id: Ida25defc0063055a81cf4039c9b85470b9880bc3
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/376966
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
parent 72ebd59086
commit 09af33b6a4
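The pattern the patch relies on is struct embedding: both backends place a `struct virtio_dev` inside their own private struct, and backend-specific code recovers the outer struct with an offsetof()-based lookup, mirroring the `virtio_dev_get_hw()` / `virtio_dev_get_user_dev()` macros in the diff below. A minimal, self-contained sketch of that layout (trimmed-down field set for illustration only, not the actual SPDK definitions):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Common part shared by both backends (illustrative subset of fields). */
struct virtio_dev {
	uint32_t max_queues;
	uint8_t  modern;
	int      is_hw;	/* 1 for PCI, 0 for vhost-user */
};

/* PCI backend embeds the common struct. */
struct virtio_hw {
	struct virtio_dev vdev;
	uint8_t use_msix;
	void *pci_dev;	/* stands in for struct rte_pci_device * */
};

/* vhost-user backend embeds it as well. */
struct virtio_user_dev {
	struct virtio_dev vdev;
	uint64_t device_features;
	char path[256];
};

/* Recover the containing backend struct from the common pointer -
 * the same offsetof() trick the patch uses. */
#define virtio_dev_get_hw(dev) \
	((struct virtio_hw *)((uintptr_t)(dev) - offsetof(struct virtio_hw, vdev)))

int main(void)
{
	struct virtio_hw hw = { .vdev = { .is_hw = 1 }, .use_msix = 0 };
	struct virtio_dev *dev = &hw.vdev;	/* what generic code sees */

	struct virtio_hw *back = virtio_dev_get_hw(dev);
	printf("is_hw=%d, recovered hw=%p, original hw=%p\n",
	       dev->is_hw, (void *)back, (void *)&hw);
	return 0;
}
```

Generic code (virtqueue setup, feature negotiation, start/stop) then only ever takes a `struct virtio_dev *`; the backend-specific `virtio_pci_ops` callbacks convert back to `virtio_hw` or `virtio_user_dev` only when they need their private state.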
@ -68,7 +68,7 @@ struct virtio_scsi_io_ctx {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct virtio_scsi_scan_base {
|
struct virtio_scsi_scan_base {
|
||||||
struct virtio_hw *hw;
|
struct virtio_dev *vdev;
|
||||||
struct spdk_bdev_poller *scan_poller;
|
struct spdk_bdev_poller *scan_poller;
|
||||||
|
|
||||||
/* Currently queried target */
|
/* Currently queried target */
|
||||||
@ -84,14 +84,14 @@ struct virtio_scsi_scan_base {
|
|||||||
|
|
||||||
struct virtio_scsi_disk {
|
struct virtio_scsi_disk {
|
||||||
struct spdk_bdev bdev;
|
struct spdk_bdev bdev;
|
||||||
struct virtio_hw *hw;
|
struct virtio_dev *vdev;
|
||||||
uint64_t num_blocks;
|
uint64_t num_blocks;
|
||||||
uint32_t block_size;
|
uint32_t block_size;
|
||||||
TAILQ_ENTRY(virtio_scsi_disk) link;
|
TAILQ_ENTRY(virtio_scsi_disk) link;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bdev_virtio_io_channel {
|
struct bdev_virtio_io_channel {
|
||||||
struct virtio_hw *hw;
|
struct virtio_dev *vdev;
|
||||||
struct spdk_bdev_poller *poller;
|
struct spdk_bdev_poller *poller;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -148,7 +148,7 @@ bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
|||||||
to_be16(&req->cdb[7], bdev_io->u.write.num_blocks);
|
to_be16(&req->cdb[7], bdev_io->u.write.num_blocks);
|
||||||
}
|
}
|
||||||
|
|
||||||
virtio_xmit_pkts(disk->hw->vqs[2], vreq);
|
virtio_xmit_pkts(disk->vdev->vqs[2], vreq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
|
||||||
@ -199,7 +199,7 @@ bdev_virtio_get_io_channel(void *ctx)
|
|||||||
{
|
{
|
||||||
struct virtio_scsi_disk *disk = ctx;
|
struct virtio_scsi_disk *disk = ctx;
|
||||||
|
|
||||||
return spdk_get_io_channel(&disk->hw);
|
return spdk_get_io_channel(&disk->vdev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
@ -260,7 +260,7 @@ bdev_virtio_poll(void *arg)
|
|||||||
struct virtio_req *req[32];
|
struct virtio_req *req[32];
|
||||||
uint16_t i, cnt;
|
uint16_t i, cnt;
|
||||||
|
|
||||||
cnt = virtio_recv_pkts(ch->hw->vqs[2], req, SPDK_COUNTOF(req));
|
cnt = virtio_recv_pkts(ch->vdev->vqs[2], req, SPDK_COUNTOF(req));
|
||||||
for (i = 0; i < cnt; ++i) {
|
for (i = 0; i < cnt; ++i) {
|
||||||
bdev_virtio_io_cpl(req[i]);
|
bdev_virtio_io_cpl(req[i]);
|
||||||
}
|
}
|
||||||
@ -269,10 +269,10 @@ bdev_virtio_poll(void *arg)
|
|||||||
static int
|
static int
|
||||||
bdev_virtio_create_cb(void *io_device, void *ctx_buf)
|
bdev_virtio_create_cb(void *io_device, void *ctx_buf)
|
||||||
{
|
{
|
||||||
struct virtio_hw **hw = io_device;
|
struct virtio_dev **vdev = io_device;
|
||||||
struct bdev_virtio_io_channel *ch = ctx_buf;
|
struct bdev_virtio_io_channel *ch = ctx_buf;
|
||||||
|
|
||||||
ch->hw = *hw;
|
ch->vdev = *vdev;
|
||||||
spdk_bdev_poller_start(&ch->poller, bdev_virtio_poll, ch,
|
spdk_bdev_poller_start(&ch->poller, bdev_virtio_poll, ch,
|
||||||
spdk_env_get_current_core(), 0);
|
spdk_env_get_current_core(), 0);
|
||||||
return 0;
|
return 0;
|
||||||
@ -301,7 +301,7 @@ scan_target_finish(struct virtio_scsi_scan_base *base)
|
|||||||
|
|
||||||
while ((disk = TAILQ_FIRST(&base->found_disks))) {
|
while ((disk = TAILQ_FIRST(&base->found_disks))) {
|
||||||
TAILQ_REMOVE(&base->found_disks, disk, link);
|
TAILQ_REMOVE(&base->found_disks, disk, link);
|
||||||
spdk_io_device_register(&disk->hw, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
|
spdk_io_device_register(&disk->vdev, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
|
||||||
sizeof(struct bdev_virtio_io_channel));
|
sizeof(struct bdev_virtio_io_channel));
|
||||||
spdk_bdev_register(&disk->bdev);
|
spdk_bdev_register(&disk->bdev);
|
||||||
}
|
}
|
||||||
@ -334,7 +334,7 @@ process_scan_inquiry(struct virtio_scsi_scan_base *base, struct virtio_req *vreq
|
|||||||
iov[0].iov_len = 32;
|
iov[0].iov_len = 32;
|
||||||
to_be32(&req->cdb[10], iov[0].iov_len);
|
to_be32(&req->cdb[10], iov[0].iov_len);
|
||||||
|
|
||||||
virtio_xmit_pkts(base->hw->vqs[2], vreq);
|
virtio_xmit_pkts(base->vdev->vqs[2], vreq);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -360,7 +360,7 @@ process_read_cap(struct virtio_scsi_scan_base *base, struct virtio_req *vreq)
|
|||||||
disk->num_blocks = from_be64((uint64_t *)(vreq->iov[0].iov_base)) + 1;
|
disk->num_blocks = from_be64((uint64_t *)(vreq->iov[0].iov_base)) + 1;
|
||||||
disk->block_size = from_be32((uint32_t *)(vreq->iov[0].iov_base + 8));
|
disk->block_size = from_be32((uint32_t *)(vreq->iov[0].iov_base + 8));
|
||||||
|
|
||||||
disk->hw = base->hw;
|
disk->vdev = base->vdev;
|
||||||
|
|
||||||
bdev = &disk->bdev;
|
bdev = &disk->bdev;
|
||||||
bdev->name = spdk_sprintf_alloc("Virtio0");
|
bdev->name = spdk_sprintf_alloc("Virtio0");
|
||||||
@ -416,7 +416,7 @@ bdev_scan_poll(void *arg)
|
|||||||
struct virtio_req *req;
|
struct virtio_req *req;
|
||||||
uint16_t cnt;
|
uint16_t cnt;
|
||||||
|
|
||||||
cnt = virtio_recv_pkts(base->hw->vqs[2], &req, 1);
|
cnt = virtio_recv_pkts(base->vdev->vqs[2], &req, 1);
|
||||||
if (cnt > 0) {
|
if (cnt > 0) {
|
||||||
process_scan_resp(base, req);
|
process_scan_resp(base, req);
|
||||||
}
|
}
|
||||||
@ -456,7 +456,7 @@ scan_target(struct virtio_scsi_scan_base *base)
|
|||||||
cdb->opcode = SPDK_SPC_INQUIRY;
|
cdb->opcode = SPDK_SPC_INQUIRY;
|
||||||
cdb->alloc_len[1] = 255;
|
cdb->alloc_len[1] = 255;
|
||||||
|
|
||||||
virtio_xmit_pkts(base->hw->vqs[2], vreq);
|
virtio_xmit_pkts(base->vdev->vqs[2], vreq);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
@ -464,7 +464,7 @@ bdev_virtio_initialize(void)
|
|||||||
{
|
{
|
||||||
struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Virtio");
|
struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Virtio");
|
||||||
struct virtio_scsi_scan_base *base;
|
struct virtio_scsi_scan_base *base;
|
||||||
struct virtio_hw *hw = NULL;
|
struct virtio_dev *vdev = NULL;
|
||||||
char *type, *path;
|
char *type, *path;
|
||||||
uint32_t i;
|
uint32_t i;
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
@ -485,16 +485,16 @@ bdev_virtio_initialize(void)
|
|||||||
SPDK_ERRLOG("No path specified for index %d\n", i);
|
SPDK_ERRLOG("No path specified for index %d\n", i);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
hw = virtio_user_dev_init(path, 1, 512);
|
vdev = virtio_user_dev_init(path, 1, 512);
|
||||||
} else if (!strcmp("Pci", type)) {
|
} else if (!strcmp("Pci", type)) {
|
||||||
hw = get_pci_virtio_hw();
|
vdev = get_pci_virtio_hw();
|
||||||
} else {
|
} else {
|
||||||
SPDK_ERRLOG("Invalid type %s specified for index %d\n", type, i);
|
SPDK_ERRLOG("Invalid type %s specified for index %d\n", type, i);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (hw == NULL) {
|
if (vdev == NULL) {
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -506,10 +506,10 @@ bdev_virtio_initialize(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* TODO check rc, add virtio_dev_deinit() */
|
/* TODO check rc, add virtio_dev_deinit() */
|
||||||
virtio_init_device(hw, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
|
virtio_init_device(vdev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
|
||||||
virtio_dev_start(hw);
|
virtio_dev_start(vdev);
|
||||||
|
|
||||||
base->hw = hw;
|
base->vdev = vdev;
|
||||||
TAILQ_INIT(&base->found_disks);
|
TAILQ_INIT(&base->found_disks);
|
||||||
|
|
||||||
spdk_bdev_poller_start(&base->scan_poller, bdev_scan_poll, base,
|
spdk_bdev_poller_start(&base->scan_poller, bdev_scan_poll, base,
|
||||||
|
@ -67,9 +67,9 @@ static const struct rte_pci_id pci_id_virtio_map[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
virtio_get_nr_vq(struct virtio_hw *hw)
|
virtio_get_nr_vq(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
return hw->max_queues;
|
return dev->max_queues;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -102,7 +102,7 @@ virtio_init_vring(struct virtqueue *vq)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
|
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
|
||||||
{
|
{
|
||||||
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
|
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
|
||||||
const struct rte_memzone *mz = NULL;
|
const struct rte_memzone *mz = NULL;
|
||||||
@ -116,7 +116,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
|
|||||||
* Read the virtqueue size from the Queue Size field
|
* Read the virtqueue size from the Queue Size field
|
||||||
* Always power of 2 and if 0 virtqueue does not exist
|
* Always power of 2 and if 0 virtqueue does not exist
|
||||||
*/
|
*/
|
||||||
vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
|
vq_size = VTPCI_OPS(dev)->get_queue_num(dev, vtpci_queue_idx);
|
||||||
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
|
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
|
||||||
if (vq_size == 0) {
|
if (vq_size == 0) {
|
||||||
PMD_INIT_LOG(ERR, "virtqueue does not exist");
|
PMD_INIT_LOG(ERR, "virtqueue does not exist");
|
||||||
@ -129,7 +129,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
|
|||||||
}
|
}
|
||||||
|
|
||||||
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
|
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
|
||||||
hw->port_id, vtpci_queue_idx);
|
dev->port_id, vtpci_queue_idx);
|
||||||
|
|
||||||
size = RTE_ALIGN_CEIL(sizeof(*vq) +
|
size = RTE_ALIGN_CEIL(sizeof(*vq) +
|
||||||
vq_size * sizeof(struct vq_desc_extra),
|
vq_size * sizeof(struct vq_desc_extra),
|
||||||
@ -141,9 +141,9 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
|
|||||||
PMD_INIT_LOG(ERR, "can not allocate vq");
|
PMD_INIT_LOG(ERR, "can not allocate vq");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
hw->vqs[vtpci_queue_idx] = vq;
|
dev->vqs[vtpci_queue_idx] = vq;
|
||||||
|
|
||||||
vq->hw = hw;
|
vq->vdev = dev;
|
||||||
vq->vq_queue_index = vtpci_queue_idx;
|
vq->vq_queue_index = vtpci_queue_idx;
|
||||||
vq->vq_nentries = vq_size;
|
vq->vq_nentries = vq_size;
|
||||||
|
|
||||||
@ -180,7 +180,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
|
|||||||
|
|
||||||
vq->mz = mz;
|
vq->mz = mz;
|
||||||
|
|
||||||
if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
|
if (VTPCI_OPS(dev)->setup_queue(dev, vq) < 0) {
|
||||||
PMD_INIT_LOG(ERR, "setup_queue failed");
|
PMD_INIT_LOG(ERR, "setup_queue failed");
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
@ -195,47 +195,47 @@ fail_q_alloc:
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
virtio_free_queues(struct virtio_hw *hw)
|
virtio_free_queues(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
uint16_t nr_vq = virtio_get_nr_vq(hw);
|
uint16_t nr_vq = virtio_get_nr_vq(dev);
|
||||||
struct virtqueue *vq;
|
struct virtqueue *vq;
|
||||||
uint16_t i;
|
uint16_t i;
|
||||||
|
|
||||||
if (hw->vqs == NULL)
|
if (dev->vqs == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
for (i = 0; i < nr_vq; i++) {
|
for (i = 0; i < nr_vq; i++) {
|
||||||
vq = hw->vqs[i];
|
vq = dev->vqs[i];
|
||||||
if (!vq)
|
if (!vq)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
rte_memzone_free(vq->mz);
|
rte_memzone_free(vq->mz);
|
||||||
|
|
||||||
rte_free(vq);
|
rte_free(vq);
|
||||||
hw->vqs[i] = NULL;
|
dev->vqs[i] = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
rte_free(hw->vqs);
|
rte_free(dev->vqs);
|
||||||
hw->vqs = NULL;
|
dev->vqs = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
virtio_alloc_queues(struct virtio_hw *hw)
|
virtio_alloc_queues(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
uint16_t nr_vq = virtio_get_nr_vq(hw);
|
uint16_t nr_vq = virtio_get_nr_vq(dev);
|
||||||
uint16_t i;
|
uint16_t i;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
|
dev->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
|
||||||
if (!hw->vqs) {
|
if (!dev->vqs) {
|
||||||
PMD_INIT_LOG(ERR, "failed to allocate vqs");
|
PMD_INIT_LOG(ERR, "failed to allocate vqs");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < nr_vq; i++) {
|
for (i = 0; i < nr_vq; i++) {
|
||||||
ret = virtio_init_queue(hw, i);
|
ret = virtio_init_queue(dev, i);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
virtio_free_queues(hw);
|
virtio_free_queues(dev);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -244,7 +244,7 @@ virtio_alloc_queues(struct virtio_hw *hw)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
|
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
|
||||||
{
|
{
|
||||||
uint64_t host_features;
|
uint64_t host_features;
|
||||||
|
|
||||||
@ -253,7 +253,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
|
|||||||
req_features);
|
req_features);
|
||||||
|
|
||||||
/* Read device(host) feature bits */
|
/* Read device(host) feature bits */
|
||||||
host_features = VTPCI_OPS(hw)->get_features(hw);
|
host_features = VTPCI_OPS(dev)->get_features(dev);
|
||||||
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
|
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
|
||||||
host_features);
|
host_features);
|
||||||
|
|
||||||
@ -261,66 +261,66 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
|
|||||||
* Negotiate features: Subset of device feature bits are written back
|
* Negotiate features: Subset of device feature bits are written back
|
||||||
* guest feature bits.
|
* guest feature bits.
|
||||||
*/
|
*/
|
||||||
hw->guest_features = req_features;
|
dev->guest_features = req_features;
|
||||||
hw->guest_features = vtpci_negotiate_features(hw, host_features);
|
dev->guest_features = vtpci_negotiate_features(dev, host_features);
|
||||||
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
|
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
|
||||||
hw->guest_features);
|
dev->guest_features);
|
||||||
|
|
||||||
if (hw->modern) {
|
if (dev->modern) {
|
||||||
if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
|
if (!vtpci_with_feature(dev, VIRTIO_F_VERSION_1)) {
|
||||||
PMD_INIT_LOG(ERR,
|
PMD_INIT_LOG(ERR,
|
||||||
"VIRTIO_F_VERSION_1 features is not enabled.");
|
"VIRTIO_F_VERSION_1 features is not enabled.");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
|
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FEATURES_OK);
|
||||||
if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
|
if (!(vtpci_get_status(dev) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
|
||||||
PMD_INIT_LOG(ERR,
|
PMD_INIT_LOG(ERR,
|
||||||
"failed to set FEATURES_OK status!");
|
"failed to set FEATURES_OK status!");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hw->req_guest_features = req_features;
|
dev->req_guest_features = req_features;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* reset device and renegotiate features if needed */
|
/* reset device and renegotiate features if needed */
|
||||||
int
|
int
|
||||||
virtio_init_device(struct virtio_hw *hw, uint64_t req_features)
|
virtio_init_device(struct virtio_dev *dev, uint64_t req_features)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* Reset the device although not necessary at startup */
|
/* Reset the device although not necessary at startup */
|
||||||
vtpci_reset(hw);
|
vtpci_reset(dev);
|
||||||
|
|
||||||
/* Tell the host we've noticed this device. */
|
/* Tell the host we've noticed this device. */
|
||||||
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
|
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
|
||||||
|
|
||||||
/* Tell the host we've known how to drive the device. */
|
/* Tell the host we've known how to drive the device. */
|
||||||
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
|
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
|
||||||
if (virtio_negotiate_features(hw, req_features) < 0)
|
if (virtio_negotiate_features(dev, req_features) < 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
vtpci_read_dev_config(hw, offsetof(struct virtio_scsi_config, num_queues),
|
vtpci_read_dev_config(dev, offsetof(struct virtio_scsi_config, num_queues),
|
||||||
&hw->max_queues, sizeof(hw->max_queues));
|
&dev->max_queues, sizeof(dev->max_queues));
|
||||||
/* FIXME
|
/* FIXME
|
||||||
* Hardcode num_queues to 3 until we add proper
|
* Hardcode num_queues to 3 until we add proper
|
||||||
* mutli-queue support. This value should be limited
|
* mutli-queue support. This value should be limited
|
||||||
* by number of cores assigned to SPDK
|
* by number of cores assigned to SPDK
|
||||||
*/
|
*/
|
||||||
hw->max_queues = 3;
|
dev->max_queues = 3;
|
||||||
|
|
||||||
ret = virtio_alloc_queues(hw);
|
ret = virtio_alloc_queues(dev);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
vtpci_reinit_complete(hw);
|
vtpci_reinit_complete(dev);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
virtio_dev_start(struct virtio_hw *hw)
|
virtio_dev_start(struct virtio_dev *vdev)
|
||||||
{
|
{
|
||||||
struct virtnet_tx *txvq __rte_unused;
|
struct virtnet_tx *txvq __rte_unused;
|
||||||
|
|
||||||
@ -343,14 +343,14 @@ virtio_dev_start(struct virtio_hw *hw)
|
|||||||
|
|
||||||
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
|
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
|
||||||
|
|
||||||
hw->started = 1;
|
vdev->started = 1;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct virtio_hw *g_pci_hw = NULL;
|
static struct virtio_hw *g_pci_hw = NULL;
|
||||||
|
|
||||||
struct virtio_hw *
|
struct virtio_dev *
|
||||||
get_pci_virtio_hw(void)
|
get_pci_virtio_hw(void)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
@ -361,11 +361,11 @@ get_pci_virtio_hw(void)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = vtpci_init(g_pci_hw->pci_dev, g_pci_hw);
|
ret = vtpci_init(g_pci_hw->pci_dev, &g_pci_hw->vdev);
|
||||||
if (ret)
|
if (ret)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
return g_pci_hw;
|
return &g_pci_hw->vdev;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
|
static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
|
||||||
@ -374,6 +374,7 @@ static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
|
|||||||
struct virtio_hw *hw;
|
struct virtio_hw *hw;
|
||||||
|
|
||||||
hw = calloc(1, sizeof(*hw));
|
hw = calloc(1, sizeof(*hw));
|
||||||
|
hw->vdev.is_hw = 1;
|
||||||
hw->pci_dev = pci_dev;
|
hw->pci_dev = pci_dev;
|
||||||
|
|
||||||
g_pci_hw = hw;
|
g_pci_hw = hw;
|
||||||
|
@ -37,12 +37,21 @@
|
|||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
#include <sys/uio.h>
|
#include <sys/uio.h>
|
||||||
|
|
||||||
#include "virtio_pci.h"
|
|
||||||
|
|
||||||
#define VIRTIO_MAX_RX_QUEUES 128U
|
#define VIRTIO_MAX_RX_QUEUES 128U
|
||||||
#define VIRTIO_MAX_TX_QUEUES 128U
|
#define VIRTIO_MAX_TX_QUEUES 128U
|
||||||
#define VIRTIO_MIN_RX_BUFSIZE 64
|
#define VIRTIO_MIN_RX_BUFSIZE 64
|
||||||
|
|
||||||
|
struct virtio_dev {
|
||||||
|
struct virtqueue **vqs;
|
||||||
|
uint16_t started;
|
||||||
|
uint32_t max_queues;
|
||||||
|
uint8_t port_id;
|
||||||
|
uint64_t req_guest_features;
|
||||||
|
uint64_t guest_features;
|
||||||
|
int is_hw;
|
||||||
|
uint8_t modern;
|
||||||
|
};
|
||||||
|
|
||||||
struct virtio_req {
|
struct virtio_req {
|
||||||
struct iovec *iov;
|
struct iovec *iov;
|
||||||
struct iovec iov_req;
|
struct iovec iov_req;
|
||||||
@ -66,9 +75,9 @@ uint16_t virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs,
|
|||||||
|
|
||||||
uint16_t virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req);
|
uint16_t virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req);
|
||||||
|
|
||||||
int virtio_init_device(struct virtio_hw *hw, uint64_t req_features);
|
int virtio_init_device(struct virtio_dev *hw, uint64_t req_features);
|
||||||
int virtio_dev_start(struct virtio_hw *hw);
|
int virtio_dev_start(struct virtio_dev *hw);
|
||||||
struct virtio_hw *get_pci_virtio_hw(void);
|
struct virtio_dev *get_pci_virtio_hw(void);
|
||||||
|
|
||||||
void virtio_interrupt_handler(void *param);
|
void virtio_interrupt_handler(void *param);
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@
|
|||||||
#include "virtio_logs.h"
|
#include "virtio_logs.h"
|
||||||
#include "virtio_queue.h"
|
#include "virtio_queue.h"
|
||||||
|
|
||||||
struct virtio_hw_internal virtio_hw_internal[128];
|
struct vtpci_internal virtio_hw_internal[128];
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Following macros are derived from linux/pci_regs.h, however,
|
* Following macros are derived from linux/pci_regs.h, however,
|
||||||
@ -60,6 +60,9 @@ struct virtio_hw_internal virtio_hw_internal[128];
|
|||||||
*/
|
*/
|
||||||
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
|
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
|
||||||
|
|
||||||
|
#define virtio_dev_get_hw(hw) \
|
||||||
|
((struct virtio_hw *)((uintptr_t)(hw) - offsetof(struct virtio_hw, vdev)))
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
check_vq_phys_addr_ok(struct virtqueue *vq)
|
check_vq_phys_addr_ok(struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
@ -88,27 +91,28 @@ check_vq_phys_addr_ok(struct virtqueue *vq)
|
|||||||
* enforces this for the virtio-net stuff.
|
* enforces this for the virtio-net stuff.
|
||||||
*/
|
*/
|
||||||
static void
|
static void
|
||||||
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
|
legacy_read_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
void *dst, int length)
|
void *dst, int length)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
#ifdef RTE_ARCH_PPC_64
|
#ifdef RTE_ARCH_PPC_64
|
||||||
int size;
|
int size;
|
||||||
|
|
||||||
while (length > 0) {
|
while (length > 0) {
|
||||||
if (length >= 4) {
|
if (length >= 4) {
|
||||||
size = 4;
|
size = 4;
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
|
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
|
||||||
VIRTIO_PCI_CONFIG(hw) + offset);
|
VIRTIO_PCI_CONFIG(dev) + offset);
|
||||||
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
|
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
|
||||||
} else if (length >= 2) {
|
} else if (length >= 2) {
|
||||||
size = 2;
|
size = 2;
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
|
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
|
||||||
VIRTIO_PCI_CONFIG(hw) + offset);
|
VIRTIO_PCI_CONFIG(dev) + offset);
|
||||||
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
|
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
|
||||||
} else {
|
} else {
|
||||||
size = 1;
|
size = 1;
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
|
rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
|
||||||
VIRTIO_PCI_CONFIG(hw) + offset);
|
VIRTIO_PCI_CONFIG(dev) + offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
dst = (char *)dst + size;
|
dst = (char *)dst + size;
|
||||||
@ -116,15 +120,16 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
|
|||||||
length -= size;
|
length -= size;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
|
rte_pci_ioport_read(VTPCI_IO(dev), dst, length,
|
||||||
VIRTIO_PCI_CONFIG(hw) + offset);
|
VIRTIO_PCI_CONFIG(hw) + offset);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
|
legacy_write_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
const void *src, int length)
|
const void *src, int length)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
#ifdef RTE_ARCH_PPC_64
|
#ifdef RTE_ARCH_PPC_64
|
||||||
union {
|
union {
|
||||||
uint32_t u32;
|
uint32_t u32;
|
||||||
@ -154,125 +159,125 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
|
|||||||
length -= size;
|
length -= size;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), src, length,
|
rte_pci_ioport_write(VTPCI_IO(dev), src, length,
|
||||||
VIRTIO_PCI_CONFIG(hw) + offset);
|
VIRTIO_PCI_CONFIG(hw) + offset);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint64_t
|
static uint64_t
|
||||||
legacy_get_features(struct virtio_hw *hw)
|
legacy_get_features(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
uint32_t dst;
|
uint32_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_set_features(struct virtio_hw *hw, uint64_t features)
|
legacy_set_features(struct virtio_dev *dev, uint64_t features)
|
||||||
{
|
{
|
||||||
if ((features >> 32) != 0) {
|
if ((features >> 32) != 0) {
|
||||||
PMD_DRV_LOG(ERR,
|
PMD_DRV_LOG(ERR,
|
||||||
"only 32 bit features are allowed for legacy virtio!");
|
"only 32 bit features are allowed for legacy virtio!");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
|
rte_pci_ioport_write(VTPCI_IO(dev), &features, 4,
|
||||||
VIRTIO_PCI_GUEST_FEATURES);
|
VIRTIO_PCI_GUEST_FEATURES);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint8_t
|
static uint8_t
|
||||||
legacy_get_status(struct virtio_hw *hw)
|
legacy_get_status(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
uint8_t dst;
|
uint8_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_STATUS);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_set_status(struct virtio_hw *hw, uint8_t status)
|
legacy_set_status(struct virtio_dev *dev, uint8_t status)
|
||||||
{
|
{
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
|
rte_pci_ioport_write(VTPCI_IO(dev), &status, 1, VIRTIO_PCI_STATUS);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_reset(struct virtio_hw *hw)
|
legacy_reset(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
|
legacy_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint8_t
|
static uint8_t
|
||||||
legacy_get_isr(struct virtio_hw *hw)
|
legacy_get_isr(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
uint8_t dst;
|
uint8_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_ISR);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Enable one vector (0) for Link State Intrerrupt */
|
/* Enable one vector (0) for Link State Intrerrupt */
|
||||||
static uint16_t
|
static uint16_t
|
||||||
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
|
legacy_set_config_irq(struct virtio_dev *dev, uint16_t vec)
|
||||||
{
|
{
|
||||||
uint16_t dst;
|
uint16_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
|
rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
|
legacy_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
|
||||||
{
|
{
|
||||||
uint16_t dst;
|
uint16_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
|
||||||
VIRTIO_PCI_QUEUE_SEL);
|
VIRTIO_PCI_QUEUE_SEL);
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
|
rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
|
legacy_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
|
||||||
{
|
{
|
||||||
uint16_t dst;
|
uint16_t dst;
|
||||||
|
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
|
rte_pci_ioport_write(VTPCI_IO(dev), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
|
||||||
rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
|
rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
|
||||||
return dst;
|
return dst;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
legacy_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
uint32_t src;
|
uint32_t src;
|
||||||
|
|
||||||
if (!check_vq_phys_addr_ok(vq))
|
if (!check_vq_phys_addr_ok(vq))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
|
||||||
VIRTIO_PCI_QUEUE_SEL);
|
VIRTIO_PCI_QUEUE_SEL);
|
||||||
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
|
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
legacy_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
uint32_t src = 0;
|
uint32_t src = 0;
|
||||||
|
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
|
||||||
VIRTIO_PCI_QUEUE_SEL);
|
VIRTIO_PCI_QUEUE_SEL);
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
legacy_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
|
rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
|
||||||
VIRTIO_PCI_QUEUE_NOTIFY);
|
VIRTIO_PCI_QUEUE_NOTIFY);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -301,9 +306,10 @@ io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
|
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
void *dst, int length)
|
void *dst, int length)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
int i;
|
int i;
|
||||||
uint8_t *p;
|
uint8_t *p;
|
||||||
uint8_t old_gen, new_gen;
|
uint8_t old_gen, new_gen;
|
||||||
@ -320,9 +326,10 @@ modern_read_dev_config(struct virtio_hw *hw, size_t offset,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
|
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
const void *src, int length)
|
const void *src, int length)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
int i;
|
int i;
|
||||||
const uint8_t *p = src;
|
const uint8_t *p = src;
|
||||||
|
|
||||||
@ -331,8 +338,9 @@ modern_write_dev_config(struct virtio_hw *hw, size_t offset,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static uint64_t
|
static uint64_t
|
||||||
modern_get_features(struct virtio_hw *hw)
|
modern_get_features(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
uint32_t features_lo, features_hi;
|
uint32_t features_lo, features_hi;
|
||||||
|
|
||||||
rte_write32(0, &hw->common_cfg->device_feature_select);
|
rte_write32(0, &hw->common_cfg->device_feature_select);
|
||||||
@ -345,8 +353,10 @@ modern_get_features(struct virtio_hw *hw)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_set_features(struct virtio_hw *hw, uint64_t features)
|
modern_set_features(struct virtio_dev *dev, uint64_t features)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write32(0, &hw->common_cfg->guest_feature_select);
|
rte_write32(0, &hw->common_cfg->guest_feature_select);
|
||||||
rte_write32(features & ((1ULL << 32) - 1),
|
rte_write32(features & ((1ULL << 32) - 1),
|
||||||
&hw->common_cfg->guest_feature);
|
&hw->common_cfg->guest_feature);
|
||||||
@ -357,55 +367,68 @@ modern_set_features(struct virtio_hw *hw, uint64_t features)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static uint8_t
|
static uint8_t
|
||||||
modern_get_status(struct virtio_hw *hw)
|
modern_get_status(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
return rte_read8(&hw->common_cfg->device_status);
|
return rte_read8(&hw->common_cfg->device_status);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_set_status(struct virtio_hw *hw, uint8_t status)
|
modern_set_status(struct virtio_dev *dev, uint8_t status)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write8(status, &hw->common_cfg->device_status);
|
rte_write8(status, &hw->common_cfg->device_status);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_reset(struct virtio_hw *hw)
|
modern_reset(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
|
modern_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
|
||||||
modern_get_status(hw);
|
modern_get_status(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint8_t
|
static uint8_t
|
||||||
modern_get_isr(struct virtio_hw *hw)
|
modern_get_isr(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
return rte_read8(hw->isr);
|
return rte_read8(hw->isr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
|
modern_set_config_irq(struct virtio_dev *dev, uint16_t vec)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write16(vec, &hw->common_cfg->msix_config);
|
rte_write16(vec, &hw->common_cfg->msix_config);
|
||||||
return rte_read16(&hw->common_cfg->msix_config);
|
return rte_read16(&hw->common_cfg->msix_config);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
|
modern_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
|
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
|
||||||
rte_write16(vec, &hw->common_cfg->queue_msix_vector);
|
rte_write16(vec, &hw->common_cfg->queue_msix_vector);
|
||||||
return rte_read16(&hw->common_cfg->queue_msix_vector);
|
return rte_read16(&hw->common_cfg->queue_msix_vector);
|
||||||
}
|
}
|
||||||
|
|
||||||
static uint16_t
|
static uint16_t
|
||||||
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
|
modern_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write16(queue_id, &hw->common_cfg->queue_select);
|
rte_write16(queue_id, &hw->common_cfg->queue_select);
|
||||||
return rte_read16(&hw->common_cfg->queue_size);
|
return rte_read16(&hw->common_cfg->queue_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
uint64_t desc_addr, avail_addr, used_addr;
|
uint64_t desc_addr, avail_addr, used_addr;
|
||||||
uint16_t notify_off;
|
uint16_t notify_off;
|
||||||
|
|
||||||
@ -444,8 +467,10 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(dev);
|
||||||
|
|
||||||
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
|
rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
|
||||||
|
|
||||||
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
|
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
|
||||||
@ -459,7 +484,7 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
|
modern_notify_queue(struct virtio_dev *dev __rte_unused, struct virtqueue *vq)
|
||||||
{
|
{
|
||||||
rte_write16(vq->vq_queue_index, vq->notify_addr);
|
rte_write16(vq->vq_queue_index, vq->notify_addr);
|
||||||
}
|
}
|
||||||
@ -483,21 +508,21 @@ const struct virtio_pci_ops modern_ops = {
|
|||||||
|
|
||||||
|
|
||||||
void
|
void
|
||||||
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
|
vtpci_read_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
void *dst, int length)
|
void *dst, int length)
|
||||||
{
|
{
|
||||||
VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
|
VTPCI_OPS(dev)->read_dev_cfg(dev, offset, dst, length);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
|
vtpci_write_dev_config(struct virtio_dev *dev, size_t offset,
|
||||||
const void *src, int length)
|
const void *src, int length)
|
||||||
{
|
{
|
||||||
VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
|
VTPCI_OPS(dev)->write_dev_cfg(dev, offset, src, length);
|
||||||
}
|
}
|
||||||
|
|
||||||
uint64_t
|
uint64_t
|
||||||
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
|
vtpci_negotiate_features(struct virtio_dev *dev, uint64_t host_features)
|
||||||
{
|
{
|
||||||
uint64_t features;
|
uint64_t features;
|
||||||
|
|
||||||
@ -505,45 +530,45 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
|
|||||||
* Limit negotiated features to what the driver, virtqueue, and
|
* Limit negotiated features to what the driver, virtqueue, and
|
||||||
* host all support.
|
* host all support.
|
||||||
*/
|
*/
|
||||||
features = host_features & hw->guest_features;
|
features = host_features & dev->guest_features;
|
||||||
VTPCI_OPS(hw)->set_features(hw, features);
|
VTPCI_OPS(dev)->set_features(dev, features);
|
||||||
|
|
||||||
return features;
|
return features;
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
vtpci_reset(struct virtio_hw *hw)
|
vtpci_reset(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
|
VTPCI_OPS(dev)->set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
|
||||||
/* flush status write */
|
/* flush status write */
|
||||||
VTPCI_OPS(hw)->get_status(hw);
|
VTPCI_OPS(dev)->get_status(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
vtpci_reinit_complete(struct virtio_hw *hw)
|
vtpci_reinit_complete(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
|
vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
|
vtpci_set_status(struct virtio_dev *dev, uint8_t status)
|
||||||
{
|
{
|
||||||
if (status != VIRTIO_CONFIG_STATUS_RESET)
|
if (status != VIRTIO_CONFIG_STATUS_RESET)
|
||||||
status |= VTPCI_OPS(hw)->get_status(hw);
|
status |= VTPCI_OPS(dev)->get_status(dev);
|
||||||
|
|
||||||
VTPCI_OPS(hw)->set_status(hw, status);
|
VTPCI_OPS(dev)->set_status(dev, status);
|
||||||
}
|
}
|
||||||
|
|
||||||
uint8_t
|
uint8_t
|
||||||
vtpci_get_status(struct virtio_hw *hw)
|
vtpci_get_status(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
return VTPCI_OPS(hw)->get_status(hw);
|
return VTPCI_OPS(dev)->get_status(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
uint8_t
|
uint8_t
|
||||||
vtpci_isr(struct virtio_hw *hw)
|
vtpci_isr(struct virtio_dev *dev)
|
||||||
{
|
{
|
||||||
return VTPCI_OPS(hw)->get_isr(hw);
|
return VTPCI_OPS(dev)->get_isr(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
@ -668,8 +693,10 @@ next:
|
|||||||
* Return 0 on success.
|
* Return 0 on success.
|
||||||
*/
|
*/
|
||||||
int
|
int
|
||||||
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
|
vtpci_init(struct rte_pci_device *dev, struct virtio_dev *vdev)
|
||||||
{
|
{
|
||||||
|
struct virtio_hw *hw = virtio_dev_get_hw(vdev);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Try if we can succeed reading virtio pci caps, which exists
|
* Try if we can succeed reading virtio pci caps, which exists
|
||||||
* only on modern pci device. If failed, we fallback to legacy
|
* only on modern pci device. If failed, we fallback to legacy
|
||||||
@ -677,8 +704,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
|
|||||||
*/
|
*/
|
||||||
if (virtio_read_caps(dev, hw) == 0) {
|
if (virtio_read_caps(dev, hw) == 0) {
|
||||||
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
|
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
|
||||||
virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
|
virtio_hw_internal[vdev->port_id].vtpci_ops = &modern_ops;
|
||||||
hw->modern = 1;
|
vdev->modern = 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -697,8 +724,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
|
virtio_hw_internal[vdev->port_id].vtpci_ops = &legacy_ops;
|
||||||
hw->modern = 0;
|
vdev->modern = 0;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -38,6 +38,8 @@
|
|||||||
|
|
||||||
#include <rte_pci.h>
|
#include <rte_pci.h>
|
||||||
|
|
||||||
|
#include "virtio_dev.h"
|
||||||
|
|
||||||
struct virtqueue;
|
struct virtqueue;
|
||||||
|
|
||||||
/* VirtIO PCI vendor/device ID. */
|
/* VirtIO PCI vendor/device ID. */
|
||||||
@ -183,68 +185,57 @@ struct virtio_pci_common_cfg {
|
|||||||
uint32_t queue_used_hi; /* read-write */
|
uint32_t queue_used_hi; /* read-write */
|
||||||
};
|
};
|
||||||
|
|
||||||
struct virtio_hw;
|
|
||||||
|
|
||||||
struct virtio_pci_ops {
|
struct virtio_pci_ops {
|
||||||
void (*read_dev_cfg)(struct virtio_hw *hw, size_t offset,
|
void (*read_dev_cfg)(struct virtio_dev *hw, size_t offset,
|
||||||
void *dst, int len);
|
void *dst, int len);
|
||||||
void (*write_dev_cfg)(struct virtio_hw *hw, size_t offset,
|
void (*write_dev_cfg)(struct virtio_dev *hw, size_t offset,
|
||||||
const void *src, int len);
|
const void *src, int len);
|
||||||
void (*reset)(struct virtio_hw *hw);
|
void (*reset)(struct virtio_dev *hw);
|
||||||
|
|
||||||
uint8_t (*get_status)(struct virtio_hw *hw);
|
uint8_t (*get_status)(struct virtio_dev *hw);
|
||||||
void (*set_status)(struct virtio_hw *hw, uint8_t status);
|
void (*set_status)(struct virtio_dev *hw, uint8_t status);
|
||||||
|
|
||||||
uint64_t (*get_features)(struct virtio_hw *hw);
|
uint64_t (*get_features)(struct virtio_dev *hw);
|
||||||
void (*set_features)(struct virtio_hw *hw, uint64_t features);
|
void (*set_features)(struct virtio_dev *hw, uint64_t features);
|
||||||
|
|
||||||
uint8_t (*get_isr)(struct virtio_hw *hw);
|
uint8_t (*get_isr)(struct virtio_dev *hw);
|
||||||
|
|
||||||
uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
|
uint16_t (*set_config_irq)(struct virtio_dev *hw, uint16_t vec);
|
||||||
|
|
||||||
uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
|
uint16_t (*set_queue_irq)(struct virtio_dev *hw, struct virtqueue *vq,
|
||||||
uint16_t vec);
|
uint16_t vec);
|
||||||
|
|
||||||
uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
|
uint16_t (*get_queue_num)(struct virtio_dev *hw, uint16_t queue_id);
|
||||||
int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
|
int (*setup_queue)(struct virtio_dev *hw, struct virtqueue *vq);
|
||||||
void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
|
void (*del_queue)(struct virtio_dev *hw, struct virtqueue *vq);
|
||||||
void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq);
|
void (*notify_queue)(struct virtio_dev *hw, struct virtqueue *vq);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct virtio_hw {
|
struct virtio_hw {
|
||||||
uint64_t req_guest_features;
|
struct virtio_dev vdev;
|
||||||
uint64_t guest_features;
|
|
||||||
uint32_t max_queues;
|
|
||||||
uint16_t started;
|
|
||||||
uint8_t use_msix;
|
uint8_t use_msix;
|
||||||
uint8_t modern;
|
|
||||||
uint8_t port_id;
|
|
||||||
uint32_t notify_off_multiplier;
|
uint32_t notify_off_multiplier;
|
||||||
uint8_t *isr;
|
uint8_t *isr;
|
||||||
uint16_t *notify_base;
|
uint16_t *notify_base;
|
||||||
struct virtio_pci_common_cfg *common_cfg;
|
struct virtio_pci_common_cfg *common_cfg;
|
||||||
struct rte_pci_device *pci_dev;
|
struct rte_pci_device *pci_dev;
|
||||||
struct virtio_scsi_config *dev_cfg;
|
struct virtio_scsi_config *dev_cfg;
|
||||||
void *virtio_user_dev;
|
|
||||||
|
|
||||||
struct virtqueue **vqs;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* While virtio_hw is stored in shared memory, this structure stores
|
* While virtio_hw is stored in shared memory, this structure stores
|
||||||
* some infos that may vary in the multiple process model locally.
|
* some infos that may vary in the multiple process model locally.
|
||||||
* For example, the vtpci_ops pointer.
|
* For example, the vtpci_ops pointer.
|
||||||
*/
|
*/
|
||||||
struct virtio_hw_internal {
|
struct vtpci_internal {
|
||||||
const struct virtio_pci_ops *vtpci_ops;
|
const struct virtio_pci_ops *vtpci_ops;
|
||||||
struct rte_pci_ioport io;
|
struct rte_pci_ioport io;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->port_id].vtpci_ops)
|
#define VTPCI_OPS(dev) (virtio_hw_internal[(dev)->port_id].vtpci_ops)
|
||||||
#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->port_id].io)
|
#define VTPCI_IO(dev) (&virtio_hw_internal[(dev)->port_id].io)
|
||||||
|
|
||||||
extern struct virtio_hw_internal virtio_hw_internal[128];
|
extern struct vtpci_internal virtio_hw_internal[128];
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* How many bits to shift physical queue address written to QUEUE_PFN.
|
* How many bits to shift physical queue address written to QUEUE_PFN.
|
||||||
@ -256,29 +247,29 @@ extern struct virtio_hw_internal virtio_hw_internal[128];
|
|||||||
#define VIRTIO_PCI_VRING_ALIGN 4096
|
#define VIRTIO_PCI_VRING_ALIGN 4096
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
|
vtpci_with_feature(struct virtio_dev *dev, uint64_t bit)
|
||||||
{
|
{
|
||||||
return (hw->guest_features & (1ULL << bit)) != 0;
|
return (dev->guest_features & (1ULL << bit)) != 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Function declaration from virtio_pci.c
|
* Function declaration from virtio_pci.c
|
||||||
*/
|
*/
|
||||||
int vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw);
|
int vtpci_init(struct rte_pci_device *dev, struct virtio_dev *vdev);
|
||||||
void vtpci_reset(struct virtio_hw *);
|
void vtpci_reset(struct virtio_dev *);
|
||||||
|
|
||||||
void vtpci_reinit_complete(struct virtio_hw *);
|
void vtpci_reinit_complete(struct virtio_dev *);
|
||||||
|
|
||||||
uint8_t vtpci_get_status(struct virtio_hw *);
|
uint8_t vtpci_get_status(struct virtio_dev *);
|
||||||
void vtpci_set_status(struct virtio_hw *, uint8_t);
|
void vtpci_set_status(struct virtio_dev *, uint8_t);
|
||||||
|
|
||||||
uint64_t vtpci_negotiate_features(struct virtio_hw *, uint64_t);
|
uint64_t vtpci_negotiate_features(struct virtio_dev *, uint64_t);
|
||||||
|
|
||||||
void vtpci_write_dev_config(struct virtio_hw *, size_t, const void *, int);
|
void vtpci_write_dev_config(struct virtio_dev *, size_t, const void *, int);
|
||||||
|
|
||||||
void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
|
void vtpci_read_dev_config(struct virtio_dev *, size_t, void *, int);
|
||||||
|
|
||||||
uint8_t vtpci_isr(struct virtio_hw *);
|
uint8_t vtpci_isr(struct virtio_dev *);
|
||||||
|
|
||||||
extern const struct virtio_pci_ops virtio_user_ops;
|
extern const struct virtio_pci_ops virtio_user_ops;
|
||||||
|
|
||||||
|
@ -119,7 +119,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct virtio_req **rx_pkts,
|
|||||||
static inline void
|
static inline void
|
||||||
virtqueue_iov_to_desc(struct virtqueue *vq, uint16_t desc_idx, struct iovec *iov)
|
virtqueue_iov_to_desc(struct virtqueue *vq, uint16_t desc_idx, struct iovec *iov)
|
||||||
{
|
{
|
||||||
if (vq->hw->virtio_user_dev) {
|
if (!vq->vdev->is_hw) {
|
||||||
vq->vq_ring.desc[desc_idx].addr = (uintptr_t)iov->iov_base;
|
vq->vq_ring.desc[desc_idx].addr = (uintptr_t)iov->iov_base;
|
||||||
} else {
|
} else {
|
||||||
vq->vq_ring.desc[desc_idx].addr = spdk_vtophys(iov->iov_base);
|
vq->vq_ring.desc[desc_idx].addr = spdk_vtophys(iov->iov_base);
|
||||||
@ -193,7 +193,7 @@ virtqueue_enqueue_xmit(struct virtqueue *vq, struct virtio_req *req)
|
|||||||
uint16_t
|
uint16_t
|
||||||
virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkts)
|
virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkts)
|
||||||
{
|
{
|
||||||
struct virtio_hw *hw = vq->hw;
|
struct virtio_dev *vdev = vq->vdev;
|
||||||
struct virtio_req *rxm;
|
struct virtio_req *rxm;
|
||||||
uint16_t nb_used, num, nb_rx;
|
uint16_t nb_used, num, nb_rx;
|
||||||
uint32_t len[VIRTIO_MBUF_BURST_SZ];
|
uint32_t len[VIRTIO_MBUF_BURST_SZ];
|
||||||
@ -201,7 +201,7 @@ virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkt
|
|||||||
uint32_t i;
|
uint32_t i;
|
||||||
|
|
||||||
nb_rx = 0;
|
nb_rx = 0;
|
||||||
if (unlikely(hw->started == 0))
|
if (unlikely(vdev->started == 0))
|
||||||
return nb_rx;
|
return nb_rx;
|
||||||
|
|
||||||
nb_used = VIRTQUEUE_NUSED(vq);
|
nb_used = VIRTQUEUE_NUSED(vq);
|
||||||
@ -232,9 +232,9 @@ virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs, uint16_t nb_pkt
|
|||||||
uint16_t
|
uint16_t
|
||||||
virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req)
|
virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req)
|
||||||
{
|
{
|
||||||
struct virtio_hw *hw = vq->hw;
|
struct virtio_dev *vdev = vq->vdev;
|
||||||
|
|
||||||
if (unlikely(hw->started == 0))
|
if (unlikely(vdev->started == 0))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
virtio_rmb();
|
virtio_rmb();
|
||||||
|
@ -42,7 +42,7 @@
|
|||||||
#include <rte_memzone.h>
|
#include <rte_memzone.h>
|
||||||
#include <rte_mempool.h>
|
#include <rte_mempool.h>
|
||||||
|
|
||||||
#include "virtio_pci.h"
|
#include "virtio_dev.h"
|
||||||
#include "virtio_logs.h"
|
#include "virtio_logs.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -72,7 +72,7 @@ struct vq_desc_extra {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct virtqueue {
|
struct virtqueue {
|
||||||
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
|
struct virtio_dev *vdev; /**< owner of this virtqueue */
|
||||||
struct vring vq_ring; /**< vring keeping desc, used and avail */
|
struct vring vq_ring; /**< vring keeping desc, used and avail */
|
||||||
/**
|
/**
|
||||||
* Last consumed descriptor in the used table,
|
* Last consumed descriptor in the used table,
|
||||||
@ -175,7 +175,7 @@ virtqueue_notify(struct virtqueue *vq)
|
|||||||
* For virtio on IA, the notificaiton is through io port operation
|
* For virtio on IA, the notificaiton is through io port operation
|
||||||
* which is a serialization instruction itself.
|
* which is a serialization instruction itself.
|
||||||
*/
|
*/
|
||||||
VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
|
VTPCI_OPS(vq->vdev)->notify_queue(vq->vdev, vq);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* _VIRTQUEUE_H_ */
|
#endif /* _VIRTQUEUE_H_ */
|
||||||
@@ -50,74 +50,74 @@
 #include "virtio_queue.h"
 #include "virtio_user/virtio_user_dev.h"
 
-#define virtio_user_get_dev(hw) \
-	((struct virtio_user_dev *)(hw)->virtio_user_dev)
+#define virtio_dev_get_user_dev(dev) \
+	((struct virtio_user_dev *)((uintptr_t)(dev) - offsetof(struct virtio_user_dev, vdev)))
 
 static void
-virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
+virtio_user_read_dev_config(struct virtio_dev *vdev, size_t offset,
 			    void *dst, int length)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	if (offset == offsetof(struct virtio_scsi_config, num_queues))
 		*(uint16_t *)dst = dev->max_queues;
 }
 
 static void
-virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
+virtio_user_write_dev_config(struct virtio_dev *vdev, size_t offset,
 			     const void *src, int length)
 {
 	PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d", offset, length);
 }
 
 static void
-virtio_user_reset(struct virtio_hw *hw)
+virtio_user_reset(struct virtio_dev *vdev)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
 		virtio_user_stop_device(dev);
 }
 
 static void
-virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+virtio_user_set_status(struct virtio_dev *vdev, uint8_t status)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
 		virtio_user_start_device(dev);
 	else if (status == VIRTIO_CONFIG_STATUS_RESET)
-		virtio_user_reset(hw);
+		virtio_user_reset(vdev);
 	dev->status = status;
 }
 
 static uint8_t
-virtio_user_get_status(struct virtio_hw *hw)
+virtio_user_get_status(struct virtio_dev *vdev)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	return dev->status;
 }
 
 static uint64_t
-virtio_user_get_features(struct virtio_hw *hw)
+virtio_user_get_features(struct virtio_dev *vdev)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	/* unmask feature bits defined in vhost user protocol */
 	return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
 }
 
 static void
-virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
+virtio_user_set_features(struct virtio_dev *vdev, uint64_t features)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	dev->features = features & dev->device_features;
 }
 
 static uint8_t
-virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
+virtio_user_get_isr(struct virtio_dev *vdev __rte_unused)
 {
 	/* rxq interrupts and config interrupt are separated in virtio-user,
 	 * here we only report config change.
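
With struct virtio_dev embedded directly inside struct virtio_user_dev, the old virtio_user_get_dev(hw) back-pointer lookup is replaced by the usual container-of idiom: virtio_dev_get_user_dev() subtracts the offset of the embedded member to recover the enclosing structure. A minimal, self-contained sketch of the same idiom with illustrative names (not code from this tree):

	#include <stddef.h>	/* offsetof() */
	#include <stdint.h>	/* uintptr_t */

	struct base { int id; };

	struct wrapper {
		struct base base;	/* embedded member, analogous to virtio_user_dev.vdev */
		int extra;
	};

	/* Recover the enclosing struct from a pointer to its embedded member.
	 * Only valid when 'b' really is the 'base' field of a struct wrapper. */
	static inline struct wrapper *
	wrapper_from_base(struct base *b)
	{
		return (struct wrapper *)((uintptr_t)b - offsetof(struct wrapper, base));
	}
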
@@ -126,14 +126,14 @@ virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
 }
 
 static uint16_t
-virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
+virtio_user_set_config_irq(struct virtio_dev *vdev __rte_unused,
 			   uint16_t vec __rte_unused)
 {
 	return 0;
 }
 
 static uint16_t
-virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
+virtio_user_set_queue_irq(struct virtio_dev *vdev __rte_unused,
 			  struct virtqueue *vq __rte_unused,
 			  uint16_t vec)
 {
@@ -146,18 +146,18 @@ virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
  * max supported queues.
  */
 static uint16_t
-virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
+virtio_user_get_queue_num(struct virtio_dev *vdev, uint16_t queue_id __rte_unused)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	/* Currently, each queue has same queue size */
 	return dev->queue_size;
 }
 
 static int
-virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+virtio_user_setup_queue(struct virtio_dev *vdev, struct virtqueue *vq)
 {
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 	uint16_t queue_idx = vq->vq_queue_index;
 	uint64_t desc_addr, avail_addr, used_addr;
 
@@ -176,7 +176,7 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
 }
 
 static void
-virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
+virtio_user_del_queue(struct virtio_dev *vdev, struct virtqueue *vq)
 {
 	/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
 	 * correspondingly stops the ioeventfds, and reset the status of
@@ -187,17 +187,17 @@ virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
 	 * Here we just care about what information to deliver to vhost-user
 	 * or vhost-kernel. So we just close ioeventfd for now.
 	 */
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	close(dev->callfds[vq->vq_queue_index]);
 	close(dev->kickfds[vq->vq_queue_index]);
 }
 
 static void
-virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
+virtio_user_notify_queue(struct virtio_dev *vdev, struct virtqueue *vq)
 {
 	uint64_t buf = 1;
-	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+	struct virtio_user_dev *dev = virtio_dev_get_user_dev(vdev);
 
 	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
 		PMD_DRV_LOG(ERR, "failed to kick backend: %s",
@@ -233,18 +233,18 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	(1ULL << VIRTIO_SCSI_F_INOUT | \
 	 1ULL << VIRTIO_F_VERSION_1)
 
-struct virtio_hw *
+struct virtio_dev *
 virtio_user_dev_init(char *path, int queues, int queue_size)
 {
-	struct virtio_hw *hw;
+	struct virtio_dev *vdev;
 	struct virtio_user_dev *dev;
 	uint64_t max_queues;
 
-	hw = calloc(1, sizeof(*hw));
-	dev = calloc(1, sizeof(struct virtio_user_dev));
-	hw->virtio_user_dev = dev;
+	dev = calloc(1, sizeof(*dev));
+	vdev = &dev->vdev;
+	vdev->is_hw = 0;
 
-	virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
+	virtio_hw_internal[0].vtpci_ops = &virtio_user_ops;
 
 	snprintf(dev->path, PATH_MAX, "%s", path);
 	/* Account for control and event queue. */
@@ -279,10 +279,9 @@ virtio_user_dev_init(char *path, int queues, int queue_size)
 
 	dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
 
-	return hw;
+	return vdev;
 
 err:
-	free(hw);
 	free(dev);
 	return NULL;
 }
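
virtio_user_dev_init() now performs a single allocation: struct virtio_dev lives inside struct virtio_user_dev, so the function fills in dev->vdev and returns its address instead of a separately allocated virtio_hw. A sketch of the resulting pointer round trip, assuming it sits in the same translation unit that defines virtio_dev_get_user_dev(); the socket path and queue parameters are placeholders:

	#include <assert.h>

	static void
	virtio_user_init_example(void)
	{
		struct virtio_dev *vdev;
		struct virtio_user_dev *dev;

		vdev = virtio_user_dev_init("/tmp/vhost.0", 4, 512);
		if (vdev == NULL)
			return;

		/* Generic code holds only the virtio_dev pointer; the virtio-user
		 * backend recovers its private state from the embedded member. */
		dev = virtio_dev_get_user_dev(vdev);
		assert(&dev->vdev == vdev);
	}
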
@@ -37,12 +37,15 @@
 #include <linux/virtio_ring.h>
 
 #include <limits.h>
-#include "../virtio_pci.h"
 #include "vhost.h"
 
+#include "../virtio_dev.h"
+
 #define VIRTIO_MAX_VIRTQUEUES 0x100
 
 struct virtio_user_dev {
+	struct virtio_dev vdev;
+
 	/* for vhost_user backend */
 	int vhostfd;
 
@@ -71,7 +74,7 @@ struct virtio_user_dev {
 int is_vhost_user_by_type(const char *path);
 int virtio_user_start_device(struct virtio_user_dev *dev);
 int virtio_user_stop_device(struct virtio_user_dev *dev);
-struct virtio_hw *virtio_user_dev_init(char *path, int queues, int queue_size);
+struct virtio_dev *virtio_user_dev_init(char *path, int queues, int queue_size);
 void virtio_user_dev_uninit(struct virtio_user_dev *dev);
 void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
 #endif