vhost_scsi: support initiators without eventq/controlq

Change-Id: I400edd6d6f71de005b3ccbe9968631a067226035
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/408611
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
Dariusz Stojaczyk 2018-04-23 13:04:18 +02:00 committed by Daniel Verkamp
parent 55eb9617cf
commit 1d74fea015
7 changed files with 66 additions and 90 deletions

View File

@@ -349,34 +349,6 @@ int rte_vhost_get_mtu(int vid, uint16_t *mtu);
*/
int rte_vhost_get_numa_node(int vid);
/**
* @deprecated
* Get the number of queues the device supports.
*
* Note this function is deprecated, as it returns a queue pair number,
* which is vhost specific. Instead, rte_vhost_get_vring_num should
* be used.
*
* @param vid
* vhost device ID
*
* @return
* The number of queues, 0 on failure
*/
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);
/**
* Get the number of vrings the device supports.
*
* @param vid
* vhost device ID
*
* @return
* The number of vrings, 0 on failure
*/
uint16_t rte_vhost_get_vring_num(int vid);
/**
* Get the virtio net device's ifname, which is the vhost-user socket
* file path.

View File

@@ -307,28 +307,6 @@ rte_vhost_get_numa_node(int vid)
#endif
}
/*
 * Deprecated: reports the number of virtio-net queue *pairs* for the
 * device identified by vid; callers should prefer rte_vhost_get_vring_num().
 * Returns 0 when no device matches vid.
 */
uint32_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	/* A queue pair is one RX plus one TX vring, hence the halving. */
	return (dev == NULL) ? 0 : dev->nr_vring / 2;
}
/*
 * Return the total number of vrings backing the device identified by vid,
 * or 0 when the device cannot be found.
 */
uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	return (dev == NULL) ? 0 : dev->nr_vring;
}
int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{

View File

@@ -716,14 +716,15 @@ virtio_is_ready(struct virtio_net *dev)
for (i = 0; i < dev->nr_vring; i++) {
vq = dev->virtqueue[i];
if (!vq_is_ready(vq))
return 0;
}
if (vq_is_ready(vq)) {
RTE_LOG(INFO, VHOST_CONFIG,
"virtio is now ready for processing.\n");
return 1;
}
}
return 0;
}
static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)

View File

@@ -275,7 +275,7 @@ check_dev_io_stats(struct spdk_vhost_dev *vdev, uint64_t now)
}
vdev->next_stats_check_time = now + vdev->stats_check_interval;
for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
@@ -299,10 +299,11 @@ spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
uint16_t q_idx;
if (vdev->coalescing_delay_time_base == 0) {
for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
if (virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
if (virtqueue->vring.desc == NULL ||
(virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
continue;
}
@@ -312,7 +313,7 @@ spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
now = spdk_get_ticks();
check_dev_io_stats(vdev, now);
for (q_idx = 0; q_idx < vdev->num_queues; q_idx++) {
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
/* No need for event right now */
@@ -987,8 +988,11 @@ stop_device(int vid)
return;
}
for (i = 0; i < vdev->num_queues; i++) {
for (i = 0; i < vdev->max_queues; i++) {
q = &vdev->virtqueue[i].vring;
if (q->desc == NULL) {
continue;
}
rte_vhost_set_vhost_vring_last_idx(vdev->vid, i, q->last_avail_idx, q->last_used_idx);
}
@@ -1004,11 +1008,9 @@ start_device(int vid)
{
struct spdk_vhost_dev *vdev;
int rc = -1;
uint16_t num_queues;
uint16_t i;
pthread_mutex_lock(&g_spdk_vhost_mutex);
num_queues = rte_vhost_get_vring_num(vid);
vdev = spdk_vhost_dev_find_by_vid(vid);
if (vdev == NULL) {
@@ -1021,22 +1023,16 @@ start_device(int vid)
goto out;
}
if (num_queues > SPDK_VHOST_MAX_VQUEUES) {
SPDK_ERRLOG("vhost device %d: Too many queues (%"PRIu16"). Max %"PRIu16"\n", vid, num_queues,
SPDK_VHOST_MAX_VQUEUES);
goto out;
}
vdev->max_queues = 0;
memset(vdev->virtqueue, 0, sizeof(vdev->virtqueue));
for (i = 0; i < num_queues; i++) {
for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
if (rte_vhost_get_vhost_vring(vid, i, &vdev->virtqueue[i].vring)) {
SPDK_ERRLOG("vhost device %d: Failed to get information of queue %"PRIu16"\n", vid, i);
goto out;
continue;
}
if (vdev->virtqueue[i].vring.size == 0) {
SPDK_ERRLOG("vhost device %d: Queue %"PRIu16" has size 0.\n", vid, i);
goto out;
if (vdev->virtqueue[i].vring.desc == NULL ||
vdev->virtqueue[i].vring.size == 0) {
continue;
}
/* Disable notifications. */
@@ -1044,9 +1040,9 @@ start_device(int vid)
SPDK_ERRLOG("vhost device %d: Failed to disable guest notification on queue %"PRIu16"\n", vid, i);
goto out;
}
}
vdev->num_queues = num_queues;
vdev->max_queues = i + 1;
}
if (rte_vhost_get_negotiated_features(vid, &vdev->negotiated_features) != 0) {
SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
@@ -1067,7 +1063,7 @@ start_device(int vid)
*
* Tested on QEMU 2.10.91 and 2.11.50.
*/
for (i = 0; i < num_queues; i++) {
for (i = 0; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.callfd != -1) {
eventfd_write(vdev->virtqueue[i].vring.callfd, (eventfd_t)1);
}

View File

@@ -333,7 +333,7 @@ vdev_worker(void *arg)
struct spdk_vhost_blk_dev *bvdev = arg;
uint16_t q_idx;
for (q_idx = 0; q_idx < bvdev->vdev.num_queues; q_idx++) {
for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
}
@@ -368,7 +368,7 @@ no_bdev_vdev_worker(void *arg)
struct spdk_vhost_blk_dev *bvdev = arg;
uint16_t q_idx;
for (q_idx = 0; q_idx < bvdev->vdev.num_queues; q_idx++) {
for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
no_bdev_process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
}
@@ -433,7 +433,7 @@ free_task_pool(struct spdk_vhost_blk_dev *bvdev)
struct spdk_vhost_virtqueue *vq;
uint16_t i;
for (i = 0; i < bvdev->vdev.num_queues; i++) {
for (i = 0; i < bvdev->vdev.max_queues; i++) {
vq = &bvdev->vdev.virtqueue[i];
if (vq->tasks == NULL) {
continue;
@@ -453,8 +453,12 @@ alloc_task_pool(struct spdk_vhost_blk_dev *bvdev)
uint16_t i;
uint32_t j;
for (i = 0; i < bvdev->vdev.num_queues; i++) {
for (i = 0; i < bvdev->vdev.max_queues; i++) {
vq = &bvdev->vdev.virtqueue[i];
if (vq->vring.desc == NULL) {
continue;
}
task_cnt = vq->vring.size;
if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
/* sanity check */
@@ -492,7 +496,7 @@ static int
spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
{
struct spdk_vhost_blk_dev *bvdev;
int rc = 0;
int i, rc = 0;
bvdev = to_blk_dev(vdev);
if (bvdev == NULL) {
@@ -501,6 +505,15 @@ spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
goto out;
}
/* validate all I/O queues are in a contiguous index range */
for (i = 0; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.desc == NULL) {
SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
rc = -1;
goto out;
}
}
rc = alloc_task_pool(bvdev);
if (rc != 0) {
SPDK_ERRLOG("%s: failed to alloc task pool.\n", bvdev->vdev.name);
@@ -542,7 +555,7 @@ destroy_device_poller_cb(void *arg)
return -1;
}
for (i = 0; i < bvdev->vdev.num_queues; i++) {
for (i = 0; i < bvdev->vdev.max_queues; i++) {
bvdev->vdev.virtqueue[i].next_event_time = 0;
spdk_vhost_vq_used_signal(&bvdev->vdev, &bvdev->vdev.virtqueue[i]);
}

View File

@@ -161,7 +161,7 @@ struct spdk_vhost_dev {
/* Interval used for event coalescing checking. */
uint64_t stats_check_interval;
uint16_t num_queues;
uint16_t max_queues;
uint64_t negotiated_features;

View File

@@ -670,7 +670,7 @@ vdev_worker(void *arg)
struct spdk_vhost_scsi_dev *svdev = arg;
uint32_t q_idx;
for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.num_queues; q_idx++) {
for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
}
@@ -1006,7 +1006,7 @@ free_task_pool(struct spdk_vhost_scsi_dev *svdev)
struct spdk_vhost_virtqueue *vq;
uint16_t i;
for (i = 0; i < svdev->vdev.num_queues; i++) {
for (i = 0; i < svdev->vdev.max_queues; i++) {
vq = &svdev->vdev.virtqueue[i];
if (vq->tasks == NULL) {
continue;
@@ -1026,8 +1026,12 @@ alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
uint16_t i;
uint32_t j;
for (i = 0; i < svdev->vdev.num_queues; i++) {
for (i = 0; i < svdev->vdev.max_queues; i++) {
vq = &svdev->vdev.virtqueue[i];
if (vq->vring.desc == NULL) {
continue;
}
task_cnt = vq->vring.size;
if (task_cnt > SPDK_VHOST_MAX_VQ_SIZE) {
/* sanity check */
@@ -1074,6 +1078,15 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
goto out;
}
/* validate all I/O queues are in a contiguous index range */
for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.desc == NULL) {
SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
rc = -1;
goto out;
}
}
rc = alloc_task_pool(svdev);
if (rc != 0) {
SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
@@ -1090,8 +1103,11 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
vdev->name, vdev->lcore);
svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
MGMT_POLL_PERIOD_US);
}
out:
spdk_vhost_dev_backend_event_done(event_ctx, rc);
return rc;
@@ -1115,7 +1131,7 @@ destroy_device_poller_cb(void *arg)
}
for (i = 0; i < svdev->vdev.num_queues; i++) {
for (i = 0; i < svdev->vdev.max_queues; i++) {
spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
}