diff --git a/include/spdk_internal/virtio.h b/include/spdk_internal/virtio.h
index 3517fb95a..041472319 100644
--- a/include/spdk_internal/virtio.h
+++ b/include/spdk_internal/virtio.h
@@ -193,16 +193,18 @@ typedef int (*virtio_pci_create_cb)(struct virtio_pci_ctx *pci_ctx, void *ctx);
 uint16_t virtio_recv_pkts(struct virtqueue *vq, void **io, uint32_t *len, uint16_t io_cnt);

 /**
- * Start a new request on the current vring head position. The request will
- * be bound to given opaque cookie object. All previous requests will be
- * still kept in a ring until they are flushed or the request is aborted.
- * If a previous request is empty (no descriptors have been added) this call
- * will overwrite it. The device owning given virtqueue must be started.
+ * Start a new request on the current vring head position and associate it
+ * with an opaque cookie object. The previous request in the given vq will be
+ * made visible to the device in the hope that it can be processed early, but
+ * there is no guarantee it will be until the device is notified with \c
+ * virtqueue_req_flush. This behavior is simply an optimization and virtqueues
+ * must always be flushed. Empty requests (with no descriptors added) will be
+ * ignored. The device owning the given virtqueue must be started.
  *
  * \param vq virtio queue
- * \param cookie opaque object to bind with this request. Once the request
+ * \param cookie opaque object to associate with this request. Once the request
  * is sent, processed and a response is received, the same object will be
- * returned to the user calling the virtio poll API.
+ * returned to the user after calling the virtio poll API.
  * \param iovcnt number of required iovectors for the request. This can be
  * higher than the actual number of iovectors to be added.
  * \return 0 on success or negative errno otherwise. If the `iovcnt` is
@@ -212,9 +214,8 @@ uint16_t virtio_recv_pkts(struct virtqueue *vq, void **io, uint32_t *len, uint16
 int virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt);

 /**
- * Flush a virtqueue. This will make the host device see and process all
- * previously queued requests. An interrupt might be automatically sent if
- * the host device expects it. The device owning given virtqueue must be started.
+ * Flush a virtqueue. This will notify the device if required.
+ * The device owning the given virtqueue must be started.
  *
  * \param vq virtio queue
  */
diff --git a/lib/virtio/virtio.c b/lib/virtio/virtio.c
index 987045b72..60e2a018a 100644
--- a/lib/virtio/virtio.c
+++ b/lib/virtio/virtio.c
@@ -415,19 +415,41 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, void **rx_pkts,
 	return i;
 }

+static void
+finish_req(struct virtqueue *vq)
+{
+	struct vring_desc *desc;
+	uint16_t avail_idx;
+
+	desc = &vq->vq_ring.desc[vq->req_end];
+	desc->flags &= ~VRING_DESC_F_NEXT;
+
+	/*
+	 * Place the head of the descriptor chain into the next slot and make
+	 * it usable to the host. The chain is made available now rather than
+	 * deferring to virtqueue_req_flush() in the hopes that if the host is
+	 * currently running on another CPU, we can keep it processing the new
+	 * descriptor.
+	 */
+	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+	vq->vq_ring.avail->ring[avail_idx] = vq->req_start;
+	vq->vq_avail_idx++;
+	vq->req_end = VQ_RING_DESC_CHAIN_END;
+	virtio_wmb();
+	vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
 int
 virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
 {
-	struct vring_desc *desc;
 	struct vq_desc_extra *dxp;

 	if (iovcnt > vq->vq_free_cnt) {
 		return iovcnt > vq->vq_nentries ? -EINVAL : -ENOMEM;
 	}

-	if (vq->req_start != VQ_RING_DESC_CHAIN_END) {
-		desc = &vq->vq_ring.desc[vq->req_end];
-		desc->flags &= ~VRING_DESC_F_NEXT;
+	if (vq->req_end != VQ_RING_DESC_CHAIN_END) {
+		finish_req(vq);
 	}

 	vq->req_start = vq->vq_desc_head_idx;
@@ -441,34 +463,14 @@ virtqueue_req_start(struct virtqueue *vq, void *cookie, int iovcnt)
 void
 virtqueue_req_flush(struct virtqueue *vq)
 {
-	struct vring_desc *desc;
-	uint16_t avail_idx;
-
-	if (vq->req_start == VQ_RING_DESC_CHAIN_END) {
-		/* no requests have been started */
+	if (vq->req_end == VQ_RING_DESC_CHAIN_END) {
+		/* no non-empty requests have been started */
 		return;
 	}

-	desc = &vq->vq_ring.desc[vq->req_end];
-	desc->flags &= ~VRING_DESC_F_NEXT;
-
-	/*
-	 * Place the head of the descriptor chain into the next slot and make
-	 * it usable to the host. The chain is made available now rather than
-	 * deferring to virtqueue_notify() in the hopes that if the host is
-	 * currently running on another CPU, we can keep it processing the new
-	 * descriptor.
-	 */
-	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
-	vq->vq_ring.avail->ring[avail_idx] = vq->req_start;
-
-	vq->vq_avail_idx++;
-	vq->req_start = VQ_RING_DESC_CHAIN_END;
-
-	virtio_wmb();
-	vq->vq_ring.avail->idx = vq->vq_avail_idx;
-
+	finish_req(vq);
 	virtio_mb();
+
 	if (spdk_unlikely(!(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY))) {
 		virtio_dev_backend_ops(vq->vdev)->notify_queue(vq->vdev, vq);
 		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_DEV, "Notified backend after xmit\n");
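For context, a minimal usage sketch of the resulting API is below. It assumes the virtqueue_req_add_iovs() helper and the SPDK_VIRTIO_DESC_RO/SPDK_VIRTIO_DESC_WR descriptor types declared in the same header; the send_two_requests() wrapper and its parameters are hypothetical. It shows how the second virtqueue_req_start() implicitly publishes the first request, while virtqueue_req_flush() remains mandatory:

#include "spdk_internal/virtio.h"

/*
 * Hypothetical example, not part of the patch: enqueue two single-iovec
 * requests and flush once. Assumes a started device.
 */
static int
send_two_requests(struct virtqueue *vq, void *cookie1, struct iovec *iov1,
		  void *cookie2, struct iovec *iov2)
{
	int rc;

	/* Claim descriptors for the first request and bind it to cookie1. */
	rc = virtqueue_req_start(vq, cookie1, 1);
	if (rc != 0) {
		return rc; /* -ENOMEM: ring full; -EINVAL: iovcnt > queue size */
	}
	virtqueue_req_add_iovs(vq, iov1, 1, SPDK_VIRTIO_DESC_RO);

	/* Starting the next request finishes the previous one and places it
	 * in the avail ring, so the device may pick it up early. It is still
	 * not guaranteed to be processed until the flush below.
	 */
	rc = virtqueue_req_start(vq, cookie2, 1);
	if (rc != 0) {
		/* cookie1's request stays queued; flush so it is not lost. */
		virtqueue_req_flush(vq);
		return rc;
	}
	virtqueue_req_add_iovs(vq, iov2, 1, SPDK_VIRTIO_DESC_WR);

	/* Publish the last request and notify the device unless it has set
	 * VRING_USED_F_NO_NOTIFY.
	 */
	virtqueue_req_flush(vq);
	return 0;
}

Completions would then be polled with virtio_recv_pkts(), which hands cookie1 and cookie2 back once the device marks the corresponding chains as used.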