From 2cd9f3959d14b2537bb61f7e59009ac52969eeb8 Mon Sep 17 00:00:00 2001
From: Changpeng Liu
Date: Wed, 25 Apr 2018 05:25:48 -0400
Subject: [PATCH] vhost/nvme: move completion irq signal into IO poller context

Previously, when an IO finished, we posted the completion entry and
signaled the irq event in the same IO completion callback. For
performance, move the irq signaling routine into the IO poller context;
this can also be used to implement an interrupt coalescing feature in
the future.

Change-Id: Ic20b50af47b73ffcb91938802e18b316c07a4d11
Signed-off-by: Changpeng Liu
Reviewed-on: https://review.gerrithub.io/408943
Tested-by: SPDK Automated Test System
Reviewed-by: Daniel Verkamp
Reviewed-by: Pawel Wodkowski
Reviewed-by: Jim Harris
---
 lib/vhost/vhost_nvme.c | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/lib/vhost/vhost_nvme.c b/lib/vhost/vhost_nvme.c
index 9681121b6..dd3447298 100644
--- a/lib/vhost/vhost_nvme.c
+++ b/lib/vhost/vhost_nvme.c
@@ -72,6 +72,7 @@ struct spdk_vhost_nvme_cq {
 	volatile struct spdk_nvme_cpl *cq_cqe;
 	uint16_t cq_head;
 	uint16_t guest_signaled_cq_head;
+	uint32_t need_signaled_cnt;
 	STAILQ_HEAD(, spdk_vhost_nvme_task) cq_full_waited_tasks;
 	bool irq_enabled;
 	int virq;
@@ -284,6 +285,28 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
 	return 0;
 }
 
+static void
+spdk_nvme_cq_signal_fd(struct spdk_vhost_nvme_dev *nvme)
+{
+	struct spdk_vhost_nvme_cq *cq;
+	uint32_t qid, cq_head;
+
+	assert(nvme != NULL);
+
+	for (qid = 1; qid <= MAX_IO_QUEUES; qid++) {
+		cq = spdk_vhost_nvme_get_cq_from_qid(nvme, qid);
+		if (!cq || !cq->valid) {
+			continue;
+		}
+
+		cq_head = nvme->dbbuf_dbs[cq_offset(qid, 1)];
+		if (cq->irq_enabled && cq->need_signaled_cnt && (cq->cq_head != cq_head)) {
+			eventfd_write(cq->virq, (eventfd_t)1);
+			cq->need_signaled_cnt = 0;
+		}
+	}
+}
+
 static void
 blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 {
@@ -338,14 +361,11 @@ blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg
 	cq->cq_cqe[cq->cq_head].status.p = cq->phase;
 
 	nvme_inc_cq_head(cq);
+	cq->need_signaled_cnt++;
 
 	/* MMIO Controll */
 	nvme->dbbuf_eis[cq_offset(cqid, 1)] = (uint32_t)(cq->guest_signaled_cq_head - 1);
 
-	if (cq->irq_enabled && (cq->cq_head != cq->guest_signaled_cq_head)) {
-		eventfd_write(cq->virq, (eventfd_t)1);
-	}
-
 	STAILQ_INSERT_TAIL(&nvme->free_tasks, task, stailq);
 }
 
@@ -570,6 +590,9 @@ nvme_worker(void *arg)
 		}
 	}
 
+	/* Completion Queue */
+	spdk_nvme_cq_signal_fd(nvme);
+
 	return count;
 }
 
@@ -716,6 +739,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
 	cq->virq = -1;
 	cq->cq_head = 0;
 	cq->guest_signaled_cq_head = 0;
+	cq->need_signaled_cnt = 0;
 	requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
 	cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
 	if (!cq->cq_cqe) {
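
Note: for readers skimming the diff, here is a minimal standalone sketch
of the pattern this patch introduces: the completion callback only counts
pending completions, and the poller flushes one eventfd notification for
the whole batch. This is plain C, not SPDK code; all demo_* names are
invented for the example, and the real code additionally compares the
CQ head against the guest's shadow doorbell before signaling.

/*
 * Sketch only: deferred irq signaling with a pending-completion counter.
 * The demo_* types and functions are illustrative, not SPDK APIs.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

struct demo_cq {
	int      virq;              /* eventfd used to interrupt the guest */
	int      irq_enabled;       /* guest enabled interrupts for this CQ */
	uint32_t need_signaled_cnt; /* completions posted since last signal */
};

/* Completion callback: post the entry, but only count the pending irq. */
static void
demo_complete(struct demo_cq *cq)
{
	/* ... the completion entry would be written into the CQ here ... */
	cq->need_signaled_cnt++;
}

/* Poller: one eventfd write covers every completion counted so far. */
static void
demo_poller_signal(struct demo_cq *cq)
{
	if (cq->irq_enabled && cq->need_signaled_cnt) {
		eventfd_write(cq->virq, (eventfd_t)1);
		cq->need_signaled_cnt = 0;
	}
}

int
main(void)
{
	struct demo_cq cq = {
		.virq = eventfd(0, 0),
		.irq_enabled = 1,
		.need_signaled_cnt = 0,
	};
	eventfd_t val;

	assert(cq.virq >= 0);

	/* Three completions arrive before the poller runs... */
	demo_complete(&cq);
	demo_complete(&cq);
	demo_complete(&cq);

	/* ...but the poller issues a single guest notification. */
	demo_poller_signal(&cq);

	eventfd_read(cq.virq, &val);
	printf("signaled once for 3 completions, eventfd value = %llu\n",
	       (unsigned long long)val);

	close(cq.virq);
	return 0;
}

Batching the eventfd_write this way amortizes one syscall (and one guest
interrupt) over many completions, and the per-CQ counter is the natural
hook for a future interrupt coalescing policy, as the commit message notes.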