From 94e395631ec1ef4f29a1a5cf73b941837d6b4476 Mon Sep 17 00:00:00 2001 From: Swapnil Ingle Date: Mon, 23 Jan 2023 15:01:54 +0100 Subject: [PATCH] nvmf/vfio_user: move cq_is_full() closer to caller Move cq_is_full() closer to its caller post_completion(), along with fixing comments. Signed-off-by: Swapnil Ingle Change-Id: I93262d1805f0f9f075c6946ed97cd3006ffba130 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16415 Reviewed-by: Jim Harris Tested-by: SPDK CI Jenkins Community-CI: Mellanox Build Bot Reviewed-by: Ben Walker --- lib/nvmf/vfio_user.c | 95 ++++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 47 deletions(-) diff --git a/lib/nvmf/vfio_user.c b/lib/nvmf/vfio_user.c index 0ad4bcc0f..e558edadc 100644 --- a/lib/nvmf/vfio_user.c +++ b/lib/nvmf/vfio_user.c @@ -605,53 +605,6 @@ cq_tail_advance(struct nvmf_vfio_user_cq *cq) } } -static uint32_t -cq_free_slots(struct nvmf_vfio_user_cq *cq) -{ - uint32_t free_slots; - - assert(cq != NULL); - - if (cq->tail == cq->last_head) { - free_slots = cq->size; - } else if (cq->tail > cq->last_head) { - free_slots = cq->size - (cq->tail - cq->last_head); - } else { - free_slots = cq->last_head - cq->tail; - } - assert(free_slots > 0); - - return free_slots - 1; -} - -/* - * As per NVMe Base spec 3.3.1.2.1, we are supposed to implement CQ flow - * control: if there is no space in the CQ, we should wait until there is. - * - * In practice, we just fail the controller instead: as it happens, all host - * implementations we care about right-size the CQ: this is required anyway for - * NVMEoF support (see 3.3.2.8). - * - * Since reading the head doorbell is relatively expensive, we use the cached - * value, so we only have to read it for real if it appears that we are full.
- */ -static inline bool -cq_is_full(struct nvmf_vfio_user_cq *cq) -{ - uint32_t free_cq_slots; - - assert(cq != NULL); - - free_cq_slots = cq_free_slots(cq); - - if (spdk_unlikely(free_cq_slots == 0)) { - cq->last_head = *cq_dbl_headp(cq); - free_cq_slots = cq_free_slots(cq); - } - - return free_cq_slots == 0; -} - static bool io_q_exists(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t qid, const bool is_cq) { @@ -1724,6 +1677,46 @@ vfio_user_map_cmd(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request * static int handle_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd, struct nvmf_vfio_user_sq *sq); +static uint32_t +cq_free_slots(struct nvmf_vfio_user_cq *cq) +{ + uint32_t free_slots; + + assert(cq != NULL); + + if (cq->tail == cq->last_head) { + free_slots = cq->size; + } else if (cq->tail > cq->last_head) { + free_slots = cq->size - (cq->tail - cq->last_head); + } else { + free_slots = cq->last_head - cq->tail; + } + assert(free_slots > 0); + + return free_slots - 1; +} + +/* + * Since reading the head doorbell is relatively expensive, we use the cached + * value, so we only have to read it for real if it appears that we are full. + */ +static inline bool +cq_is_full(struct nvmf_vfio_user_cq *cq) +{ + uint32_t free_cq_slots; + + assert(cq != NULL); + + free_cq_slots = cq_free_slots(cq); + + if (spdk_unlikely(free_cq_slots == 0)) { + cq->last_head = *cq_dbl_headp(cq); + free_cq_slots = cq_free_slots(cq); + } + + return free_cq_slots == 0; +} + /* * Posts a CQE in the completion queue. * @@ -1753,6 +1746,14 @@ post_completion(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_cq *cq assert(spdk_get_thread() == cq->group->group->thread); } + /* + * As per NVMe Base spec 3.3.1.2.1, we are supposed to implement CQ flow + * control: if there is no space in the CQ, we should wait until there is.
+ * + * In practice, we just fail the controller instead: as it happens, all host + * implementations we care about right-size the CQ: this is required anyway for + * NVMEoF support (see 3.3.2.8). + */ if (cq_is_full(cq)) { SPDK_ERRLOG("%s: cqid:%d full (tail=%d, head=%d)\n", ctrlr_id(ctrlr), cq->qid, *cq_tailp(cq),