nvmf/vfio-user: clarify CQ in handle_queue_connect_rsp()

Rename the local cq variable to admin_cq to make clear that we're
dealing with the admin CQ specifically.

Signed-off-by: John Levon <john.levon@nutanix.com>
Change-Id: I032f6b27e2d75bffb9d95481f177ce0c3655550c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12556
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Author:    John Levon <john.levon@nutanix.com>
Date:      2022-05-05 23:34:41 +01:00
Committer: Jim Harris
parent 10ba934845
commit 0849aadceb

@@ -4566,7 +4566,7 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
 {
 	struct nvmf_vfio_user_poll_group *vu_group;
 	struct nvmf_vfio_user_sq *sq = cb_arg;
-	struct nvmf_vfio_user_cq *cq;
+	struct nvmf_vfio_user_cq *admin_cq;
 	struct nvmf_vfio_user_ctrlr *vu_ctrlr;
 	struct nvmf_vfio_user_endpoint *endpoint;
@@ -4588,8 +4588,8 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
 	vu_group = SPDK_CONTAINEROF(sq->group, struct nvmf_vfio_user_poll_group, group);
 	TAILQ_INSERT_TAIL(&vu_group->sqs, sq, link);
 
-	cq = vu_ctrlr->cqs[0];
-	assert(cq != NULL);
+	admin_cq = vu_ctrlr->cqs[0];
+	assert(admin_cq != NULL);
 
 	pthread_mutex_lock(&endpoint->lock);
 	if (nvmf_qpair_is_admin_queue(&sq->qpair)) {
@@ -4598,7 +4598,7 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
 		vu_ctrlr->ctrlr = sq->qpair.ctrlr;
 		vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
 
-		cq->thread = spdk_get_thread();
+		admin_cq->thread = spdk_get_thread();
 
 		if (in_interrupt_mode(endpoint->transport)) {
 			vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx,
@@ -4626,8 +4626,8 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
 		 * been completed. Complete it now.
 		 */
 		if (sq->post_create_io_sq_completion) {
-			assert(cq->thread != NULL);
-			if (cq->thread != spdk_get_thread()) {
+			assert(admin_cq->thread != NULL);
+			if (admin_cq->thread != spdk_get_thread()) {
 				struct vfio_user_post_cpl_ctx *cpl_ctx;
 
 				cpl_ctx = calloc(1, sizeof(*cpl_ctx));
@@ -4635,16 +4635,17 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
 					return -ENOMEM;
 				}
 				cpl_ctx->ctrlr = vu_ctrlr;
-				cpl_ctx->cq = cq;
+				cpl_ctx->cq = admin_cq;
 				cpl_ctx->cpl.sqid = 0;
 				cpl_ctx->cpl.cdw0 = 0;
 				cpl_ctx->cpl.cid = sq->create_io_sq_cmd.cid;
 				cpl_ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
 				cpl_ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
-				spdk_thread_send_msg(cq->thread, _post_completion_msg, cpl_ctx);
+				spdk_thread_send_msg(admin_cq->thread, _post_completion_msg,
+						     cpl_ctx);
 			} else {
-				post_completion(vu_ctrlr, cq, 0, 0,
+				post_completion(vu_ctrlr, admin_cq, 0, 0,
 						sq->create_io_sq_cmd.cid, SPDK_NVME_SC_SUCCESS, SPDK_NVME_SCT_GENERIC);
 			}
 			sq->post_create_io_sq_completion = false;
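
For readers outside the SPDK tree, a minimal, self-contained sketch of the
convention this rename relies on: queue ID 0 is reserved for the admin queue
pair, so vu_ctrlr->cqs[0] is always the admin completion queue, and the
deferred CREATE IO SQ completion is posted there no matter which I/O SQ just
connected. The struct layouts and the admin_cq_of() helper below are
illustrative stand-ins, not the actual SPDK definitions.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-ins for the SPDK types (not the real definitions). */
struct nvmf_vfio_user_cq {
	int qid;
};

struct nvmf_vfio_user_ctrlr {
	/*
	 * Queue ID 0 is reserved for the admin queue pair, so cqs[0] is
	 * always the admin completion queue once the controller exists.
	 */
	struct nvmf_vfio_user_cq *cqs[32];
};

/* Hypothetical helper spelling out what the renamed variable refers to. */
static struct nvmf_vfio_user_cq *
admin_cq_of(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
{
	struct nvmf_vfio_user_cq *admin_cq = vu_ctrlr->cqs[0];

	assert(admin_cq != NULL);
	return admin_cq;
}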