From 49c0d28ab1fc8a8b37226032e15680fa2ba428a9 Mon Sep 17 00:00:00 2001
From: John Levon
Date: Sat, 7 Jan 2023 18:31:25 +0000
Subject: [PATCH] nvmf: drop req->data usage in ctrlr.c

Refer to req->iov instead of req->data. As the queue connection code
already presumes a single data buffer, add some sanity checking for
this. We also need to fix vfio_user.c as a result to correctly set
->iovcnt.

Signed-off-by: John Levon
Change-Id: Ib1e4ef3885200ffc5194f00b4e3fe20ab1934fd7
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16194
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Aleksey Marchuk
Reviewed-by: Jim Harris
---
 lib/nvmf/ctrlr.c     | 24 ++++++++++++++++++------
 lib/nvmf/vfio_user.c | 16 +++++++++-------
 2 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/lib/nvmf/ctrlr.c b/lib/nvmf/ctrlr.c
index ca1265c67..f7973629d 100644
--- a/lib/nvmf/ctrlr.c
+++ b/lib/nvmf/ctrlr.c
@@ -601,7 +601,7 @@ _nvmf_ctrlr_add_io_qpair(void *ctx)
 {
 	struct spdk_nvmf_request *req = ctx;
 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
-	struct spdk_nvmf_fabric_connect_data *data = req->data;
+	struct spdk_nvmf_fabric_connect_data *data;
 	struct spdk_nvmf_ctrlr *ctrlr;
 	struct spdk_nvmf_qpair *qpair = req->qpair;
 	struct spdk_nvmf_qpair *admin_qpair;
@@ -610,6 +610,10 @@ _nvmf_ctrlr_add_io_qpair(void *ctx)
 	struct spdk_nvme_transport_id listen_trid = {};
 	const struct spdk_nvmf_subsystem_listener *listener;
 
+	assert(req->iovcnt == 1);
+
+	data = req->iov[0].iov_base;
+
 	SPDK_DEBUGLOG(nvmf, "Connect I/O Queue for controller id 0x%x\n", data->cntlid);
 
 	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
@@ -693,7 +697,7 @@ nvmf_qpair_access_allowed(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_subsys
 static int
 _nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_fabric_connect_data *data = req->data;
+	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
 	struct spdk_nvmf_fabric_connect_cmd *cmd = &req->cmd->connect_cmd;
 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
 	struct spdk_nvmf_qpair *qpair = req->qpair;
@@ -811,8 +815,9 @@ nvmf_subsystem_pg_from_connect_cmd(struct spdk_nvmf_request *req)
 
 	assert(nvmf_request_is_fabric_connect(req));
 	assert(req->qpair->ctrlr == NULL);
+	assert(req->iovcnt == 1);
 
-	data = req->data;
+	data = req->iov[0].iov_base;
 	tgt = req->qpair->transport->tgt;
 
 	subsystem = spdk_nvmf_tgt_find_subsystem(tgt, data->subnqn);
@@ -831,6 +836,13 @@ spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 	struct spdk_nvmf_qpair *qpair = req->qpair;
 	enum spdk_nvmf_request_exec_status status;
 
+	if (req->iovcnt > 1) {
+		SPDK_ERRLOG("Connect command invalid iovcnt: %d\n", req->iovcnt);
+		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+		status = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		goto out;
+	}
+
 	sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
 	if (!sgroup) {
 		SPDK_NVMF_INVALID_CONNECT_DATA(rsp, subnqn);
@@ -874,7 +886,7 @@ retry_connect(void *arg)
 static int
 nvmf_ctrlr_cmd_connect(struct spdk_nvmf_request *req)
 {
-	struct spdk_nvmf_fabric_connect_data *data = req->data;
+	struct spdk_nvmf_fabric_connect_data *data = req->iov[0].iov_base;
 	struct spdk_nvmf_fabric_connect_rsp *rsp = &req->rsp->connect_rsp;
 	struct spdk_nvmf_transport *transport = req->qpair->transport;
 	struct spdk_nvmf_subsystem *subsystem;
@@ -2474,7 +2486,7 @@ nvmf_ctrlr_get_log_page(struct spdk_nvmf_request *req)
 	uint32_t rae, numdl, numdu;
 	uint8_t lid;
 
-	if (req->data == NULL) {
+	if (req->iovcnt < 1) {
 		SPDK_DEBUGLOG(nvmf, "get log command with no buffer\n");
 		response->status.sct = SPDK_NVME_SCT_GENERIC;
 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
@@ -3506,7 +3518,7 @@ nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
 	}
 
-	if (req->data && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
+	if (req->iovcnt && spdk_nvme_opc_get_data_transfer(cmd->opc) == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
 		spdk_iov_memset(req->iov, req->iovcnt, 0);
 	}
 
diff --git a/lib/nvmf/vfio_user.c b/lib/nvmf/vfio_user.c
index c6c2b0d40..0317f5c84 100644
--- a/lib/nvmf/vfio_user.c
+++ b/lib/nvmf/vfio_user.c
@@ -5173,13 +5173,16 @@ nvmf_vfio_user_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
 	req->cmd->connect_cmd.qid = admin ? 0 : qpair->qid;
 
 	req->length = sizeof(struct spdk_nvmf_fabric_connect_data);
-	req->data = calloc(1, req->length);
-	if (req->data == NULL) {
+
+	data = calloc(1, req->length);
+	if (data == NULL) {
 		nvmf_vfio_user_req_free(req);
 		return -ENOMEM;
 	}
 
-	data = (struct spdk_nvmf_fabric_connect_data *)req->data;
+	spdk_iov_one(req->iov, &req->iovcnt, data, req->length);
+	req->data = data;
+
 	data->cntlid = ctrlr->cntlid;
 	snprintf(data->subnqn, sizeof(data->subnqn), "%s",
 		 spdk_nvmf_subsystem_get_nqn(ctrlr->endpoint->subsystem));
@@ -5221,6 +5224,9 @@ _nvmf_vfio_user_req_free(struct nvmf_vfio_user_sq *sq, struct nvmf_vfio_user_req
 	memset(&vu_req->cmd, 0, sizeof(vu_req->cmd));
 	memset(&vu_req->rsp, 0, sizeof(vu_req->rsp));
 	vu_req->iovcnt = 0;
+	vu_req->req.iovcnt = 0;
+	vu_req->req.data = NULL;
+	vu_req->req.length = 0;
 	vu_req->state = VFIO_USER_REQUEST_STATE_FREE;
 
 	TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
@@ -5355,8 +5361,6 @@ map_admin_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request *
 	int iovcnt;
 
 	req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
-	req->length = 0;
-	req->data = NULL;
 
 	if (req->xfer == SPDK_NVME_DATA_NONE) {
 		return 0;
@@ -5439,8 +5443,6 @@ map_io_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request *req
 	cmd = &req->cmd->nvme_cmd;
 	req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
-	req->length = 0;
-	req->data = NULL;
 
 	if (spdk_unlikely(req->xfer == SPDK_NVME_DATA_NONE)) {
 		return 0;
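
Note (editor's sketch, not part of the patch): the pattern this change standardizes on is that
consumers of a fabric connect request no longer dereference req->data; they assert that the
request carries exactly one iovec and take the buffer from req->iov[0].iov_base, while the
producer side (vfio_user.c above) uses spdk_iov_one() to populate that single entry and set
->iovcnt to 1. A minimal illustration of the consumer side, where connect_data_from_req() is a
hypothetical helper name, not a function in the diff:

#include <assert.h>
#include "spdk/nvmf_transport.h"	/* struct spdk_nvmf_request */
#include "spdk/nvmf_spec.h"		/* struct spdk_nvmf_fabric_connect_data */

/* Hypothetical helper showing the single-buffer access pattern used by the
 * connect path after this patch. */
static struct spdk_nvmf_fabric_connect_data *
connect_data_from_req(struct spdk_nvmf_request *req)
{
	/* The queue connection code presumes a single contiguous data buffer. */
	assert(req->iovcnt == 1);
	assert(req->iov[0].iov_len >= sizeof(struct spdk_nvmf_fabric_connect_data));

	return req->iov[0].iov_base;
}

Keeping req->iov/req->iovcnt authoritative (rather than a parallel req->data pointer) means a
request always describes its payload the same way regardless of transport, which is why the
free path above also resets req.iovcnt, req.data, and req.length together.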