lib/nvme: rework cuse admin command

For now only the controller-to-host data transfer direction is implemented
for CUSE admin commands.

This patch separates out cuse_nvme_admin_cmd_send() as an entry point
so that the remaining transfer directions can be implemented on top of it.


Change-Id: Ic9013a30c16cf71957c8b411ee00a43c7aa8bbb6
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/1674
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Tomasz Kulasek 2020-04-02 17:24:44 +02:00 committed by Tomasz Zawadzki
parent 8365e15666
commit 8da21f69d3

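The intent of the split, as a hedged sketch: the FUSE ioctl handler classifies an admin command by its data-transfer direction and hands the controller-to-host case to the new cuse_nvme_admin_cmd_send() entry point, so further directions can later get their own send paths without touching the classification. The helper below is purely illustrative and is not part of this patch or of SPDK; only spdk_nvme_opc_get_data_transfer() and the SPDK_NVME_DATA_* values are real SPDK symbols.

#include <errno.h>
#include "spdk/nvme_spec.h"

/* Hypothetical illustration (not in this patch): route an admin command to a
 * direction-specific send routine. With this patch only the controller-to-host
 * routine exists; a host-to-controller routine could be plugged in later. */
static int
dispatch_admin_by_direction(uint8_t opcode, void *ctx,
			    int (*send_c2h)(void *ctx),
			    int (*send_h2c)(void *ctx))
{
	switch (spdk_nvme_opc_get_data_transfer(opcode)) {
	case SPDK_NVME_DATA_CONTROLLER_TO_HOST:
		return send_c2h(ctx);
	case SPDK_NVME_DATA_HOST_TO_CONTROLLER:
		/* Not implemented by this patch; a future send routine goes here. */
		return send_h2c ? send_h2c(ctx) : -ENOTSUP;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		return -EINVAL;	/* mirrors the EINVAL reply in the real handler */
	default:
		/* SPDK_NVME_DATA_NONE handling is omitted from this sketch. */
		return -ENOTSUP;
	}
}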

@@ -128,6 +128,53 @@ cuse_nvme_admin_cmd_execute(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, void *
	}
}

static void
cuse_nvme_admin_cmd_send(fuse_req_t req, struct nvme_admin_cmd *admin_cmd)
{
	struct cuse_io_ctx *ctx;
	struct cuse_device *cuse_device = fuse_req_userdata(req);
	int rv;

	ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
	if (!ctx) {
		SPDK_ERRLOG("Cannot allocate memory for cuse_io_ctx\n");
		fuse_reply_err(req, ENOMEM);
		return;
	}

	ctx->req = req;

	memset(&ctx->nvme_cmd, 0, sizeof(ctx->nvme_cmd));
	ctx->nvme_cmd.opc = admin_cmd->opcode;
	ctx->nvme_cmd.nsid = admin_cmd->nsid;
	ctx->nvme_cmd.cdw10 = admin_cmd->cdw10;
	ctx->nvme_cmd.cdw11 = admin_cmd->cdw11;
	ctx->nvme_cmd.cdw12 = admin_cmd->cdw12;
	ctx->nvme_cmd.cdw13 = admin_cmd->cdw13;
	ctx->nvme_cmd.cdw14 = admin_cmd->cdw14;
	ctx->nvme_cmd.cdw15 = admin_cmd->cdw15;

	ctx->data_len = admin_cmd->data_len;

	if (ctx->data_len > 0) {
		ctx->data = spdk_malloc(ctx->data_len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->data) {
			SPDK_ERRLOG("Cannot allocate memory for data\n");
			fuse_reply_err(req, ENOMEM);
			free(ctx);
			return;
		}
	}

	rv = nvme_io_msg_send(cuse_device->ctrlr, 0, cuse_nvme_admin_cmd_execute, ctx);
	if (rv) {
		SPDK_ERRLOG("Cannot send io msg to the controller\n");
		fuse_reply_err(req, -rv);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void
cuse_nvme_admin_cmd(fuse_req_t req, int cmd, void *arg,
		    struct fuse_file_info *fi, unsigned flags,
@@ -135,9 +182,6 @@ cuse_nvme_admin_cmd(fuse_req_t req, int cmd, void *arg,
{
	struct nvme_admin_cmd *admin_cmd;
	struct iovec in_iov, out_iov[2];
	struct cuse_io_ctx *ctx;
	int rv;
	struct cuse_device *cuse_device = fuse_req_userdata(req);

	in_iov.iov_base = (void *)arg;
	in_iov.iov_len = sizeof(*admin_cmd);
@@ -171,49 +215,13 @@ cuse_nvme_admin_cmd(fuse_req_t req, int cmd, void *arg,
			return;
		}

		ctx = (struct cuse_io_ctx *)calloc(1, sizeof(struct cuse_io_ctx));
		if (!ctx) {
			SPDK_ERRLOG("Cannot allocate memory for cuse_io_ctx\n");
			fuse_reply_err(req, ENOMEM);
		cuse_nvme_admin_cmd_send(req, admin_cmd);
		return;
		}

		ctx->req = req;

		memset(&ctx->nvme_cmd, 0, sizeof(ctx->nvme_cmd));
		ctx->nvme_cmd.opc = admin_cmd->opcode;
		ctx->nvme_cmd.nsid = admin_cmd->nsid;
		ctx->nvme_cmd.cdw10 = admin_cmd->cdw10;
		ctx->nvme_cmd.cdw11 = admin_cmd->cdw11;
		ctx->nvme_cmd.cdw12 = admin_cmd->cdw12;
		ctx->nvme_cmd.cdw13 = admin_cmd->cdw13;
		ctx->nvme_cmd.cdw14 = admin_cmd->cdw14;
		ctx->nvme_cmd.cdw15 = admin_cmd->cdw15;

		ctx->data_len = admin_cmd->data_len;

		if (ctx->data_len > 0) {
			ctx->data = spdk_malloc(ctx->data_len, 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
			if (!ctx->data) {
				SPDK_ERRLOG("Cannot allocate memory for data\n");
				fuse_reply_err(req, ENOMEM);
				free(ctx);
				return;
			}
		}

		break;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		fuse_reply_err(req, EINVAL);
		return;
	}

	rv = nvme_io_msg_send(cuse_device->ctrlr, 0, cuse_nvme_admin_cmd_execute, ctx);
	if (rv) {
		SPDK_ERRLOG("Cannot send io msg to the controller\n");
		fuse_reply_err(req, -rv);
		cuse_io_ctx_free(ctx);
		return;
	}
}

static void