From e66ea624124ea336c921731ea6a143e69efb6d61 Mon Sep 17 00:00:00 2001
From: Liu Xiaodong
Date: Thu, 5 Jul 2018 03:46:48 -0400
Subject: [PATCH] scripts/rpc.py: add method "send_nvme_cmd"

An example of sending an nvme_cmd rpc request:
scripts/rpc.py send_nvme_cmd -n Nvme0 -t admin -r c2h \
-c -D 4096

The nvme-rpc request will be processed internally by bdev_nvme.

Change-Id: I6e731b76be0f503d48154a8b34a1e81b4b454396
Signed-off-by: Liu Xiaodong
Reviewed-on: https://review.gerrithub.io/417962
Reviewed-by: Ben Walker
Reviewed-by: Jim Harris
Tested-by: SPDK CI Jenkins
---
 doc/jsonrpc.md            |  59 +++++
 lib/bdev/nvme/Makefile    |   2 +-
 lib/bdev/nvme/bdev_nvme.c |  24 ++
 lib/bdev/nvme/bdev_nvme.h |   2 +
 lib/bdev/nvme/nvme_rpc.c  | 487 ++++++++++++++++++++++++++++++++++++++
 scripts/rpc.py            |  27 +++
 scripts/rpc/__init__.py   |   1 +
 scripts/rpc/nvme.py       |  39 +++
 8 files changed, 640 insertions(+), 1 deletion(-)
 create mode 100644 lib/bdev/nvme/nvme_rpc.c
 create mode 100644 scripts/rpc/nvme.py

diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md
index 566d8974b..7051ba168 100644
--- a/doc/jsonrpc.md
+++ b/doc/jsonrpc.md
@@ -4289,3 +4289,62 @@ Example response:
   "result": true
 }
 ~~~
+
+## send_nvme_cmd {#rpc_send_nvme_cmd}
+
+Send an NVMe command directly to an NVMe controller or namespace. Parameters and responses are encoded by base64 urlsafe and need further processing by the caller.
+
+Notice: send_nvme_cmd requires the user to guarantee the correctness of the NVMe command itself as well as of any optional parameters. Illegal command contents or a mismatching buffer size may result in unpredictable behavior.
+
+### Parameters
+
+Name                    | Optional | Type        | Description
+----------------------- | -------- | ----------- | -----------
+name                    | Required | string      | Name of the operating NVMe controller
+cmd_type                | Required | string      | Type of NVMe command. Valid values are: admin, io
+data_direction          | Required | string      | Direction of data transfer. Valid values are: c2h, h2c
+cmdbuf                  | Required | string      | NVMe command encoded by base64 urlsafe
+data                    | Optional | string      | Data to transfer from host to controller, encoded by base64 urlsafe
+metadata                | Optional | string      | Metadata to transfer from host to controller, encoded by base64 urlsafe
+data_len                | Optional | number      | Data length required to transfer from controller to host
+metadata_len            | Optional | number      | Metadata length required to transfer from controller to host
+timeout_ms              | Optional | number      | Command execution timeout value, in milliseconds
+
+### Response
+
+Name                    | Type        | Description
+----------------------- | ----------- | -----------
+cpl                     | string      | NVMe completion queue entry, encoded by base64 urlsafe
+data                    | string      | Data transferred from controller to host, encoded by base64 urlsafe
+metadata                | string      | Metadata transferred from controller to host, encoded by base64 urlsafe
+
+### Example
+
+Example request:
+~~~
+{
+  "jsonrpc": "2.0",
+  "method": "send_nvme_cmd",
+  "id": 1,
+  "params": {
+    "name": "Nvme0",
+    "cmd_type": "admin",
+    "data_direction": "c2h",
+    "cmdbuf": "BgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAsGUs9P5_AAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
+    "data_len": 60
+  }
+}
+~~~
+
+Example response:
+~~~
+{
+  "jsonrpc": "2.0",
+  "id": 1,
+  "result": {
+    "cpl": "AAAAAAAAAAARAAAAWrmwABAA==",
+    "data": "sIjg6AAAAACwiODoAAAAALCI4OgAAAAAAAYAAREAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+  }
+
+}
+~~~
diff --git a/lib/bdev/nvme/Makefile b/lib/bdev/nvme/Makefile
index fbe2fc054..c5a40c749 100644
--- a/lib/bdev/nvme/Makefile
+++ b/lib/bdev/nvme/Makefile
@@ -34,7 +34,7 @@ SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk -C_SRCS = bdev_nvme.c bdev_nvme_rpc.c +C_SRCS = bdev_nvme.c bdev_nvme_rpc.c nvme_rpc.c LIBNAME = bdev_nvme include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk diff --git a/lib/bdev/nvme/bdev_nvme.c b/lib/bdev/nvme/bdev_nvme.c index 784e4eef3..2eb8705de 100644 --- a/lib/bdev/nvme/bdev_nvme.c +++ b/lib/bdev/nvme/bdev_nvme.c @@ -132,6 +132,30 @@ static int bdev_nvme_io_passthru_md(struct nvme_bdev *nbdev, struct spdk_io_chan struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len); static int nvme_ctrlr_create_bdev(struct nvme_ctrlr *nvme_ctrlr, uint32_t nsid); +struct spdk_nvme_qpair * +spdk_bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch) +{ + struct nvme_io_channel *nvme_ch; + + nvme_ch = spdk_io_channel_get_ctx(ctrlr_io_ch); + + return nvme_ch->qpair; +} + +struct nvme_ctrlr * +spdk_bdev_nvme_lookup_ctrlr(const char *ctrlr_name) +{ + struct nvme_ctrlr *_nvme_ctrlr; + + TAILQ_FOREACH(_nvme_ctrlr, &g_nvme_ctrlrs, tailq) { + if (strcmp(ctrlr_name, _nvme_ctrlr->name) == 0) { + return _nvme_ctrlr; + } + } + + return NULL; +} + static int bdev_nvme_get_ctx_size(void) { diff --git a/lib/bdev/nvme/bdev_nvme.h b/lib/bdev/nvme/bdev_nvme.h index 10c73d60f..025d1e591 100644 --- a/lib/bdev/nvme/bdev_nvme.h +++ b/lib/bdev/nvme/bdev_nvme.h @@ -86,6 +86,8 @@ struct nvme_bdev { void spdk_bdev_nvme_dump_trid_json(struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w); +struct spdk_nvme_qpair *spdk_bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch); +struct nvme_ctrlr *spdk_bdev_nvme_lookup_ctrlr(const char *ctrlr_name); void spdk_bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts); int spdk_bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts); int spdk_bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_thread_fn cb, void *cb_ctx); diff --git a/lib/bdev/nvme/nvme_rpc.c b/lib/bdev/nvme/nvme_rpc.c new file mode 100644 index 000000000..b49a7d42e --- /dev/null +++ b/lib/bdev/nvme/nvme_rpc.c @@ -0,0 +1,487 @@ +/*- + * BSD LICENSE + * + * Copyright (c) Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spdk/stdinc.h" +#include "spdk/string.h" +#include "spdk/rpc.h" +#include "spdk/util.h" +#include "spdk/bdev_module.h" +#include "spdk_internal/log.h" + +#include "bdev_nvme.h" +#include "spdk/base64.h" + +enum spdk_nvme_rpc_type { + NVME_ADMIN_CMD = 1, + NVME_IO_CMD, +}; + +struct rpc_send_nvme_cmd_req { + char *name; + int cmd_type; + int data_direction; + uint32_t timeout_ms; + uint32_t data_len; + uint32_t md_len; + + struct spdk_nvme_cmd *cmdbuf; + char *data; + char *md; +}; + +struct rpc_send_nvme_cmd_resp { + char *cpl_text; + char *data_text; + char *md_text; +}; + +struct rpc_send_nvme_cmd_ctx { + struct spdk_jsonrpc_request *jsonrpc_request; + struct rpc_send_nvme_cmd_req req; + struct rpc_send_nvme_cmd_resp resp; + struct nvme_ctrlr *nvme_ctrlr; + struct spdk_io_channel *ctrlr_io_ch; +}; + +static void +free_rpc_send_nvme_cmd_ctx(struct rpc_send_nvme_cmd_ctx *ctx) +{ + assert(ctx != NULL); + + free(ctx->req.name); + free(ctx->req.cmdbuf); + spdk_dma_free(ctx->req.data); + spdk_dma_free(ctx->req.md); + free(ctx->resp.cpl_text); + free(ctx->resp.data_text); + free(ctx->resp.md_text); + free(ctx); +} + +static int +rpc_send_nvme_cmd_resp_construct(struct rpc_send_nvme_cmd_resp *resp, + struct rpc_send_nvme_cmd_req *req, + const struct spdk_nvme_cpl *cpl) +{ + resp->cpl_text = malloc(spdk_base64_get_encoded_strlen(sizeof(*cpl)) + 1); + if (!resp->cpl_text) { + return -ENOMEM; + } + spdk_base64_urlsafe_encode(resp->cpl_text, cpl, sizeof(*cpl)); + + if (req->data_direction == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { + if (req->data_len) { + resp->data_text = malloc(spdk_base64_get_encoded_strlen(req->data_len) + 1); + if (!resp->data_text) { + return -ENOMEM; + } + spdk_base64_urlsafe_encode(resp->data_text, req->data, req->data_len); + } + if (req->md_len) { + resp->md_text = malloc(spdk_base64_get_encoded_strlen(req->md_len) + 1); + if (!resp->md_text) { + return -ENOMEM; + } + spdk_base64_urlsafe_encode(resp->md_text, req->md, req->md_len); + } + } + + return 0; +} + +static void +spdk_rpc_send_nvme_cmd_complete(struct rpc_send_nvme_cmd_ctx *ctx, const struct spdk_nvme_cpl *cpl) +{ + struct spdk_jsonrpc_request *request = ctx->jsonrpc_request; + struct spdk_json_write_ctx *w; + int ret; + + ret = rpc_send_nvme_cmd_resp_construct(&ctx->resp, &ctx->req, cpl); + if (ret) { + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, + spdk_strerror(-ret)); + goto out; + } + + w = spdk_jsonrpc_begin_result(request); + if (w == NULL) { + goto out; + } + + spdk_json_write_object_begin(w); + spdk_json_write_named_string(w, "cpl", ctx->resp.cpl_text); + + if (ctx->resp.data_text) { + spdk_json_write_named_string(w, "data", ctx->resp.data_text); + } + + if (ctx->resp.md_text) { + spdk_json_write_named_string(w, "metadata", ctx->resp.md_text); + } + + spdk_json_write_object_end(w); + spdk_jsonrpc_end_result(request, w); + +out: + free_rpc_send_nvme_cmd_ctx(ctx); + return; +} + +static void +nvme_rpc_bdev_nvme_cb(void *ref, 
const struct spdk_nvme_cpl *cpl) +{ + struct rpc_send_nvme_cmd_ctx *ctx = (struct rpc_send_nvme_cmd_ctx *)ref; + + if (ctx->ctrlr_io_ch) { + spdk_put_io_channel(ctx->ctrlr_io_ch); + ctx->ctrlr_io_ch = NULL; + } + + spdk_rpc_send_nvme_cmd_complete(ctx, cpl); +} + +static int +nvme_rpc_admin_cmd_bdev_nvme(struct rpc_send_nvme_cmd_ctx *ctx, struct spdk_nvme_cmd *cmd, + void *buf, uint32_t nbytes, uint32_t timeout_ms) +{ + struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr; + int ret; + + ret = spdk_nvme_ctrlr_cmd_admin_raw(_nvme_ctrlr->ctrlr, cmd, buf, + nbytes, nvme_rpc_bdev_nvme_cb, ctx); + + return ret; +} + +static int +nvme_rpc_io_cmd_bdev_nvme(struct rpc_send_nvme_cmd_ctx *ctx, struct spdk_nvme_cmd *cmd, + void *buf, uint32_t nbytes, void *md_buf, uint32_t md_len, + uint32_t timeout_ms) +{ + struct nvme_ctrlr *_nvme_ctrlr = ctx->nvme_ctrlr; + struct spdk_nvme_qpair *io_qpair; + int ret; + + ctx->ctrlr_io_ch = spdk_get_io_channel(_nvme_ctrlr->ctrlr); + io_qpair = spdk_bdev_nvme_get_io_qpair(ctx->ctrlr_io_ch); + + ret = spdk_nvme_ctrlr_cmd_io_raw_with_md(_nvme_ctrlr->ctrlr, io_qpair, + cmd, buf, nbytes, md_buf, nvme_rpc_bdev_nvme_cb, ctx); + if (ret) { + spdk_put_io_channel(ctx->ctrlr_io_ch); + } + + return ret; + +} + +static int +rpc_send_nvme_cmd_exec(struct rpc_send_nvme_cmd_ctx *ctx) +{ + struct rpc_send_nvme_cmd_req *req = &ctx->req; + int ret = -EINVAL; + + switch (req->cmd_type) { + case NVME_ADMIN_CMD: + ret = nvme_rpc_admin_cmd_bdev_nvme(ctx, req->cmdbuf, req->data, + req->data_len, req->timeout_ms); + break; + case NVME_IO_CMD: + ret = nvme_rpc_io_cmd_bdev_nvme(ctx, req->cmdbuf, req->data, + req->data_len, req->md, req->md_len, req->timeout_ms); + break; + } + + return ret; +} + +static int +rpc_decode_cmd_type(const struct spdk_json_val *val, void *out) +{ + int *cmd_type = out; + + if (spdk_json_strequal(val, "admin") == true) { + *cmd_type = NVME_ADMIN_CMD; + } else if (spdk_json_strequal(val, "io") == true) { + *cmd_type = NVME_IO_CMD; + } else { + SPDK_NOTICELOG("Invalid parameter value: cmd_type\n"); + return -EINVAL; + } + + return 0; +} + +static int +rpc_decode_data_direction(const struct spdk_json_val *val, void *out) +{ + int *data_direction = out; + + if (spdk_json_strequal(val, "h2c") == true) { + *data_direction = SPDK_NVME_DATA_HOST_TO_CONTROLLER; + } else if (spdk_json_strequal(val, "c2h") == true) { + *data_direction = SPDK_NVME_DATA_CONTROLLER_TO_HOST; + } else { + SPDK_NOTICELOG("Invalid parameter value: data_direction\n"); + return -EINVAL; + } + + return 0; +} + +static int +rpc_decode_cmdbuf(const struct spdk_json_val *val, void *out) +{ + char *text = NULL; + size_t text_strlen, raw_len; + struct spdk_nvme_cmd *cmdbuf, **_cmdbuf = out; + int rc; + + rc = spdk_json_decode_string(val, &text); + if (rc) { + return val->type == SPDK_JSON_VAL_STRING ? -ENOMEM : -EINVAL; + } + + text_strlen = strlen(text); + raw_len = spdk_base64_get_decoded_len(text_strlen); + cmdbuf = malloc(raw_len); + if (!cmdbuf) { + rc = -ENOMEM; + goto out; + } + + rc = spdk_base64_urlsafe_decode(cmdbuf, &raw_len, text); + if (rc) { + goto out; + } + if (raw_len != sizeof(*cmdbuf)) { + rc = -EINVAL; + goto out; + } + + *_cmdbuf = cmdbuf; + +out: + free(text); + return rc; +} + +static int +rpc_decode_data(const struct spdk_json_val *val, void *out) +{ + struct rpc_send_nvme_cmd_req *req = (struct rpc_send_nvme_cmd_req *)out; + char *text = NULL; + size_t text_strlen; + int rc; + + rc = spdk_json_decode_string(val, &text); + if (rc) { + return val->type == SPDK_JSON_VAL_STRING ? 
-ENOMEM : -EINVAL; + } + text_strlen = strlen(text); + + if (req->data_len) { + /* data_len is decoded by param "data_len" */ + if (req->data_len != spdk_base64_get_decoded_len(text_strlen)) { + rc = -EINVAL; + goto out; + } + } else { + req->data_len = spdk_base64_get_decoded_len(text_strlen); + req->data = spdk_dma_malloc(req->data_len > 0x1000 ? req->data_len : 0x1000, 0x1000, NULL); + if (!req->data) { + rc = -ENOMEM; + goto out; + } + } + + rc = spdk_base64_urlsafe_decode(req->data, (size_t *)&req->data_len, text); + +out: + free(text); + return rc; +} + +static int +rpc_decode_data_len(const struct spdk_json_val *val, void *out) +{ + struct rpc_send_nvme_cmd_req *req = (struct rpc_send_nvme_cmd_req *)out; + uint32_t data_len; + int rc; + + rc = spdk_json_decode_uint32(val, &data_len); + if (rc) { + return rc; + } + + if (req->data_len) { + /* data_len is decoded by param "data" */ + if (req->data_len != data_len) { + rc = -EINVAL; + } + } else { + req->data_len = data_len; + req->data = spdk_dma_malloc(req->data_len > 0x1000 ? req->data_len : 0x1000, 0x1000, NULL); + if (!req->data) { + rc = -ENOMEM; + } + } + + return rc; +} + +static int +rpc_decode_metadata(const struct spdk_json_val *val, void *out) +{ + struct rpc_send_nvme_cmd_req *req = (struct rpc_send_nvme_cmd_req *)out; + char *text = NULL; + size_t text_strlen; + int rc; + + rc = spdk_json_decode_string(val, &text); + if (rc) { + return rc = val->type == SPDK_JSON_VAL_STRING ? -ENOMEM : -EINVAL; + } + text_strlen = strlen(text); + + if (req->md_len) { + /* md_len is decoded by param "metadata_len" */ + if (req->md_len != spdk_base64_get_decoded_len(text_strlen)) { + rc = -EINVAL; + goto out; + } + } else { + req->md_len = spdk_base64_get_decoded_len(text_strlen); + req->md = spdk_dma_malloc(req->md_len, 0x1000, NULL); + if (!req->md) { + rc = -ENOMEM; + goto out; + } + } + + rc = spdk_base64_urlsafe_decode(req->md, (size_t *)&req->md_len, text); + +out: + free(text); + return rc; +} + +static int +rpc_decode_metadata_len(const struct spdk_json_val *val, void *out) +{ + struct rpc_send_nvme_cmd_req *req = (struct rpc_send_nvme_cmd_req *)out; + uint32_t md_len; + int rc; + + rc = spdk_json_decode_uint32(val, &md_len); + if (rc) { + return rc; + } + + if (req->md_len) { + /* md_len is decoded by param "metadata" */ + if (req->md_len != md_len) { + rc = -EINVAL; + } + } else { + req->md_len = md_len; + req->md = spdk_dma_malloc(req->md_len, 0x1000, NULL); + if (!req->md) { + rc = -ENOMEM; + } + } + + return rc; +} + +static const struct spdk_json_object_decoder rpc_send_nvme_cmd_req_decoders[] = { + {"name", offsetof(struct rpc_send_nvme_cmd_req, name), spdk_json_decode_string}, + {"cmd_type", offsetof(struct rpc_send_nvme_cmd_req, cmd_type), rpc_decode_cmd_type}, + {"data_direction", offsetof(struct rpc_send_nvme_cmd_req, data_direction), rpc_decode_data_direction}, + {"cmdbuf", offsetof(struct rpc_send_nvme_cmd_req, cmdbuf), rpc_decode_cmdbuf}, + {"timeout_ms", offsetof(struct rpc_send_nvme_cmd_req, timeout_ms), spdk_json_decode_uint32, true}, + {"data_len", 0, rpc_decode_data_len, true}, + {"metadata_len", 0, rpc_decode_metadata_len, true}, + {"data", 0, rpc_decode_data, true}, + {"metadata", 0, rpc_decode_metadata, true}, +}; + +static void +spdk_rpc_send_nvme_cmd(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct rpc_send_nvme_cmd_ctx *ctx; + int ret, error_code; + + ctx = calloc(1, sizeof(*ctx)); + if (!ctx) { + SPDK_ERRLOG("Failed at Malloc ctx\n"); + error_code = 
SPDK_JSONRPC_ERROR_INTERNAL_ERROR;
+		ret = -ENOMEM;
+		goto invalid;
+	}
+
+	if (spdk_json_decode_object(params, rpc_send_nvme_cmd_req_decoders,
+				    SPDK_COUNTOF(rpc_send_nvme_cmd_req_decoders),
+				    &ctx->req)) {
+		SPDK_ERRLOG("spdk_json_decode_object failed\n");
+		error_code = SPDK_JSONRPC_ERROR_INVALID_PARAMS;
+		ret = -EINVAL;
+		goto invalid;
+	}
+
+	ctx->nvme_ctrlr = spdk_bdev_nvme_lookup_ctrlr(ctx->req.name);
+	if (ctx->nvme_ctrlr == NULL) {
+		SPDK_ERRLOG("Failed at device lookup\n");
+		error_code = SPDK_JSONRPC_ERROR_INVALID_PARAMS;
+		ret = -EINVAL;
+		goto invalid;
+	}
+
+	ctx->jsonrpc_request = request;
+
+	ret = rpc_send_nvme_cmd_exec(ctx);
+	if (ret < 0) {
+		SPDK_NOTICELOG("Failed at rpc_send_nvme_cmd_exec\n");
+		error_code = SPDK_JSONRPC_ERROR_INTERNAL_ERROR;
+		goto invalid;
+	}
+
+	return;
+
+invalid:
+	spdk_jsonrpc_send_error_response(request, error_code, spdk_strerror(-ret));
+	if (ctx) { free_rpc_send_nvme_cmd_ctx(ctx); }
+	return;
+}
+SPDK_RPC_REGISTER("send_nvme_cmd", spdk_rpc_send_nvme_cmd, SPDK_RPC_RUNTIME)
diff --git a/scripts/rpc.py b/scripts/rpc.py
index a8114575d..90e6604ba 100755
--- a/scripts/rpc.py
+++ b/scripts/rpc.py
@@ -1780,6 +1780,33 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
 domain:bus:device.function format or domain.bus.device.function format""")
     p.set_defaults(func=scan_ioat_copy_engine)
 
+    # send_nvme_cmd
+    @call_cmd
+    def send_nvme_cmd(args):
+        print_dict(rpc.nvme.send_nvme_cmd(args.client,
+                                          name=args.nvme_name,
+                                          cmd_type=args.cmd_type,
+                                          data_direction=args.data_direction,
+                                          cmdbuf=args.cmdbuf,
+                                          data=args.data,
+                                          metadata=args.metadata,
+                                          data_len=args.data_length,
+                                          metadata_len=args.metadata_length,
+                                          timeout_ms=args.timeout_ms))
+
+    p = subparsers.add_parser('send_nvme_cmd', help='NVMe passthrough command.')
+    p.add_argument('-n', '--nvme-name', help="""Name of the operating NVMe controller""")
+    p.add_argument('-t', '--cmd-type', help="""Type of NVMe command. Valid values are: admin, io""")
+    p.add_argument('-r', '--data-direction', help="""Direction of data transfer. Valid values are: c2h, h2c""")
+    p.add_argument('-c', '--cmdbuf', help="""NVMe command encoded by base64 urlsafe""")
+    p.add_argument('-d', '--data', help="""Data to transfer from host to controller, encoded by base64 urlsafe""")
+    p.add_argument('-m', '--metadata', help="""Metadata to transfer from host to controller, encoded by base64 urlsafe""")
+    p.add_argument('-D', '--data-length', help="""Data length required to transfer from controller to host""", type=int)
+    p.add_argument('-M', '--metadata-length', help="""Metadata length required to transfer from controller to host""", type=int)
+    p.add_argument('-T', '--timeout-ms',
+                   help="""Command execution timeout value, in milliseconds; if 0, don't track timeout""", type=int, default=0)
+    p.set_defaults(func=send_nvme_cmd)
+
     args = parser.parse_args()
 
     try:
diff --git a/scripts/rpc/__init__.py b/scripts/rpc/__init__.py
index 8e8dc1dd1..9a4dbb58c 100644
--- a/scripts/rpc/__init__.py
+++ b/scripts/rpc/__init__.py
@@ -9,6 +9,7 @@ from . import log
 from . import lvol
 from . import nbd
 from . import net
+from . import nvme
 from . import nvmf
 from . import pmem
 from . import subsystem
diff --git a/scripts/rpc/nvme.py b/scripts/rpc/nvme.py
new file mode 100644
index 000000000..fed8740e2
--- /dev/null
+++ b/scripts/rpc/nvme.py
@@ -0,0 +1,39 @@
+
+
+def send_nvme_cmd(client, name, cmd_type, data_direction, cmdbuf,
+                  data=None, metadata=None,
+                  data_len=None, metadata_len=None,
+                  timeout_ms=None):
+    """Send one NVMe command
+
+    Args:
+        name: Name of the operating NVMe controller
+        cmd_type: Type of NVMe command. Valid values are: admin, io
+        data_direction: Direction of data transfer. Valid values are: c2h, h2c
+        cmdbuf: NVMe command encoded by base64 urlsafe
+        data: Data to transfer from host to controller, encoded by base64 urlsafe
+        metadata: Metadata to transfer from host to controller, encoded by base64 urlsafe
+        data_len: Data length required to transfer from controller to host
+        metadata_len: Metadata length required to transfer from controller to host
+        timeout_ms: Command execution timeout value, in milliseconds; if 0, don't track timeout
+
+    Returns:
+        NVMe completion queue entry, requested data and metadata, all encoded by base64 urlsafe.
+    """
+    params = {'name': name,
+              'cmd_type': cmd_type,
+              'data_direction': data_direction,
+              'cmdbuf': cmdbuf}
+
+    if data:
+        params['data'] = data
+    if metadata:
+        params['metadata'] = metadata
+    if data_len:
+        params['data_len'] = data_len
+    if metadata_len:
+        params['metadata_len'] = metadata_len
+    if timeout_ms:
+        params['timeout_ms'] = timeout_ms
+
+    return client.call('send_nvme_cmd', params)
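
As a usage sketch of the new RPC, the snippet below packs a 64-byte Identify Controller admin command (opcode 0x06, CNS 1), encodes it with urlsafe base64, and drives it through the rpc.nvme.send_nvme_cmd() helper added by this patch, mirroring the `-t admin -r c2h -D 4096` example from the commit message. The JSONRPCClient import, the /var/tmp/spdk.sock socket path, and the controller name Nvme0 are assumptions about the local setup, not part of the patch; run it from the scripts/ directory so the rpc package resolves.

~~~python
# Usage sketch: Identify Controller via send_nvme_cmd (admin, c2h, 4096 bytes).
# Assumes an SPDK app is listening on /var/tmp/spdk.sock and a controller named
# "Nvme0" exists; run from the scripts/ directory.
import base64
import struct

import rpc.nvme
from rpc.client import JSONRPCClient  # assumed SPDK JSON-RPC client helper


def build_identify_ctrlr_cmd():
    """Pack a 64-byte struct spdk_nvme_cmd for Identify Controller (CNS=1)."""
    cmd = bytearray(64)
    struct.pack_into('<I', cmd, 0, 0x06)  # CDW0: opcode 0x06 (Identify)
    struct.pack_into('<I', cmd, 40, 1)    # CDW10: CNS = 1 (controller data)
    return bytes(cmd)                     # PRP/SGL fields are set up by the driver


client = JSONRPCClient('/var/tmp/spdk.sock')
cmdbuf = base64.urlsafe_b64encode(build_identify_ctrlr_cmd()).decode()

result = rpc.nvme.send_nvme_cmd(client, name='Nvme0', cmd_type='admin',
                                data_direction='c2h', cmdbuf=cmdbuf,
                                data_len=4096)

# Both fields come back urlsafe-base64 encoded, as documented above.
cpl = base64.urlsafe_b64decode(result['cpl'])
data = base64.urlsafe_b64decode(result['data'])
print('completion status:', hex(struct.unpack_from('<H', cpl, 14)[0]))
print('model number:', data[24:64].decode('ascii', 'ignore').strip())
~~~

The data_len of 4096 matches the size of the Identify data structure; on the target side, rpc_decode_data_len allocates at least a 4 KiB DMA buffer for the transfer either way.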
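
The same helper drives I/O passthrough. Below is a minimal sketch of reading the first LBA of namespace 1 with cmd_type 'io'; the 512-byte block size, namespace ID, socket path, and controller name are assumptions about the attached device.

~~~python
# Sketch: NVM Read passthrough (cmd_type 'io', c2h), assuming namespace 1 is
# formatted with 512-byte blocks.
import base64
import struct

import rpc.nvme
from rpc.client import JSONRPCClient  # assumed SPDK JSON-RPC client helper


def build_read_cmd(nsid=1, slba=0, nlb=1):
    """Pack a 64-byte struct spdk_nvme_cmd for an NVM Read (opcode 0x02)."""
    cmd = bytearray(64)
    struct.pack_into('<I', cmd, 0, 0x02)      # CDW0: opcode 0x02 (Read)
    struct.pack_into('<I', cmd, 4, nsid)      # NSID
    struct.pack_into('<Q', cmd, 40, slba)     # CDW10-11: starting LBA
    struct.pack_into('<I', cmd, 48, nlb - 1)  # CDW12: number of blocks, 0-based
    return bytes(cmd)


client = JSONRPCClient('/var/tmp/spdk.sock')
cmdbuf = base64.urlsafe_b64encode(build_read_cmd()).decode()
result = rpc.nvme.send_nvme_cmd(client, name='Nvme0', cmd_type='io',
                                data_direction='c2h', cmdbuf=cmdbuf,
                                data_len=512)
lba0 = base64.urlsafe_b64decode(result['data'])
~~~

Because rpc_decode_cmdbuf rejects any buffer whose decoded length differs from sizeof(struct spdk_nvme_cmd), both sketches pack a full 64-byte submission queue entry.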