From ef4a5bc922b94d064f6bfc8917d884f3387ca9e1 Mon Sep 17 00:00:00 2001 From: Changpeng Liu Date: Thu, 11 Jan 2018 23:06:49 -0500 Subject: [PATCH] rpc/vhost_nvme: add rpc support for vhost-nvme target Change-Id: I215bc269dee704e60a167023e2a6c24d3ae1fab0 Signed-off-by: Changpeng Liu Reviewed-on: https://review.gerrithub.io/395404 Tested-by: SPDK Automated Test System Reviewed-by: Daniel Verkamp Reviewed-by: Ben Walker --- lib/vhost/vhost_nvme.c | 8 +-- lib/vhost/vhost_rpc.c | 146 +++++++++++++++++++++++++++++++++++++++++ scripts/rpc.py | 19 ++++++ scripts/rpc/vhost.py | 21 ++++++ 4 files changed, 190 insertions(+), 4 deletions(-) diff --git a/lib/vhost/vhost_nvme.c b/lib/vhost/vhost_nvme.c index b2ded8387..cab826282 100644 --- a/lib/vhost/vhost_nvme.c +++ b/lib/vhost/vhost_nvme.c @@ -1205,18 +1205,18 @@ spdk_vhost_nvme_dev_add_ns(struct spdk_vhost_dev *vdev, const char *bdev_name) int rc = -1; if (nvme == NULL) { - return -1; + return -ENODEV; } if (nvme->num_ns == MAX_NAMESPACE) { SPDK_ERRLOG("Can't support %d Namespaces\n", nvme->num_ns); - return -1; + return -ENOSPC; } bdev = spdk_bdev_get_by_name(bdev_name); if (!bdev) { SPDK_ERRLOG("could not find bdev %s\n", bdev_name); - return -1; + return -ENODEV; } ns = &nvme->ns[nvme->num_ns]; @@ -1224,7 +1224,7 @@ spdk_vhost_nvme_dev_add_ns(struct spdk_vhost_dev *vdev, const char *bdev_name) if (rc != 0) { SPDK_ERRLOG("Could not open bdev '%s', error=%d\n", bdev_name, rc); - return -1; + return rc; } nvme->ns[nvme->num_ns].bdev = bdev; diff --git a/lib/vhost/vhost_rpc.c b/lib/vhost/vhost_rpc.c index b76482047..0493f0ee3 100644 --- a/lib/vhost/vhost_rpc.c +++ b/lib/vhost/vhost_rpc.c @@ -607,5 +607,151 @@ invalid: } SPDK_RPC_REGISTER("set_vhost_controller_coalescing", spdk_rpc_set_vhost_controller_coalescing) +struct rpc_vhost_nvme_ctrlr { + char *ctrlr; + uint32_t io_queues; + char *cpumask; +}; + +static const struct spdk_json_object_decoder rpc_construct_vhost_nvme_ctrlr[] = { + {"ctrlr", offsetof(struct 
rpc_vhost_nvme_ctrlr, ctrlr), spdk_json_decode_string }, + {"io_queues", offsetof(struct rpc_vhost_nvme_ctrlr, io_queues), spdk_json_decode_uint32}, + {"cpumask", offsetof(struct rpc_vhost_nvme_ctrlr, cpumask), spdk_json_decode_string, true}, +}; + +static void +free_rpc_vhost_nvme_ctrlr(struct rpc_vhost_nvme_ctrlr *req) +{ + free(req->ctrlr); + free(req->cpumask); +} + +static void +spdk_rpc_construct_vhost_nvme_controller(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct rpc_vhost_nvme_ctrlr req = {0}; + struct spdk_json_write_ctx *w; + int rc; + + if (spdk_json_decode_object(params, rpc_construct_vhost_nvme_ctrlr, + SPDK_COUNTOF(rpc_construct_vhost_nvme_ctrlr), + &req)) { + rc = -EINVAL; + goto invalid; + } + + rc = spdk_vhost_nvme_dev_construct(req.ctrlr, req.cpumask, req.io_queues); + if (rc < 0) { + free_rpc_vhost_nvme_ctrlr(&req); + goto invalid; + } + + free_rpc_vhost_nvme_ctrlr(&req); + + w = spdk_jsonrpc_begin_result(request); + if (w == NULL) { + return; + } + + spdk_json_write_bool(w, true); + spdk_jsonrpc_end_result(request, w); + return; + +invalid: + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + spdk_strerror(-rc)); + +} +SPDK_RPC_REGISTER("construct_vhost_nvme_controller", spdk_rpc_construct_vhost_nvme_controller) + +struct rpc_add_vhost_nvme_ctrlr_ns { + char *ctrlr; + char *bdev_name; + struct spdk_jsonrpc_request *request; +}; + +static void +free_rpc_add_vhost_nvme_ctrlr_ns(struct rpc_add_vhost_nvme_ctrlr_ns *req) +{ + free(req->ctrlr); + free(req->bdev_name); + free(req); +} + +static const struct spdk_json_object_decoder rpc_vhost_nvme_add_ns[] = { + {"ctrlr", offsetof(struct rpc_add_vhost_nvme_ctrlr_ns, ctrlr), spdk_json_decode_string }, + {"bdev_name", offsetof(struct rpc_add_vhost_nvme_ctrlr_ns, bdev_name), spdk_json_decode_string }, +}; + +static int +spdk_rpc_add_vhost_nvme_ns_cb(struct spdk_vhost_dev *vdev, void *arg) +{ + struct rpc_add_vhost_nvme_ctrlr_ns *rpc = 
arg; + struct spdk_jsonrpc_request *request = rpc->request; + struct spdk_json_write_ctx *w; + int rc; + + if (vdev == NULL) { + rc = -ENODEV; + goto invalid; + } + + rc = spdk_vhost_nvme_dev_add_ns(vdev, rpc->bdev_name); + if (rc < 0) { + goto invalid; + } + free_rpc_add_vhost_nvme_ctrlr_ns(rpc); + + w = spdk_jsonrpc_begin_result(request); + if (w == NULL) { + return -1; + } + + spdk_json_write_bool(w, true); + spdk_jsonrpc_end_result(request, w); + return 0; + +invalid: + free_rpc_add_vhost_nvme_ctrlr_ns(rpc); + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + spdk_strerror(-rc)); + return rc; +} + +static void +spdk_rpc_add_vhost_nvme_ns(struct spdk_jsonrpc_request *request, + const struct spdk_json_val *params) +{ + struct rpc_add_vhost_nvme_ctrlr_ns *req; + int rc; + + req = calloc(1, sizeof(*req)); + if (req == NULL) { + rc = -ENOMEM; + goto invalid; + } + + req->request = request; + if (spdk_json_decode_object(params, rpc_vhost_nvme_add_ns, + SPDK_COUNTOF(rpc_vhost_nvme_add_ns), + req)) { + SPDK_DEBUGLOG(SPDK_LOG_VHOST_RPC, "spdk_json_decode_object failed\n"); + rc = -EINVAL; + goto invalid; + } + + spdk_vhost_call_external_event(req->ctrlr, spdk_rpc_add_vhost_nvme_ns_cb, req); + return; + +invalid: + if (req) { + free_rpc_add_vhost_nvme_ctrlr_ns(req); + } + spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + spdk_strerror(-rc)); +} +SPDK_RPC_REGISTER("add_vhost_nvme_ns", spdk_rpc_add_vhost_nvme_ns) + SPDK_LOG_REGISTER_COMPONENT("vhost_rpc", SPDK_LOG_VHOST_RPC) diff --git a/scripts/rpc.py b/scripts/rpc.py index 42ecd9795..ca0424737 100755 --- a/scripts/rpc.py +++ b/scripts/rpc.py @@ -893,6 +893,25 @@ if __name__ == "__main__": p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only') p.set_defaults(func=construct_vhost_blk_controller) + @call_cmd + def construct_vhost_nvme_controller(args): + rpc.vhost.construct_vhost_nvme_controller(args.client, args) + + p = 
subparsers.add_parser('construct_vhost_nvme_controller', help='Add new vhost controller') + p.add_argument('ctrlr', help='controller name') + p.add_argument('io_queues', help='number of IO queues for the controller', type=int) + p.add_argument('--cpumask', help='cpu mask for this controller') + p.set_defaults(func=construct_vhost_nvme_controller) + + @call_cmd + def add_vhost_nvme_ns(args): + rpc.vhost.add_vhost_nvme_ns(args.client, args) + + p = subparsers.add_parser('add_vhost_nvme_ns', help='Add a Namespace to vhost controller') + p.add_argument('ctrlr', help='controller name where to add a Namespace') + p.add_argument('bdev_name', help='block device name for a new Namespace') + p.set_defaults(func=add_vhost_nvme_ns) + @call_cmd def get_vhost_controllers(args): print_dict(rpc.vhost.get_vhost_controllers(args.client, args)) diff --git a/scripts/rpc/vhost.py b/scripts/rpc/vhost.py index 4577da689..09d277b47 100755 --- a/scripts/rpc/vhost.py +++ b/scripts/rpc/vhost.py @@ -33,6 +33,27 @@ def remove_vhost_scsi_target(client, args): return client.call('remove_vhost_scsi_target', params) +def construct_vhost_nvme_controller(client, args): + params = { + 'ctrlr': args.ctrlr, + 'io_queues': args.io_queues + } + + if args.cpumask: + params['cpumask'] = args.cpumask + + return client.call('construct_vhost_nvme_controller', params) + + +def add_vhost_nvme_ns(client, args): + params = { + 'ctrlr': args.ctrlr, + 'bdev_name': args.bdev_name, + } + + return client.call('add_vhost_nvme_ns', params) + + def construct_vhost_blk_controller(client, args): params = { 'ctrlr': args.ctrlr,