From fca6ff8f758da4b699b82465ca331acccc4e3d1b Mon Sep 17 00:00:00 2001
From: Evgeniy Kochetov
Date: Mon, 15 Apr 2019 09:54:38 +0000
Subject: [PATCH] rpc: Add nvmf_get_stats RPC method

This patch adds nvmf_get_stats RPC method and basic infrastructure to
report NVMf global and per poll group statistics in JSON format.

Signed-off-by: Evgeniy Kochetov
Change-Id: I13b83e28b75a02bc1dcb7b95cbce52ae10ff0f7b
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452298
Tested-by: SPDK CI Jenkins
Reviewed-by: Ben Walker
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Darek Stojaczyk
---
 CHANGELOG.md                         |  4 ++
 doc/jsonrpc.md                       | 38 +++++++++++++++
 include/spdk/nvmf.h                  | 18 ++++++-
 lib/event/subsystems/nvmf/nvmf_rpc.c | 71 +++++++++++++++++++++++++++-
 lib/nvmf/nvmf.c                      | 19 +++++++-
 lib/nvmf/nvmf_internal.h             |  7 ++-
 scripts/rpc.py                       |  7 +++
 scripts/rpc/nvmf.py                  |  9 ++++
 8 files changed, 168 insertions(+), 5 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 02c018dce..848d5e77a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,6 +30,8 @@ poll group for the qpair. And `ConnectionScheduler` configuration is added into
 [Nvmf] section in etc/spdk/nvmf.conf.in to demonstrate how to configure the
 connection scheduling strategy among different spdk threads.
 
+Added infrastructure to retrieve global and per poll group NVMf statistics.
+
 ### notify
 
 The function `spdk_notify_get_types()` and `spdk_notify_get_events()` were
@@ -99,6 +101,8 @@ spdk_sock_group_create() is updated to allow input the user provided ctx.
 
 Added thread_get_stats RPC method to retrieve existing statistics.
 
+Added nvmf_get_stats RPC method to retrieve NVMf subsystem statistics.
+
 ## v19.04:
 
 ### nvme
diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md
index 8cea5cbed..41e820523 100644
--- a/doc/jsonrpc.md
+++ b/doc/jsonrpc.md
@@ -4138,6 +4138,44 @@ Example response:
 }
 ~~~
 
+## nvmf_get_stats method {#rpc_nvmf_get_stats}
+
+Retrieve current statistics of the NVMf subsystem.
+
+### Parameters
+
+This method has no parameters.
+
+### Response
+
+The response is an object containing NVMf subsystem statistics.
+
+### Example
+
+Example request:
+~~~
+{
+  "jsonrpc": "2.0",
+  "method": "nvmf_get_stats",
+  "id": 1
+}
+~~~
+
+Example response:
+~~~
+{
+  "jsonrpc": "2.0",
+  "id": 1,
+  "result": {
+    "poll_groups": [
+      {
+        "name": "app_thread"
+      }
+    ]
+  }
+}
+~~~
+
 # Vhost Target {#jsonrpc_components_vhost_tgt}
 
 The following common preconditions need to be met in all target types.
diff --git a/include/spdk/nvmf.h b/include/spdk/nvmf.h
index 6fa3aebcc..106dbd1bc 100644
--- a/include/spdk/nvmf.h
+++ b/include/spdk/nvmf.h
@@ -2,7 +2,7 @@
  *   BSD LICENSE
  *
  *   Copyright (c) Intel Corporation. All rights reserved.
- *   Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
+ *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -78,6 +78,10 @@ struct spdk_nvmf_transport_opts {
 	bool		dif_insert_or_strip;
 };
 
+struct spdk_nvmf_poll_group_stat {
+	int dummy;
+};
+
 /**
  * Construct an NVMe-oF target.
  *
@@ -190,6 +194,18 @@ void spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group);
 int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
 			     struct spdk_nvmf_qpair *qpair);
 
+/**
+ * Get current poll group statistics.
+ *
+ * \param tgt The NVMf target.
+ * \param stat Pointer to allocated statistics structure to fill with values.
+ *
+ * \return 0 upon success.
+ * \return -EINVAL if either tgt or stat is NULL.
+ */
+int spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
+				  struct spdk_nvmf_poll_group_stat *stat);
+
 typedef void (*nvmf_qpair_disconnect_cb)(void *ctx);
 
 /**
diff --git a/lib/event/subsystems/nvmf/nvmf_rpc.c b/lib/event/subsystems/nvmf/nvmf_rpc.c
index e00f60516..968c92053 100644
--- a/lib/event/subsystems/nvmf/nvmf_rpc.c
+++ b/lib/event/subsystems/nvmf/nvmf_rpc.c
@@ -2,7 +2,7 @@
  *   BSD LICENSE
  *
  *   Copyright (c) Intel Corporation. All rights reserved.
- *   Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
+ *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -1628,3 +1628,72 @@ spdk_rpc_get_nvmf_transports(struct spdk_jsonrpc_request *request,
 	spdk_jsonrpc_end_result(request, w);
 }
 SPDK_RPC_REGISTER("get_nvmf_transports", spdk_rpc_get_nvmf_transports, SPDK_RPC_RUNTIME)
+
+struct rpc_nvmf_get_stats_ctx {
+	struct spdk_jsonrpc_request *request;
+	struct spdk_json_write_ctx *w;
+};
+
+static void
+rpc_nvmf_get_stats_done(struct spdk_io_channel_iter *i, int status)
+{
+	struct rpc_nvmf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+	spdk_json_write_array_end(ctx->w);
+	spdk_json_write_object_end(ctx->w);
+	spdk_jsonrpc_end_result(ctx->request, ctx->w);
+	free(ctx);
+}
+
+static void
+rpc_nvmf_get_stats(struct spdk_io_channel_iter *i)
+{
+	struct rpc_nvmf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+	struct spdk_nvmf_poll_group_stat stat;
+
+	if (0 == spdk_nvmf_poll_group_get_stat(g_spdk_nvmf_tgt, &stat)) {
+		spdk_json_write_object_begin(ctx->w);
+		spdk_json_write_named_string(ctx->w, "name", spdk_thread_get_name(spdk_get_thread()));
+		spdk_json_write_object_end(ctx->w);
+	}
+
+	spdk_for_each_channel_continue(i, 0);
+}
+
+
+static void
+spdk_rpc_nvmf_get_stats(struct spdk_jsonrpc_request *request,
+			const struct spdk_json_val *params)
+{
+	struct rpc_nvmf_get_stats_ctx *ctx;
+
+	if (params) {
+		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
+						 "'nvmf_get_stats' requires no arguments");
+		return;
+	}
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (!ctx) {
+		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
+						 "Memory allocation error");
+		return;
+	}
+	ctx->request = request;
+
+	ctx->w = spdk_jsonrpc_begin_result(ctx->request);
+	if (NULL == ctx->w) {
+		free(ctx);
+		return;
+	}
+
+	spdk_json_write_object_begin(ctx->w);
+	spdk_json_write_named_array_begin(ctx->w, "poll_groups");
+
+	spdk_for_each_channel(g_spdk_nvmf_tgt,
+			      rpc_nvmf_get_stats,
+			      ctx,
+			      rpc_nvmf_get_stats_done);
+}
+
+SPDK_RPC_REGISTER("nvmf_get_stats", spdk_rpc_nvmf_get_stats, SPDK_RPC_RUNTIME)
diff --git a/lib/nvmf/nvmf.c b/lib/nvmf/nvmf.c
index 7762cc9e9..657d21775 100644
--- a/lib/nvmf/nvmf.c
+++ b/lib/nvmf/nvmf.c
@@ -2,7 +2,7 @@
  *   BSD LICENSE
  *
  *   Copyright (c) Intel Corporation. All rights reserved.
- *   Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
+ *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -1227,3 +1227,20 @@ spdk_nvmf_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
 
 	return tgroup->group;
 }
+
+int
+spdk_nvmf_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
+			      struct spdk_nvmf_poll_group_stat *stat)
+{
+	struct spdk_io_channel *ch;
+	struct spdk_nvmf_poll_group *group;
+
+	if (tgt == NULL || stat == NULL) {
+		return -EINVAL;
+	}
+
+	ch = spdk_get_io_channel(tgt);
+	group = spdk_io_channel_get_ctx(ch);
+	*stat = group->stat;
+	return 0;
+}
diff --git a/lib/nvmf/nvmf_internal.h b/lib/nvmf/nvmf_internal.h
index 3ce39cb68..f3a602438 100644
--- a/lib/nvmf/nvmf_internal.h
+++ b/lib/nvmf/nvmf_internal.h
@@ -1,8 +1,8 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright (c) Intel Corporation.
- *   All rights reserved.
+ *   Copyright (c) Intel Corporation. All rights reserved.
+ *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -172,6 +172,9 @@ struct spdk_nvmf_poll_group {
 
 	/* All of the queue pairs that belong to this poll group */
 	TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
+
+	/* Statistics */
+	struct spdk_nvmf_poll_group_stat stat;
 };
 
 typedef enum _spdk_nvmf_request_exec_status {
diff --git a/scripts/rpc.py b/scripts/rpc.py
index d882ef658..697e27986 100755
--- a/scripts/rpc.py
+++ b/scripts/rpc.py
@@ -1575,6 +1575,13 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
     p.add_argument('-d', '--disable', action='store_true', help='Disable allowing any host')
     p.set_defaults(func=nvmf_subsystem_allow_any_host)
 
+    def nvmf_get_stats(args):
+        print_dict(rpc.nvmf.nvmf_get_stats(args.client))
+
+    p = subparsers.add_parser(
+        'nvmf_get_stats', help='Display current statistics for NVMf subsystem')
+    p.set_defaults(func=nvmf_get_stats)
+
     # pmem
     def create_pmem_pool(args):
         num_blocks = int((args.total_size * 1024 * 1024) / args.block_size)
diff --git a/scripts/rpc/nvmf.py b/scripts/rpc/nvmf.py
index 24b0c0bed..bfd7c994f 100644
--- a/scripts/rpc/nvmf.py
+++ b/scripts/rpc/nvmf.py
@@ -323,3 +323,12 @@ def delete_nvmf_subsystem(client, nqn):
     """
     params = {'nqn': nqn}
     return client.call('delete_nvmf_subsystem', params)
+
+
+def nvmf_get_stats(client):
+    """Query NVMf statistics.
+
+    Returns:
+        Current NVMf statistics.
+    """
+    return client.call('nvmf_get_stats')
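
A minimal usage sketch, not taken from the commit above: with the patch applied and an SPDK target running, the new method can be reached as `scripts/rpc.py nvmf_get_stats` or from Python through the `nvmf_get_stats()` helper this patch adds to scripts/rpc/nvmf.py. The Unix socket path `/var/tmp/spdk.sock` and running the snippet from the SPDK scripts/ directory (so the `rpc` package and its `JSONRPCClient` from scripts/rpc/client.py are importable) are assumptions here, not part of the patch.

~~~
#!/usr/bin/env python3
# Sketch: query NVMf statistics over SPDK's JSON-RPC Unix socket.
# Assumes the default socket path /var/tmp/spdk.sock and that the SPDK
# scripts/ directory is on the import path so the 'rpc' package resolves.
import json

import rpc.nvmf                     # provides nvmf_get_stats() after this patch
from rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock')

# Same call the new 'scripts/rpc.py nvmf_get_stats' sub-command issues.
stats = rpc.nvmf.nvmf_get_stats(client)

# With this patch the result only carries the poll group names, e.g.
# {"poll_groups": [{"name": "app_thread"}]}
print(json.dumps(stats, indent=2))
~~~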