rpc: Add NVMf transport statistics to nvmf_get_stats RPC method

This patch adds a transport section to the nvmf_get_stats RPC method and basic
infrastructure for reporting NVMf transport-specific statistics.

Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Change-Id: Ie83b34f4ed932dd5f6d6e37897cf45228114bd88
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452299
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
This commit is contained in:
Evgeniy Kochetov 2019-05-23 12:04:49 +03:00 committed by Jim Harris
parent e7ef737702
commit 43bb4e6b1f
7 changed files with 156 additions and 4 deletions

View File

@ -60,6 +60,8 @@ updated the related rpc function nvmf_create_transport to make this
configurable parameter available to users. The `dif_insert_or_strip` is relevant configurable parameter available to users. The `dif_insert_or_strip` is relevant
for TCP transport for now and used to configure the DIF strip and insert. for TCP transport for now and used to configure the DIF strip and insert.
Added infrastructure to retrieve NVMf transport statistics.
### notify ### notify
The function `spdk_notify_get_types()` and `spdk_notify_get_events()` were The function `spdk_notify_get_types()` and `spdk_notify_get_events()` were

View File

@ -4173,7 +4173,12 @@ Example response:
"name": "app_thread", "name": "app_thread",
"admin_qpairs": 1, "admin_qpairs": 1,
"io_qpairs": 4, "io_qpairs": 4,
"pending_bdev_io": 1721 "pending_bdev_io": 1721,
"transports": [
{
"trtype": "RDMA"
}
]
} }
] ]
} }

View File

@ -85,6 +85,15 @@ struct spdk_nvmf_poll_group_stat {
uint64_t pending_bdev_io; uint64_t pending_bdev_io;
}; };
/*
 * Per-poll-group statistics reported by an NVMf transport.
 * The union member that is valid is selected by 'trtype'.
 */
struct spdk_nvmf_transport_poll_group_stat {
	/* Transport type that produced these statistics; selects the union member below. */
	spdk_nvme_transport_type_t trtype;
	union {
		/* RDMA statistics; 'dummy' is a placeholder until real counters are added. */
		struct {
			int dummy;
		} rdma;
	};
};
/** /**
* Construct an NVMe-oF target. * Construct an NVMe-oF target.
* *
@ -866,6 +875,40 @@ int spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
void void
spdk_nvmf_tgt_transport_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt); spdk_nvmf_tgt_transport_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_tgt *tgt);
/**
* \brief Get current transport poll group statistics.
*
* This function allocates memory for statistics and returns it
* in \p stat parameter. Caller must free this memory with
* spdk_nvmf_transport_poll_group_free_stat() when it is not needed
* anymore.
*
* \param tgt The NVMf target.
* \param transport The NVMf transport.
* \param stat Output parameter that will contain pointer to allocated statistics structure.
*
* \return 0 upon success.
* \return -ENOTSUP if transport does not support statistics.
* \return -EINVAL if any of parameters is NULL.
* \return -ENOENT if transport poll group is not found.
* \return -ENOMEM if memory allocation failed.
*/
int
spdk_nvmf_transport_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
struct spdk_nvmf_transport *transport,
struct spdk_nvmf_transport_poll_group_stat **stat);
/**
* Free statistics memory previously allocated with spdk_nvmf_transport_poll_group_get_stat().
*
* \param transport The NVMf transport.
* \param stat Pointer to transport poll group statistics structure.
*/
void
spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
struct spdk_nvmf_transport_poll_group_stat *stat);
#ifdef SPDK_CONFIG_RDMA #ifdef SPDK_CONFIG_RDMA
/** /**
* \brief Set the global hooks for the RDMA transport, if necessary. * \brief Set the global hooks for the RDMA transport, if necessary.

View File

@ -1608,11 +1608,29 @@ rpc_nvmf_get_stats_done(struct spdk_io_channel_iter *i, int status)
free(ctx); free(ctx);
} }
/*
 * Emit one JSON object describing a single transport poll group's
 * statistics. Currently only the transport type name is written;
 * the switch is a placeholder for transport-specific fields.
 */
static void
write_nvmf_transport_stats(struct spdk_json_write_ctx *w,
			   struct spdk_nvmf_transport_poll_group_stat *stat)
{
	const char *trtype_str = spdk_nvme_transport_id_trtype_str(stat->trtype);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "trtype", trtype_str);

	/* No transport-specific counters are emitted yet. */
	switch (stat->trtype) {
	case SPDK_NVME_TRANSPORT_RDMA:
	default:
		break;
	}

	spdk_json_write_object_end(w);
}
static void static void
rpc_nvmf_get_stats(struct spdk_io_channel_iter *i) rpc_nvmf_get_stats(struct spdk_io_channel_iter *i)
{ {
struct rpc_nvmf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(i); struct rpc_nvmf_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
struct spdk_nvmf_transport *transport;
struct spdk_nvmf_poll_group_stat stat; struct spdk_nvmf_poll_group_stat stat;
struct spdk_nvmf_transport_poll_group_stat *trstat;
int rc;
if (0 == spdk_nvmf_poll_group_get_stat(g_spdk_nvmf_tgt, &stat)) { if (0 == spdk_nvmf_poll_group_get_stat(g_spdk_nvmf_tgt, &stat)) {
spdk_json_write_object_begin(ctx->w); spdk_json_write_object_begin(ctx->w);
@ -1620,6 +1638,22 @@ rpc_nvmf_get_stats(struct spdk_io_channel_iter *i)
spdk_json_write_named_uint32(ctx->w, "admin_qpairs", stat.admin_qpairs); spdk_json_write_named_uint32(ctx->w, "admin_qpairs", stat.admin_qpairs);
spdk_json_write_named_uint32(ctx->w, "io_qpairs", stat.io_qpairs); spdk_json_write_named_uint32(ctx->w, "io_qpairs", stat.io_qpairs);
spdk_json_write_named_uint64(ctx->w, "pending_bdev_io", stat.pending_bdev_io); spdk_json_write_named_uint64(ctx->w, "pending_bdev_io", stat.pending_bdev_io);
spdk_json_write_named_array_begin(ctx->w, "transports");
transport = spdk_nvmf_transport_get_first(g_spdk_nvmf_tgt);
while (transport) {
rc = spdk_nvmf_transport_poll_group_get_stat(g_spdk_nvmf_tgt, transport, &trstat);
if (0 == rc) {
write_nvmf_transport_stats(ctx->w, trstat);
spdk_nvmf_transport_poll_group_free_stat(transport, trstat);
} else if (-ENOTSUP != rc) {
SPDK_ERRLOG("Failed to get poll group statistics for transport %s, errno %d\n",
spdk_nvme_transport_id_trtype_str(spdk_nvmf_get_transport_type(transport)),
rc);
}
transport = spdk_nvmf_transport_get_next(transport);
}
spdk_json_write_array_end(ctx->w);
spdk_json_write_object_end(ctx->w); spdk_json_write_object_end(ctx->w);
} }

View File

@ -3647,6 +3647,40 @@ spdk_nvmf_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
g_nvmf_hooks = *hooks; g_nvmf_hooks = *hooks;
} }
/*
 * Allocate and fill RDMA transport statistics for the RDMA poll group that
 * belongs to the current thread's nvmf poll group.
 *
 * \param tgt The NVMf target whose io_channel wraps the poll group.
 * \param stat Output: newly allocated statistics; caller frees with
 *             spdk_nvmf_rdma_poll_group_free_stat().
 *
 * \return 0 on success, -EINVAL on NULL parameters, -ENOMEM on allocation
 *         failure, -ENOENT when this poll group has no RDMA transport group.
 */
static int
spdk_nvmf_rdma_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
				   struct spdk_nvmf_transport_poll_group_stat **stat)
{
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_transport_poll_group *tgroup;
	int rc = -ENOENT;

	if (tgt == NULL || stat == NULL) {
		return -EINVAL;
	}

	/* The target's io_channel on this thread carries the poll group as its ctx. */
	ch = spdk_get_io_channel(tgt);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (SPDK_NVME_TRANSPORT_RDMA == tgroup->transport->ops->type) {
			*stat = calloc(1, sizeof(struct spdk_nvmf_transport_poll_group_stat));
			if (!*stat) {
				SPDK_ERRLOG("Failed to allocate memory for NVMf RDMA statistics\n");
				rc = -ENOMEM;
				break;
			}
			(*stat)->trtype = SPDK_NVME_TRANSPORT_RDMA;
			rc = 0;
			break;
		}
	}

	/*
	 * Balance the reference taken by spdk_get_io_channel() above on every
	 * path; the original code returned without releasing it (channel leak).
	 */
	spdk_put_io_channel(ch);
	return rc;
}
/* Release statistics previously allocated by spdk_nvmf_rdma_poll_group_get_stat(). */
static void
spdk_nvmf_rdma_poll_group_free_stat(struct spdk_nvmf_transport_poll_group_stat *stat)
{
	/* free(NULL) is a no-op, so no NULL guard is needed. */
	free(stat);
}
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = { const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
.type = SPDK_NVME_TRANSPORT_RDMA, .type = SPDK_NVME_TRANSPORT_RDMA,
.opts_init = spdk_nvmf_rdma_opts_init, .opts_init = spdk_nvmf_rdma_opts_init,
@ -3672,6 +3706,8 @@ const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
.qpair_get_local_trid = spdk_nvmf_rdma_qpair_get_local_trid, .qpair_get_local_trid = spdk_nvmf_rdma_qpair_get_local_trid,
.qpair_get_listen_trid = spdk_nvmf_rdma_qpair_get_listen_trid, .qpair_get_listen_trid = spdk_nvmf_rdma_qpair_get_listen_trid,
.poll_group_get_stat = spdk_nvmf_rdma_poll_group_get_stat,
.poll_group_free_stat = spdk_nvmf_rdma_poll_group_free_stat,
}; };
SPDK_LOG_REGISTER_COMPONENT("rdma", SPDK_LOG_RDMA) SPDK_LOG_REGISTER_COMPONENT("rdma", SPDK_LOG_RDMA)

View File

@ -2,7 +2,7 @@
* BSD LICENSE * BSD LICENSE
* *
* Copyright (c) Intel Corporation. All rights reserved. * Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved. * Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -335,3 +335,24 @@ spdk_nvmf_transport_qpair_set_sqsize(struct spdk_nvmf_qpair *qpair)
return 0; return 0;
} }
int
spdk_nvmf_transport_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
struct spdk_nvmf_transport *transport,
struct spdk_nvmf_transport_poll_group_stat **stat)
{
if (transport->ops->poll_group_get_stat) {
return transport->ops->poll_group_get_stat(tgt, stat);
} else {
return -ENOTSUP;
}
}
void
spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
struct spdk_nvmf_transport_poll_group_stat *stat)
{
if (transport->ops->poll_group_free_stat) {
transport->ops->poll_group_free_stat(stat);
}
}

View File

@ -1,8 +1,8 @@
/*- /*-
* BSD LICENSE * BSD LICENSE
* *
* Copyright (c) Intel Corporation. * Copyright (c) Intel Corporation. All rights reserved.
* All rights reserved. * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions * modification, are permitted provided that the following conditions
@ -167,6 +167,17 @@ struct spdk_nvmf_transport_ops {
* set the submission queue size of the queue pair * set the submission queue size of the queue pair
*/ */
int (*qpair_set_sqsize)(struct spdk_nvmf_qpair *qpair); int (*qpair_set_sqsize)(struct spdk_nvmf_qpair *qpair);
/*
* Get transport poll group statistics
*/
int (*poll_group_get_stat)(struct spdk_nvmf_tgt *tgt,
struct spdk_nvmf_transport_poll_group_stat **stat);
/*
* Free transport poll group statistics previously allocated with poll_group_get_stat()
*/
void (*poll_group_free_stat)(struct spdk_nvmf_transport_poll_group_stat *stat);
}; };
int spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport, int spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,