lib/nvmf: Add current qpair count to poll group statistic
struct spdk_nvmf_poll_group_stat already tracks cumulative admin and IO queue pair counts, but the current qpair counts are not reflected. This patch adds current admin and IO qpair counts for a poll group.

Signed-off-by: Rui Chang <rui.chang@arm.com>
Change-Id: I7d40aed8b3fb09f9d34e5b5232380d162b97882b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/7969
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Eugene Kochetov <evgeniik@nvidia.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: GangCao <gang.cao@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
parent 75a507736b
commit 080118cd83
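At a glance, the bookkeeping the patch introduces is: the cumulative counters are only ever incremented (when a qpair connects), while the new current counters are also decremented when a qpair is destroyed. The standalone sketch below mirrors that logic with a simplified stand-in for struct spdk_nvmf_poll_group_stat; the on_qpair_connect()/on_qpair_destroy() helpers are illustrative only and are not SPDK APIs.

```c
/* Illustrative sketch only: a simplified stand-in for the poll group
 * bookkeeping this patch adds. on_qpair_connect()/on_qpair_destroy()
 * are hypothetical helpers, not SPDK functions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct poll_group_stat {
	uint32_t admin_qpairs;          /* cumulative, never decremented */
	uint32_t io_qpairs;             /* cumulative, never decremented */
	uint32_t current_admin_qpairs;  /* tracks live admin qpairs */
	uint32_t current_io_qpairs;     /* tracks live io qpairs */
};

static void
on_qpair_connect(struct poll_group_stat *stat, uint16_t qid)
{
	if (qid == 0) {
		stat->admin_qpairs++;
		stat->current_admin_qpairs++;
	} else {
		stat->io_qpairs++;
		stat->current_io_qpairs++;
	}
}

static void
on_qpair_destroy(struct poll_group_stat *stat, uint16_t qid)
{
	if (qid == 0) {
		assert(stat->current_admin_qpairs > 0);
		stat->current_admin_qpairs--;
	} else {
		assert(stat->current_io_qpairs > 0);
		stat->current_io_qpairs--;
	}
}

int
main(void)
{
	struct poll_group_stat stat = {0};

	on_qpair_connect(&stat, 0);   /* admin qpair */
	on_qpair_connect(&stat, 1);   /* io qpair */
	on_qpair_connect(&stat, 2);   /* io qpair */
	on_qpair_destroy(&stat, 2);   /* one io qpair disconnects */

	/* cumulative counts keep their totals, current counts drop */
	printf("io_qpairs=%u current_io_qpairs=%u\n",
	       stat.io_qpairs, stat.current_io_qpairs);  /* prints 2 and 1 */
	return 0;
}
```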
@@ -6770,6 +6770,7 @@ tgt_name | Optional | string | Parent NVMe-oF target nam
 ### Response

 The response is an object containing NVMf subsystem statistics.
+In the response, `admin_qpairs` and `io_qpairs` are reflecting cumulative queue pair counts while `current_admin_qpairs` and `current_io_qpairs` are showing the current number.

 ### Example

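The documentation hunk above describes the two pairs of counters returned in the statistics object. Assuming this hunk belongs to the `nvmf_get_stats` JSON-RPC method (the parameter table in the hunk header lists the optional `tgt_name` parameter), a minimal request to fetch these statistics might look like the sketch below; the method name is an assumption and is not stated in this diff.

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "nvmf_get_stats"
}
```

The example response in the next hunk shows where `current_admin_qpairs` and `current_io_qpairs` appear in the returned poll group objects.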
@@ -6794,6 +6795,8 @@ Example response:
       "name": "app_thread",
       "admin_qpairs": 1,
       "io_qpairs": 4,
+      "current_admin_qpairs": 1,
+      "current_io_qpairs": 2,
       "pending_bdev_io": 1721,
       "transports": [
         {
@@ -120,8 +120,14 @@ struct spdk_nvmf_listen_opts {
 void spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size);

 struct spdk_nvmf_poll_group_stat {
+	/* cumulative admin qpair count */
 	uint32_t admin_qpairs;
+	/* cumulative io qpair count */
 	uint32_t io_qpairs;
+	/* current admin qpair count */
+	uint32_t current_admin_qpairs;
+	/* current io qpair count */
+	uint32_t current_io_qpairs;
 	uint64_t pending_bdev_io;
 };

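Because the cumulative fields are never decremented while the current fields are, a consumer of a stat snapshot can derive, for example, how many IO qpairs have already disconnected. A minimal sketch follows, assuming the structure is declared in SPDK's public nvmf header (spdk/nvmf.h, which is an assumption; the file name is not shown in this diff); the helper itself is hypothetical, not an SPDK API.

```c
/* Hypothetical helper, not an SPDK API: given a filled-in stat structure,
 * derive how many io qpairs have disconnected since the poll group was
 * created. Relies only on the fields added by this patch. */
#include <stdint.h>

#include "spdk/nvmf.h"

static inline uint32_t
io_qpairs_disconnected(const struct spdk_nvmf_poll_group_stat *stat)
{
	/* cumulative count minus currently connected count */
	return stat->io_qpairs - stat->current_io_qpairs;
}
```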
@@ -711,8 +711,10 @@ _nvmf_ctrlr_connect(struct spdk_nvmf_request *req)

 	if (0 == qpair->qid) {
 		qpair->group->stat.admin_qpairs++;
+		qpair->group->stat.current_admin_qpairs++;
 	} else {
 		qpair->group->stat.io_qpairs++;
+		qpair->group->stat.current_io_qpairs++;
 	}

 	if (cmd->qid == 0) {
@@ -999,6 +999,16 @@ _nvmf_qpair_destroy(void *ctx, int status)
 	assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING);
 	qpair_ctx->qid = qpair->qid;

+	if (ctrlr) {
+		if (0 == qpair->qid) {
+			assert(qpair->group->stat.current_admin_qpairs > 0);
+			qpair->group->stat.current_admin_qpairs--;
+		} else {
+			assert(qpair->group->stat.current_io_qpairs > 0);
+			qpair->group->stat.current_io_qpairs--;
+		}
+	}
+
 	if (!ctrlr || !ctrlr->thread) {
 		spdk_nvmf_poll_group_remove(qpair);
 		nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx);
@@ -1607,6 +1617,8 @@ spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_j
 	spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread()));
 	spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs);
 	spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs);
+	spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs);
+	spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs);
 	spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io);

 	spdk_json_write_named_array_begin(w, "transports");