diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md index 623997c3c..fc8909242 100644 --- a/doc/jsonrpc.md +++ b/doc/jsonrpc.md @@ -6770,6 +6770,7 @@ tgt_name | Optional | string | Parent NVMe-oF target nam ### Response The response is an object containing NVMf subsystem statistics. +In the response, `admin_qpairs` and `io_qpairs` report cumulative queue pair counts, while `current_admin_qpairs` and `current_io_qpairs` report the current number of queue pairs. ### Example @@ -6794,6 +6795,8 @@ Example response: "name": "app_thread", "admin_qpairs": 1, "io_qpairs": 4, + "current_admin_qpairs": 1, + "current_io_qpairs": 2, "pending_bdev_io": 1721, "transports": [ { diff --git a/include/spdk/nvmf.h b/include/spdk/nvmf.h index cf8b52ad4..5a22b9426 100644 --- a/include/spdk/nvmf.h +++ b/include/spdk/nvmf.h @@ -120,8 +120,14 @@ struct spdk_nvmf_listen_opts { void spdk_nvmf_listen_opts_init(struct spdk_nvmf_listen_opts *opts, size_t opts_size); struct spdk_nvmf_poll_group_stat { + /* cumulative admin qpair count */ uint32_t admin_qpairs; + /* cumulative io qpair count */ uint32_t io_qpairs; + /* current admin qpair count */ + uint32_t current_admin_qpairs; + /* current io qpair count */ + uint32_t current_io_qpairs; uint64_t pending_bdev_io; }; diff --git a/lib/nvmf/ctrlr.c b/lib/nvmf/ctrlr.c index ff684ceec..a0b7a3306 100644 --- a/lib/nvmf/ctrlr.c +++ b/lib/nvmf/ctrlr.c @@ -711,8 +711,10 @@ _nvmf_ctrlr_connect(struct spdk_nvmf_request *req) if (0 == qpair->qid) { qpair->group->stat.admin_qpairs++; + qpair->group->stat.current_admin_qpairs++; } else { qpair->group->stat.io_qpairs++; + qpair->group->stat.current_io_qpairs++; } if (cmd->qid == 0) { diff --git a/lib/nvmf/nvmf.c b/lib/nvmf/nvmf.c index d1ebe2331..eb6ba3935 100644 --- a/lib/nvmf/nvmf.c +++ b/lib/nvmf/nvmf.c @@ -999,6 +999,16 @@ _nvmf_qpair_destroy(void *ctx, int status) assert(qpair->state == SPDK_NVMF_QPAIR_DEACTIVATING); qpair_ctx->qid = qpair->qid; + if (ctrlr) { + if (0 == qpair->qid) { + 
assert(qpair->group->stat.current_admin_qpairs > 0); + qpair->group->stat.current_admin_qpairs--; + } else { + assert(qpair->group->stat.current_io_qpairs > 0); + qpair->group->stat.current_io_qpairs--; + } + } + if (!ctrlr || !ctrlr->thread) { spdk_nvmf_poll_group_remove(qpair); nvmf_transport_qpair_fini(qpair, _nvmf_transport_qpair_fini_complete, qpair_ctx); @@ -1607,6 +1617,8 @@ spdk_nvmf_poll_group_dump_stat(struct spdk_nvmf_poll_group *group, struct spdk_j spdk_json_write_named_string(w, "name", spdk_thread_get_name(spdk_get_thread())); spdk_json_write_named_uint32(w, "admin_qpairs", group->stat.admin_qpairs); spdk_json_write_named_uint32(w, "io_qpairs", group->stat.io_qpairs); + spdk_json_write_named_uint32(w, "current_admin_qpairs", group->stat.current_admin_qpairs); + spdk_json_write_named_uint32(w, "current_io_qpairs", group->stat.current_io_qpairs); spdk_json_write_named_uint64(w, "pending_bdev_io", group->stat.pending_bdev_io); spdk_json_write_named_array_begin(w, "transports");