diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md
index 3b445c5cb..c3183221c 100644
--- a/doc/jsonrpc.md
+++ b/doc/jsonrpc.md
@@ -4177,16 +4177,23 @@ Example response:
         "transports": [
           {
             "trtype": "RDMA",
+            "pending_data_buffer": 0,
             "devices": [
               {
                 "name": "mlx5_1",
                 "polls": 1536729,
-                "completions": 0
+                "completions": 0,
+                "pending_free_request": 0,
+                "pending_rdma_read": 0,
+                "pending_rdma_write": 0
               },
               {
                 "name": "mlx5_0",
                 "polls": 1536729,
-                "completions": 18667357
+                "completions": 18667357,
+                "pending_free_request": 0,
+                "pending_rdma_read": 337602,
+                "pending_rdma_write": 0
               }
             ]
           }
diff --git a/include/spdk/nvmf.h b/include/spdk/nvmf.h
index 43b284cc5..53770f6fd 100644
--- a/include/spdk/nvmf.h
+++ b/include/spdk/nvmf.h
@@ -89,12 +89,16 @@ struct spdk_nvmf_rdma_device_stat {
 	const char *name;
 	uint64_t polls;
 	uint64_t completions;
+	uint64_t pending_free_request;
+	uint64_t pending_rdma_read;
+	uint64_t pending_rdma_write;
 };
 
 struct spdk_nvmf_transport_poll_group_stat {
 	spdk_nvme_transport_type_t trtype;
 	union {
 		struct {
+			uint64_t pending_data_buffer;
 			uint64_t num_devices;
 			struct spdk_nvmf_rdma_device_stat *devices;
 		} rdma;
diff --git a/lib/event/subsystems/nvmf/nvmf_rpc.c b/lib/event/subsystems/nvmf/nvmf_rpc.c
index 5f9234982..eb413cf2e 100644
--- a/lib/event/subsystems/nvmf/nvmf_rpc.c
+++ b/lib/event/subsystems/nvmf/nvmf_rpc.c
@@ -1619,12 +1619,19 @@ write_nvmf_transport_stats(struct spdk_json_write_ctx *w,
 				     spdk_nvme_transport_id_trtype_str(stat->trtype));
 	switch (stat->trtype) {
 	case SPDK_NVME_TRANSPORT_RDMA:
+		spdk_json_write_named_uint64(w, "pending_data_buffer", stat->rdma.pending_data_buffer);
 		spdk_json_write_named_array_begin(w, "devices");
 		for (i = 0; i < stat->rdma.num_devices; ++i) {
 			spdk_json_write_object_begin(w);
 			spdk_json_write_named_string(w, "name", stat->rdma.devices[i].name);
 			spdk_json_write_named_uint64(w, "polls", stat->rdma.devices[i].polls);
 			spdk_json_write_named_uint64(w, "completions", stat->rdma.devices[i].completions);
+			spdk_json_write_named_uint64(w, "pending_free_request",
+						     stat->rdma.devices[i].pending_free_request);
+			spdk_json_write_named_uint64(w, "pending_rdma_read",
+						     stat->rdma.devices[i].pending_rdma_read);
+			spdk_json_write_named_uint64(w, "pending_rdma_write",
+						     stat->rdma.devices[i].pending_rdma_write);
 			spdk_json_write_object_end(w);
 		}
 		spdk_json_write_array_end(w);
diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index b1dfd53fd..3b62982b0 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -412,6 +412,9 @@ struct spdk_nvmf_rdma_qpair {
 struct spdk_nvmf_rdma_poller_stat {
 	uint64_t completions;
 	uint64_t polls;
+	uint64_t pending_free_request;
+	uint64_t pending_rdma_read;
+	uint64_t pending_rdma_write;
 };
 
 struct spdk_nvmf_rdma_poller {
@@ -440,6 +443,10 @@ struct spdk_nvmf_rdma_poller {
 	TAILQ_ENTRY(spdk_nvmf_rdma_poller) link;
 };
 
+struct spdk_nvmf_rdma_poll_group_stat {
+	uint64_t pending_data_buffer;
+};
+
 struct spdk_nvmf_rdma_poll_group {
 	struct spdk_nvmf_transport_poll_group group;
 
@@ -447,6 +454,8 @@ struct spdk_nvmf_rdma_poll_group {
 	STAILQ_HEAD(, spdk_nvmf_rdma_request) pending_data_buf_queue;
 
 	TAILQ_HEAD(, spdk_nvmf_rdma_poller) pollers;
+
+	struct spdk_nvmf_rdma_poll_group_stat stat;
 };
 
 /* Assuming rdma_cm uses just one protection domain per ibv_context. */
@@ -1950,6 +1959,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 			if (!rdma_req->req.data) {
 				/* No buffers available. */
+				rgroup->stat.pending_data_buffer++;
 				break;
 			}
 
@@ -1977,6 +1987,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			if (rqpair->current_send_depth + rdma_req->num_outstanding_data_wr > rqpair->max_send_depth ||
 			    rqpair->current_read_depth + rdma_req->num_outstanding_data_wr > rqpair->max_read_depth) {
 				/* We can only have so many WRs outstanding. we have to wait until some finish. */
+				rqpair->poller->stat.pending_rdma_read++;
 				break;
 			}
 
@@ -2031,6 +2042,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			    rqpair->max_send_depth) {
 				/* We can only have so many WRs outstanding. we have to wait until some finish.
 				 * +1 since each request has an additional wr in the resp. */
+				rqpair->poller->stat.pending_rdma_write++;
 				break;
 			}
 
@@ -2616,6 +2628,9 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 			break;
 		}
 	}
+	if (!STAILQ_EMPTY(&resources->incoming_queue) && STAILQ_EMPTY(&resources->free_queue)) {
+		rqpair->poller->stat.pending_free_request++;
+	}
 }
 
 static void
@@ -3695,6 +3710,7 @@ spdk_nvmf_rdma_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
 		return -ENOMEM;
 	}
 
+	(*stat)->rdma.pending_data_buffer = rgroup->stat.pending_data_buffer;
 	(*stat)->rdma.num_devices = num_devices;
 	num_devices = 0;
 	TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
@@ -3702,6 +3718,9 @@ spdk_nvmf_rdma_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
 		device_stat->name = ibv_get_device_name(rpoller->device->context->device);
 		device_stat->polls = rpoller->stat.polls;
 		device_stat->completions = rpoller->stat.completions;
+		device_stat->pending_free_request = rpoller->stat.pending_free_request;
+		device_stat->pending_rdma_read = rpoller->stat.pending_rdma_read;
+		device_stat->pending_rdma_write = rpoller->stat.pending_rdma_write;
 	}
 	return 0;
 }
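
The new fields are monotonic tallies, incremented each time the RDMA request state machine stalls, so consumers typically sample them periodically and look at deltas rather than absolute values. Below is a minimal client sketch for reading them; the Unix socket path (/var/tmp/spdk.sock), the RPC method name ("nvmf_get_stats"), and the top-level "poll_groups" layout of the response are assumptions not shown in this diff, and only the "transports" object is taken from the doc/jsonrpc.md hunk above.

import json
import socket

def rpc(method, sock_path="/var/tmp/spdk.sock"):
    """Send one JSON-RPC request over SPDK's Unix-domain socket (path assumed)."""
    request = {"jsonrpc": "2.0", "id": 1, "method": method}
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(sock_path)
        sock.sendall(json.dumps(request).encode())
        buf = b""
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                raise ConnectionError("socket closed before a full response arrived")
            buf += chunk
            try:
                return json.loads(buf)["result"]  # done once the JSON parses
            except json.JSONDecodeError:
                continue  # partial response; keep reading

# "nvmf_get_stats" and the "poll_groups" wrapper are assumed, not shown in the diff.
for group in rpc("nvmf_get_stats").get("poll_groups", []):
    for transport in group.get("transports", []):
        if transport["trtype"] != "RDMA":
            continue
        # Requests that stalled because no data buffer was available,
        # counted once per poll group.
        print(group.get("name", "?"), "pending_data_buffer:",
              transport["pending_data_buffer"])
        for dev in transport["devices"]:
            # Per-device stalls: no free request slot for an incoming command,
            # or an RDMA READ/WRITE would exceed the queue pair's depth limits.
            print(" ", dev["name"],
                  "pending_free_request:", dev["pending_free_request"],
                  "pending_rdma_read:", dev["pending_rdma_read"],
                  "pending_rdma_write:", dev["pending_rdma_write"])

Note that pending_data_buffer is tracked per poll group while the other three counters live in the per-poller stats and are reported per device, which is why they appear at different nesting levels in the example response.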