nvmf/rdma: Add request latency statistics

This patch adds measurement of the time a request spends from the moment
its receive completion is polled until the request completes. The
accumulated latency and a request counter are reported per RDMA device.
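
In outline, each receive is stamped with the tick count captured when it is
polled, the stamp is carried over to the request, and the elapsed ticks are
added to the poller statistics when the request completes. A minimal sketch of
this accounting pattern, using simplified stand-in structures rather than the
real spdk_nvmf_rdma_* types:

/* Sketch of the tick-stamp/accumulate pattern added by this patch;
 * the structures are illustrative stand-ins, not the SPDK types. */
#include "spdk/env.h"

struct poller_stat {
	uint64_t requests;		/* receives polled */
	uint64_t request_latency;	/* total poll-to-completion time, in ticks */
};

struct request {
	uint64_t receive_tsc;		/* tick count when the receive was polled */
};

/* Called when a receive completion is reaped from the CQ. */
static void
on_receive_polled(struct poller_stat *stat, struct request *req, uint64_t poll_tsc)
{
	stat->requests++;
	req->receive_tsc = poll_tsc;
}

/* Called when the request reaches its completed state. */
static void
on_request_completed(struct poller_stat *stat, struct request *req)
{
	stat->request_latency += spdk_get_ticks() - req->receive_tsc;
}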

Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Change-Id: I1fcda68735f2210c5365dd06f26c10162e4ddf33
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/445291
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>


@@ -4168,6 +4168,7 @@ Example response:
   "jsonrpc": "2.0",
   "id": 1,
   "result": {
+    "tick_rate": 2400000000,
     "poll_groups": [
       {
         "name": "app_thread",
@@ -4177,20 +4178,24 @@ Example response:
         "transports": [
           {
             "trtype": "RDMA",
-            "pending_data_buffer": 0,
+            "pending_data_buffer": 12131888,
             "devices": [
               {
                 "name": "mlx5_1",
-                "polls": 1536729,
+                "polls": 72284105,
                 "completions": 0,
+                "requests": 0,
+                "request_latency": 0,
                 "pending_free_request": 0,
                 "pending_rdma_read": 0,
                 "pending_rdma_write": 0
               },
               {
                 "name": "mlx5_0",
-                "polls": 1536729,
-                "completions": 18667357,
+                "polls": 72284105,
+                "completions": 15165875,
+                "requests": 7582935,
+                "request_latency": 1249323766184,
                 "pending_free_request": 0,
                 "pending_rdma_read": 337602,
                 "pending_rdma_write": 0


@@ -89,6 +89,8 @@ struct spdk_nvmf_rdma_device_stat {
 	const char *name;
 	uint64_t polls;
 	uint64_t completions;
+	uint64_t requests;
+	uint64_t request_latency;
 	uint64_t pending_free_request;
 	uint64_t pending_rdma_read;
 	uint64_t pending_rdma_write;


@@ -1626,6 +1626,10 @@ write_nvmf_transport_stats(struct spdk_json_write_ctx *w,
 	spdk_json_write_named_string(w, "name", stat->rdma.devices[i].name);
 	spdk_json_write_named_uint64(w, "polls", stat->rdma.devices[i].polls);
 	spdk_json_write_named_uint64(w, "completions", stat->rdma.devices[i].completions);
+	spdk_json_write_named_uint64(w, "requests",
+				     stat->rdma.devices[i].requests);
+	spdk_json_write_named_uint64(w, "request_latency",
+				     stat->rdma.devices[i].request_latency);
 	spdk_json_write_named_uint64(w, "pending_free_request",
 				     stat->rdma.devices[i].pending_free_request);
 	spdk_json_write_named_uint64(w, "pending_rdma_read",
@@ -1707,6 +1711,7 @@ spdk_rpc_nvmf_get_stats(struct spdk_jsonrpc_request *request,
 	}

 	spdk_json_write_object_begin(ctx->w);
+	spdk_json_write_named_uint64(ctx->w, "tick_rate", spdk_get_ticks_hz());
 	spdk_json_write_named_array_begin(ctx->w, "poll_groups");

 	spdk_for_each_channel(g_spdk_nvmf_tgt,
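
The tick_rate written here is what makes the tick-denominated counters
self-describing: a consumer can convert request_latency into wall-clock time
from the same RPC response, without querying the target's timer frequency
separately.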


@@ -238,6 +238,7 @@ struct spdk_nvmf_rdma_recv {
 	uint8_t *buf;

 	struct spdk_nvmf_rdma_wr rdma_wr;
+	uint64_t receive_tsc;

 	STAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
 };
@@ -266,6 +267,7 @@ struct spdk_nvmf_rdma_request {
 	void *buffers[NVMF_REQ_MAX_BUFFERS];

 	uint32_t num_outstanding_data_wr;
+	uint64_t receive_tsc;

 	STAILQ_ENTRY(spdk_nvmf_rdma_request) state_link;
 };
@@ -412,6 +414,8 @@ struct spdk_nvmf_rdma_qpair {
 struct spdk_nvmf_rdma_poller_stat {
 	uint64_t completions;
 	uint64_t polls;
+	uint64_t requests;
+	uint64_t request_latency;
 	uint64_t pending_free_request;
 	uint64_t pending_rdma_read;
 	uint64_t pending_rdma_write;
@@ -2082,6 +2086,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0,
 				  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);

+		rqpair->poller->stat.request_latency += spdk_get_ticks() - rdma_req->receive_tsc;
 		nvmf_rdma_request_free(rdma_req, rtransport);
 		break;
 	case RDMA_REQUEST_NUM_STATES:
@@ -2623,6 +2628,7 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 			rqpair->qd++;
 		}

+		rdma_req->receive_tsc = rdma_req->recv->receive_tsc;
 		rdma_req->state = RDMA_REQUEST_STATE_NEW;
 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
 			break;
@@ -3432,6 +3438,7 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
 	int reaped, i;
 	int count = 0;
 	bool error = false;
+	uint64_t poll_tsc = spdk_get_ticks();

 	/* Poll for completing operations. */
 	reaped = ibv_poll_cq(rpoller->cq, 32, wc);
@@ -3487,6 +3494,8 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,

 			rdma_recv->wr.next = NULL;
 			rqpair->current_recv_depth++;
+			rdma_recv->receive_tsc = poll_tsc;
+			rpoller->stat.requests++;
 			STAILQ_INSERT_TAIL(&rqpair->resources->incoming_queue, rdma_recv, link);
 			break;
 		case RDMA_WR_TYPE_DATA:
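
Note the granularity here: poll_tsc is read once per poll, so every receive
reaped by a single ibv_poll_cq() call shares the same timestamp. That keeps
the hot path at one spdk_get_ticks() read per poll iteration instead of one
per completion, at the cost of latency resolution no finer than a poll
iteration.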
@@ -3718,6 +3727,8 @@ spdk_nvmf_rdma_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
 			device_stat->name = ibv_get_device_name(rpoller->device->context->device);
 			device_stat->polls = rpoller->stat.polls;
 			device_stat->completions = rpoller->stat.completions;
+			device_stat->requests = rpoller->stat.requests;
+			device_stat->request_latency = rpoller->stat.request_latency;
 			device_stat->pending_free_request = rpoller->stat.pending_free_request;
 			device_stat->pending_rdma_read = rpoller->stat.pending_rdma_read;
 			device_stat->pending_rdma_write = rpoller->stat.pending_rdma_write;