nvmf: add tracepoints for ib async events

While here, clean up the trace application output based
on some debugging done with these tracepoints.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: Iaf79f0ff8c80d0a6b9768ae0da213d57e98ec552

Reviewed-on: https://review.gerrithub.io/424286
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Author: Jim Harris, 2018-08-31 16:01:01 -07:00
parent 82c3c30f44
commit e8881867f8
2 changed files with 45 additions and 1 deletion
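As a side note for anyone reading the resulting traces: the TRACE_RDMA_IBV_ASYNC_EVENT tracepoint added below records the raw event.event_type number. A minimal sketch of decoding that number offline, assuming libibverbs is installed (ibv_event_type_str() comes from libibverbs, not from this commit, and the sample value is only an assumption):

/* Hypothetical decoder, not part of this commit: turn the numeric ibv
 * async event type recorded by the tracepoint back into a readable name.
 * Build with: cc decode_ibv_event.c -libverbs */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	/* Assumed sample value read back from a trace. */
	enum ibv_event_type type = IBV_EVENT_QP_LAST_WQE_REACHED;

	printf("ibv async event %d = %s\n", (int)type, ibv_event_type_str(type));
	return 0;
}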


@@ -194,6 +194,8 @@ print_event(struct spdk_trace_entry *e, uint64_t tsc_rate,
 		} else {
 			printf("id: N/A");
 		}
+	} else if (e->object_id != 0) {
+		print_arg(true, "object: ", e->object_id);
 	}
 	printf("\n");
 }


@@ -126,6 +126,12 @@ enum spdk_nvmf_rdma_request_state {
 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
 #define TRACE_RDMA_REQUEST_STATE_COMPLETING SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
 #define TRACE_RDMA_REQUEST_STATE_COMPLETED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
+#define TRACE_RDMA_QP_CREATE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xB)
+#define TRACE_RDMA_IBV_ASYNC_EVENT SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xC)
+#define TRACE_RDMA_CM_ASYNC_EVENT SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xD)
+#define TRACE_RDMA_QP_STATE_CHANGE SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xE)
+#define TRACE_RDMA_QP_DISCONNECT SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xF)
+#define TRACE_RDMA_QP_DESTROY SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x10)

 SPDK_TRACE_REGISTER_FN(nvmf_trace)
 {
@@ -163,6 +169,19 @@ SPDK_TRACE_REGISTER_FN(nvmf_trace)
 	spdk_trace_register_description("RDMA_REQ_COMPLETED", "",
 					TRACE_RDMA_REQUEST_STATE_COMPLETED,
 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid: ");
+	spdk_trace_register_description("RDMA_QP_CREATE", "", TRACE_RDMA_QP_CREATE,
+					OWNER_NONE, OBJECT_NONE, 0, 0, "");
+	spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", "", TRACE_RDMA_IBV_ASYNC_EVENT,
+					OWNER_NONE, OBJECT_NONE, 0, 0, "type: ");
+	spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", "", TRACE_RDMA_CM_ASYNC_EVENT,
+					OWNER_NONE, OBJECT_NONE, 0, 0, "type: ");
+	spdk_trace_register_description("RDMA_QP_STATE_CHANGE", "", TRACE_RDMA_QP_STATE_CHANGE,
+					OWNER_NONE, OBJECT_NONE, 0, 1, "state: ");
+	spdk_trace_register_description("RDMA_QP_DISCONNECT", "", TRACE_RDMA_QP_DISCONNECT,
+					OWNER_NONE, OBJECT_NONE, 0, 0, "");
+	spdk_trace_register_description("RDMA_QP_DESTROY", "", TRACE_RDMA_QP_DESTROY,
+					OWNER_NONE, OBJECT_NONE, 0, 0, "");
 }

 /* This structure holds commands as they are received off the wire.
@@ -342,6 +361,7 @@ static const char *str_ibv_qp_state[] = {
 static enum ibv_qp_state
 spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
+	enum ibv_qp_state old_state, new_state;
 	int rc;

 	/* All the attributes needed for recovery */
@@ -362,6 +382,7 @@ spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
 			IBV_QP_RNR_RETRY |
 			IBV_QP_MAX_QP_RD_ATOMIC;
+	old_state = rqpair->ibv_attr.qp_state;
 	rc = ibv_query_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
 			  spdk_nvmf_ibv_attr_mask, &rqpair->ibv_init_attr);
@@ -371,7 +392,13 @@ spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
 		assert(false);
 	}

-	return rqpair->ibv_attr.qp_state;
+	new_state = rqpair->ibv_attr.qp_state;
+
+	if (old_state != new_state) {
+		spdk_trace_record(TRACE_RDMA_QP_STATE_CHANGE, 0, 0,
+				  (uintptr_t)rqpair->cm_id, new_state);
+	}
+	return new_state;
 }

 static int
@@ -499,6 +526,8 @@ spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
 static void
 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
 {
+	spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair->cm_id, 0);
+
 	if (spdk_nvmf_rdma_cur_queue_depth(rqpair)) {
 		rqpair->qpair_disconnected = true;
 		return;
@@ -572,6 +601,7 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
 		return -1;
 	}

+	spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);

 	rqpair->reqs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->reqs));
@@ -949,6 +979,7 @@ nvmf_rdma_disconnect(struct rdma_cm_event *evt)
 	}

 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+	spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair->cm_id, 0);
 	spdk_nvmf_rdma_update_ibv_state(rqpair);

 	spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
@@ -995,6 +1026,8 @@ spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn c
 		if (rc == 0) {
 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);

+			spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event);
+
 			switch (event->event) {
 			case RDMA_CM_EVENT_ADDR_RESOLVED:
 			case RDMA_CM_EVENT_ADDR_ERROR:
@@ -2164,10 +2197,14 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	switch (event.event_type) {
 	case IBV_EVENT_QP_FATAL:
+		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
+				  (uintptr_t)rqpair->cm_id, event.event_type);
 		spdk_nvmf_rdma_update_ibv_state(rqpair);
 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
 		break;
 	case IBV_EVENT_QP_LAST_WQE_REACHED:
+		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
+				  (uintptr_t)rqpair->cm_id, event.event_type);
 		spdk_nvmf_rdma_update_ibv_state(rqpair);
 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
 		break;
@@ -2177,6 +2214,8 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 		 * Note that we're not on the correct thread to access the qpair, but
 		 * the operations that the below calls make all happen to be thread
 		 * safe. */
+		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
+				  (uintptr_t)rqpair->cm_id, event.event_type);
 		state = spdk_nvmf_rdma_update_ibv_state(rqpair);
 		if (state == IBV_QPS_ERR) {
 			spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
@@ -2187,6 +2226,8 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	case IBV_EVENT_COMM_EST:
 	case IBV_EVENT_PATH_MIG:
 	case IBV_EVENT_PATH_MIG_ERR:
+		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
+				  (uintptr_t)rqpair->cm_id, event.event_type);
 		spdk_nvmf_rdma_update_ibv_state(rqpair);
 		break;
 	case IBV_EVENT_CQ_ERR:
@@ -2201,6 +2242,7 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
 	case IBV_EVENT_CLIENT_REREGISTER:
 	case IBV_EVENT_GID_CHANGE:
 	default:
+		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 0, event.event_type);
 		break;
 	}
 	ibv_ack_async_event(&event);
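
Similarly, the TRACE_RDMA_QP_STATE_CHANGE tracepoint above stores the new queue pair state as a raw enum ibv_qp_state value. A hypothetical helper (not part of this commit; the sample value of 6 is only an assumption) to map that number back to a name when inspecting traces:

/* Hypothetical helper, not part of this commit: map the numeric qp state
 * recorded by TRACE_RDMA_QP_STATE_CHANGE back to a readable name.
 * The names mirror enum ibv_qp_state from <infiniband/verbs.h>. */
#include <stdio.h>
#include <infiniband/verbs.h>

static const char *qp_state_name(enum ibv_qp_state state)
{
	switch (state) {
	case IBV_QPS_RESET:	return "RESET";
	case IBV_QPS_INIT:	return "INIT";
	case IBV_QPS_RTR:	return "RTR";
	case IBV_QPS_RTS:	return "RTS";
	case IBV_QPS_SQD:	return "SQD";
	case IBV_QPS_SQE:	return "SQE";
	case IBV_QPS_ERR:	return "ERR";
	default:		return "UNKNOWN";
	}
}

int main(void)
{
	/* Assumed sample: "state: 6" seen in the trace output. */
	printf("qp state 6 = %s\n", qp_state_name((enum ibv_qp_state)6));
	return 0;
}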