lib/trace: don't pass zero as a non-argument
Now that the trace library can handle multiple arguments, there's no point in passing 0 for tracepoints that don't have any arguments. This patch removes all such instances. It allows us to verify that `spdk_trace_record()` was issued with the exact number of arguments as specified in the definition of the tracepoint. Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com> Change-Id: Idbdb6f5111bd6175e145a12c1f0c095b62d744a9 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8125 Tested-by: SPDK CI Jenkins <sys_sgci@intel.com> Community-CI: Mellanox Build Bot Reviewed-by: Ziye Yang <ziye.yang@intel.com> Reviewed-by: Jim Harris <james.r.harris@intel.com> Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
This commit is contained in:
parent
c514ba5053
commit
c556b6b892
@ -2152,7 +2152,7 @@ bdev_io_split_submit(struct spdk_bdev_io *bdev_io, struct iovec *iov, int iovcnt
|
||||
} else {
|
||||
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
|
||||
if (bdev_io->u.bdev.split_outstanding == 0) {
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, 0);
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io);
|
||||
TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
|
||||
bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
|
||||
}
|
||||
@ -2267,7 +2267,7 @@ _bdev_rw_split(void *_bdev_io)
|
||||
if (bdev_io->u.bdev.split_outstanding == 0) {
|
||||
SPDK_ERRLOG("The first child io was less than a block size\n");
|
||||
bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, 0);
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io);
|
||||
TAILQ_REMOVE(&bdev_io->internal.ch->io_submitted, bdev_io, internal.ch_link);
|
||||
bdev_io->internal.cb(bdev_io, false, bdev_io->internal.caller_ctx);
|
||||
}
|
||||
@ -2368,7 +2368,7 @@ bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
|
||||
*/
|
||||
if (parent_io->u.bdev.split_remaining_num_blocks == 0) {
|
||||
assert(parent_io->internal.cb != bdev_io_split_done);
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)parent_io, 0);
|
||||
spdk_trace_record(TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)parent_io);
|
||||
TAILQ_REMOVE(&parent_io->internal.ch->io_submitted, parent_io, internal.ch_link);
|
||||
parent_io->internal.cb(parent_io, parent_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS,
|
||||
parent_io->internal.caller_ctx);
|
||||
@ -5198,7 +5198,7 @@ bdev_io_complete(void *ctx)
|
||||
|
||||
tsc = spdk_get_ticks();
|
||||
tsc_diff = tsc - bdev_io->internal.submit_tsc;
|
||||
spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io, 0);
|
||||
spdk_trace_record_tsc(tsc, TRACE_BDEV_IO_DONE, 0, 0, (uintptr_t)bdev_io);
|
||||
|
||||
TAILQ_REMOVE(&bdev_ch->io_submitted, bdev_io, internal.ch_link);
|
||||
|
||||
|
@ -1212,7 +1212,7 @@ iscsi_task_cpl(struct spdk_scsi_task *scsi_task)
|
||||
struct spdk_iscsi_conn *conn = task->conn;
|
||||
struct spdk_iscsi_pdu *pdu = task->pdu;
|
||||
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_DONE, conn->id, 0, (uintptr_t)task, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_DONE, conn->id, 0, (uintptr_t)task);
|
||||
|
||||
task->is_queued = false;
|
||||
primary = iscsi_task_get_primary(task);
|
||||
@ -1223,7 +1223,7 @@ iscsi_task_cpl(struct spdk_scsi_task *scsi_task)
|
||||
process_non_read_task_completion(conn, task, primary);
|
||||
}
|
||||
if (!task->parent) {
|
||||
spdk_trace_record(TRACE_ISCSI_PDU_COMPLETED, 0, 0, (uintptr_t)pdu, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_PDU_COMPLETED, 0, 0, (uintptr_t)pdu);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1321,7 +1321,7 @@ iscsi_conn_read_data(struct spdk_iscsi_conn *conn, int bytes,
|
||||
ret = spdk_sock_recv(conn->sock, buf, bytes);
|
||||
|
||||
if (ret > 0) {
|
||||
spdk_trace_record(TRACE_ISCSI_READ_FROM_SOCKET_DONE, conn->id, ret, 0, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_READ_FROM_SOCKET_DONE, conn->id, ret, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1362,7 +1362,7 @@ iscsi_conn_readv_data(struct spdk_iscsi_conn *conn,
|
||||
ret = spdk_sock_readv(conn->sock, iov, iovcnt);
|
||||
|
||||
if (ret > 0) {
|
||||
spdk_trace_record(TRACE_ISCSI_READ_FROM_SOCKET_DONE, conn->id, ret, 0, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_READ_FROM_SOCKET_DONE, conn->id, ret, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1439,7 +1439,7 @@ _iscsi_conn_pdu_write_done(void *cb_arg, int err)
|
||||
if (err != 0) {
|
||||
conn->state = ISCSI_CONN_STATE_EXITING;
|
||||
} else {
|
||||
spdk_trace_record(TRACE_ISCSI_FLUSH_WRITEBUF_DONE, conn->id, pdu->mapped_length, (uintptr_t)pdu, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_FLUSH_WRITEBUF_DONE, conn->id, pdu->mapped_length, (uintptr_t)pdu);
|
||||
}
|
||||
|
||||
if ((conn->full_feature) &&
|
||||
|
@ -4813,7 +4813,7 @@ iscsi_read_pdu(struct spdk_iscsi_conn *conn)
|
||||
rc = 0;
|
||||
}
|
||||
if (rc == 0) {
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_EXECUTED, 0, 0, (uintptr_t)pdu, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_EXECUTED, 0, 0, (uintptr_t)pdu);
|
||||
iscsi_put_pdu(pdu);
|
||||
conn->pdu_in_progress = NULL;
|
||||
conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_READY;
|
||||
|
@ -331,7 +331,7 @@ nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
|
||||
}
|
||||
if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
|
||||
spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
|
||||
(uint64_t)(&fc_req->req), 0);
|
||||
(uint64_t)(&fc_req->req));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -884,7 +884,7 @@ nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
|
||||
struct ibv_recv_wr *bad_recv_wr = NULL;
|
||||
int rc;
|
||||
|
||||
spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair, 0);
|
||||
spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair);
|
||||
|
||||
if (rqpair->qd != 0) {
|
||||
struct spdk_nvmf_qpair *qpair = &rqpair->qpair;
|
||||
@ -1043,7 +1043,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
|
||||
qp_init_attr.cap.max_send_wr);
|
||||
rqpair->max_send_sge = spdk_min(NVMF_DEFAULT_TX_SGE, qp_init_attr.cap.max_send_sge);
|
||||
rqpair->max_recv_sge = spdk_min(NVMF_DEFAULT_RX_SGE, qp_init_attr.cap.max_recv_sge);
|
||||
spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair, 0);
|
||||
spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair);
|
||||
SPDK_DEBUGLOG(rdma, "New RDMA Connection: %p\n", qpair);
|
||||
|
||||
if (rqpair->poller->srq == NULL) {
|
||||
@ -2811,7 +2811,7 @@ nvmf_rdma_disconnect(struct rdma_cm_event *evt)
|
||||
|
||||
rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
|
||||
|
||||
spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair, 0);
|
||||
spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair);
|
||||
|
||||
spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
|
||||
|
||||
|
@ -1985,7 +1985,7 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
|
||||
return NVME_TCP_PDU_FATAL;
|
||||
} else if (rc > 0) {
|
||||
pdu->ch_valid_bytes += rc;
|
||||
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0, 0);
|
||||
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0);
|
||||
if (spdk_likely(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) {
|
||||
nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
|
||||
}
|
||||
@ -2006,8 +2006,7 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
|
||||
if (rc < 0) {
|
||||
return NVME_TCP_PDU_FATAL;
|
||||
} else if (rc > 0) {
|
||||
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE,
|
||||
0, rc, 0, 0);
|
||||
spdk_trace_record(TRACE_TCP_READ_FROM_SOCKET_DONE, 0, rc, 0);
|
||||
pdu->psh_valid_bytes += rc;
|
||||
}
|
||||
|
||||
@ -2453,7 +2452,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
|
||||
* to escape this state. */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_NEW:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEW, 0, 0, (uintptr_t)tcp_req);
|
||||
|
||||
/* copy the cmd from the receive pdu */
|
||||
tcp_req->cmd = tqpair->pdu_in_progress->hdr.capsule_cmd.ccsqe;
|
||||
@ -2492,7 +2491,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
|
||||
STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link);
|
||||
break;
|
||||
case TCP_REQUEST_STATE_NEED_BUFFER:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req);
|
||||
|
||||
assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
|
||||
|
||||
@ -2547,18 +2546,18 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
|
||||
nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_EXECUTE);
|
||||
break;
|
||||
case TCP_REQUEST_STATE_AWAITING_R2T_ACK:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_AWAIT_R2T_ACK, 0, 0, (uintptr_t)tcp_req);
|
||||
/* The R2T completion or the h2c data incoming will kick it out of this state. */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
|
||||
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0,
|
||||
(uintptr_t)tcp_req, 0);
|
||||
(uintptr_t)tcp_req);
|
||||
/* Some external code must kick a request into TCP_REQUEST_STATE_READY_TO_EXECUTE
|
||||
* to escape this state. */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_READY_TO_EXECUTE:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)tcp_req);
|
||||
|
||||
if (spdk_unlikely(tcp_req->req.dif_enabled)) {
|
||||
assert(tcp_req->req.dif.elba_length >= tcp_req->req.length);
|
||||
@ -2569,12 +2568,12 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
|
||||
spdk_nvmf_request_exec(&tcp_req->req);
|
||||
break;
|
||||
case TCP_REQUEST_STATE_EXECUTING:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)tcp_req);
|
||||
/* Some external code must kick a request into TCP_REQUEST_STATE_EXECUTED
|
||||
* to escape this state. */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_EXECUTED:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)tcp_req);
|
||||
|
||||
if (spdk_unlikely(tcp_req->req.dif_enabled)) {
|
||||
tcp_req->req.length = tcp_req->req.dif.orig_length;
|
||||
@ -2583,19 +2582,18 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
|
||||
nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_READY_TO_COMPLETE);
|
||||
break;
|
||||
case TCP_REQUEST_STATE_READY_TO_COMPLETE:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)tcp_req);
|
||||
rc = request_transfer_out(&tcp_req->req);
|
||||
assert(rc == 0); /* No good way to handle this currently */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0,
|
||||
(uintptr_t)tcp_req,
|
||||
0);
|
||||
(uintptr_t)tcp_req);
|
||||
/* Some external code must kick a request into TCP_REQUEST_STATE_COMPLETED
|
||||
* to escape this state. */
|
||||
break;
|
||||
case TCP_REQUEST_STATE_COMPLETED:
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
|
||||
spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req);
|
||||
if (tcp_req->req.data_from_pool) {
|
||||
spdk_nvmf_request_free_buffers(&tcp_req->req, group, transport);
|
||||
} else if (spdk_unlikely(tcp_req->has_incapsule_data && (tcp_req->cmd.opc == SPDK_NVME_OPC_FABRIC ||
|
||||
|
@ -47,7 +47,7 @@ scsi_lun_complete_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
|
||||
{
|
||||
if (lun) {
|
||||
TAILQ_REMOVE(&lun->tasks, task, scsi_link);
|
||||
spdk_trace_record(TRACE_SCSI_TASK_DONE, lun->dev->id, 0, (uintptr_t)task, 0);
|
||||
spdk_trace_record(TRACE_SCSI_TASK_DONE, lun->dev->id, 0, (uintptr_t)task);
|
||||
}
|
||||
task->cpl_fn(task);
|
||||
}
|
||||
@ -193,7 +193,7 @@ _scsi_lun_execute_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
|
||||
int rc;
|
||||
|
||||
task->status = SPDK_SCSI_STATUS_GOOD;
|
||||
spdk_trace_record(TRACE_SCSI_TASK_START, lun->dev->id, task->length, (uintptr_t)task, 0);
|
||||
spdk_trace_record(TRACE_SCSI_TASK_START, lun->dev->id, task->length, (uintptr_t)task);
|
||||
TAILQ_INSERT_TAIL(&lun->tasks, task, scsi_link);
|
||||
if (!lun->removed) {
|
||||
/* Check the command is allowed or not when reservation is exist */
|
||||
|
@ -79,12 +79,8 @@ _spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, uint32_
|
||||
next_entry->object_id = object_id;
|
||||
|
||||
tpoint = &g_trace_flags->tpoint[tpoint_id];
|
||||
/* Make sure that the number of arguments passed matches tracepoint definition. For now,
|
||||
* allow passing extra arguments (which will be silently discard), as some traces that don't
|
||||
* have any arguments pass 0 as an argument. Once they're fixed, change the condition to
|
||||
* "!=".
|
||||
*/
|
||||
if (tpoint->num_args > num_args) {
|
||||
/* Make sure that the number of arguments passed match tracepoint definition */
|
||||
if (tpoint->num_args != num_args) {
|
||||
assert(0 && "Unexpected number of tracepoint arguments");
|
||||
return;
|
||||
}
|
||||
|
@ -511,7 +511,7 @@ iscsi_fuzz_read_pdu(struct spdk_iscsi_conn *conn)
|
||||
rc = 0;
|
||||
}
|
||||
if (rc == 0) {
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_EXECUTED, 0, 0, (uintptr_t)pdu, 0);
|
||||
spdk_trace_record(TRACE_ISCSI_TASK_EXECUTED, 0, 0, (uintptr_t)pdu);
|
||||
conn->pdu_in_progress = NULL;
|
||||
conn->pdu_recv_state = ISCSI_PDU_RECV_STATE_AWAIT_PDU_READY;
|
||||
return 1;
|
||||
|
Loading…
Reference in New Issue
Block a user