nvmf/tcp: Use STAILQ for queued_c2h_data_tcp_req and pending_data_buf_queue
This is a small performance optimization and an effort to further unify
I/O buffer management among transports.

The request is guaranteed to be at the head of the STAILQ when
spdk_nvmf_tcp_send_c2h_data() is called and when the
TCP_REQUEST_STATE_NEED_BUFFER case runs in spdk_nvmf_tcp_req_process().
Hence TAILQ_REMOVE is changed to STAILQ_REMOVE_HEAD in these two places.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I0b195874ac22a8d5ecfb283a9865d2615b7d5912
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466637
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent de7568531d
commit 2bc819dd52
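For readers less familiar with the BSD <sys/queue.h> macros involved, the following standalone sketch (not SPDK code; the struct and variable names are invented for illustration) shows the STAILQ operations this patch relies on. An STAILQ_ENTRY carries a single "next" pointer where a TAILQ_ENTRY carries two, so when removals happen only at the head, as on the hot paths below, the singly-linked queue does the same O(1) work with less pointer maintenance:

#include <assert.h>
#include <sys/queue.h>

struct req {
	int id;
	STAILQ_ENTRY(req) link;		/* singly linked: one next pointer */
};

STAILQ_HEAD(req_queue, req);

int main(void)
{
	struct req_queue queue = STAILQ_HEAD_INITIALIZER(queue);
	struct req a = { .id = 1 }, b = { .id = 2 };

	STAILQ_INSERT_TAIL(&queue, &a, link);
	STAILQ_INSERT_TAIL(&queue, &b, link);

	/* FIFO processing: the request being completed is always the
	 * head of the queue, so the O(1) head removal suffices. */
	assert(STAILQ_FIRST(&queue) == &a);
	STAILQ_REMOVE_HEAD(&queue, link);

	/* Removing an element that may sit anywhere in the queue needs
	 * the four-argument form, which walks the list from the head. */
	STAILQ_REMOVE(&queue, &b, req, link);

	assert(STAILQ_EMPTY(&queue));
	return 0;
}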
lib/nvmf/tcp.c

@@ -192,7 +192,7 @@ struct spdk_nvmf_tcp_req {
 	uint32_t elba_length;
 	uint32_t orig_length;
 
-	TAILQ_ENTRY(spdk_nvmf_tcp_req) link;
+	STAILQ_ENTRY(spdk_nvmf_tcp_req) link;
 	TAILQ_ENTRY(spdk_nvmf_tcp_req) state_link;
 };
 
@@ -228,7 +228,7 @@ struct spdk_nvmf_tcp_qpair {
 	/* Number of requests in each state */
 	int32_t state_cntr[TCP_REQUEST_NUM_STATES];
 
-	TAILQ_HEAD(, spdk_nvmf_tcp_req) queued_c2h_data_tcp_req;
+	STAILQ_HEAD(, spdk_nvmf_tcp_req) queued_c2h_data_tcp_req;
 
 	uint8_t cpda;
 
@@ -274,7 +274,7 @@ struct spdk_nvmf_tcp_poll_group {
 	struct spdk_sock_group *sock_group;
 
 	/* Requests that are waiting to obtain a data buffer */
-	TAILQ_HEAD(, spdk_nvmf_tcp_req) pending_data_buf_queue;
+	STAILQ_HEAD(, spdk_nvmf_tcp_req) pending_data_buf_queue;
 
 	TAILQ_HEAD(, spdk_nvmf_tcp_qpair) qpairs;
 };
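For context on the struct changes above, the two entry macros differ only in how many pointers they embed in each element. The expansions below are approximate (in the spirit of the BSD <sys/queue.h>; field names vary between implementations) and are shown for comparison only:

/* Approximate <sys/queue.h> expansions, for comparison only. */
#define TAILQ_ENTRY(type)						\
struct {								\
	struct type *tqe_next;	/* next element */			\
	struct type **tqe_prev;	/* address of previous next element */	\
}

#define STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

Dropping the back pointer shrinks every spdk_nvmf_tcp_req slightly and removes back-pointer bookkeeping from each insert and removal; the trade-off is that removing an arbitrary, non-head element becomes a linear scan, which the rest of the patch confines to cleanup paths.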
@@ -432,8 +432,8 @@ spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
 		spdk_nvmf_tcp_pdu_put(tqpair, pdu);
 	}
 
-	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->queued_c2h_data_tcp_req, link, req_tmp) {
-		TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
+	while (!STAILQ_EMPTY(&tqpair->queued_c2h_data_tcp_req)) {
+		STAILQ_REMOVE_HEAD(&tqpair->queued_c2h_data_tcp_req, link);
 	}
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
 
@@ -442,7 +442,7 @@ spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
 	/* Wipe the requests waiting for buffer from the global list */
 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[TCP_REQUEST_STATE_NEED_BUFFER], state_link,
 			   req_tmp) {
-		TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
+		STAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, spdk_nvmf_tcp_req, link);
 	}
 
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
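Note why the replacement in this hunk needs two extra arguments: a singly-linked queue cannot reach an element's predecessor from the element itself, so the generic remove has to walk from the head. Roughly (a simplified sketch of the BSD macro, with head/elm/type/field as placeholders):

/* Simplified behavior of STAILQ_REMOVE(head, elm, type, field). */
if (STAILQ_FIRST(head) == elm) {
	STAILQ_REMOVE_HEAD(head, field);		/* O(1) fast path */
} else {
	struct type *cur = STAILQ_FIRST(head);
	while (STAILQ_NEXT(cur, field) != elm) {	/* O(n) walk */
		cur = STAILQ_NEXT(cur, field);
	}
	STAILQ_NEXT(cur, field) = STAILQ_NEXT(elm, field);
	/* a real implementation also fixes up the tail pointer here */
}

That linear cost is acceptable here because this path runs only while tearing down a qpair, not per I/O.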
@@ -1061,7 +1061,7 @@ spdk_nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
 
 	TAILQ_INIT(&tqpair->send_queue);
 	TAILQ_INIT(&tqpair->free_queue);
-	TAILQ_INIT(&tqpair->queued_c2h_data_tcp_req);
+	STAILQ_INIT(&tqpair->queued_c2h_data_tcp_req);
 
 	/* Initialise request state queues of the qpair */
 	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
@@ -1211,7 +1211,7 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&tgroup->qpairs);
-	TAILQ_INIT(&tgroup->pending_data_buf_queue);
+	STAILQ_INIT(&tgroup->pending_data_buf_queue);
 
 	return &tgroup->group;
 
@@ -1244,7 +1244,7 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
 	spdk_sock_group_close(&tgroup->sock_group);
 
-	if (!TAILQ_EMPTY(&tgroup->pending_data_buf_queue)) {
+	if (!STAILQ_EMPTY(&tgroup->pending_data_buf_queue)) {
 		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
 	}
 
@@ -2351,6 +2351,8 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
 	uint32_t plen, pdo, alignment;
 	int rc;
 
+	assert(tcp_req == STAILQ_FIRST(&tqpair->queued_c2h_data_tcp_req));
+
 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n");
 
 	rsp_pdu = spdk_nvmf_tcp_pdu_get(tqpair);
@@ -2420,7 +2422,7 @@ spdk_nvmf_tcp_send_c2h_data(struct spdk_nvmf_tcp_qpair *tqpair,
 		if (tqpair->qpair.transport->opts.c2h_success) {
 			c2h_data->common.flags |= SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS;
 		}
-		TAILQ_REMOVE(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
+		STAILQ_REMOVE_HEAD(&tqpair->queued_c2h_data_tcp_req, link);
 	}
 
 	tqpair->c2h_data_pdu_cnt += 1;
@@ -2439,9 +2441,9 @@ spdk_nvmf_tcp_handle_pending_c2h_data_queue(struct spdk_nvmf_tcp_qpair *tqpair)
 {
 	struct spdk_nvmf_tcp_req *tcp_req;
 
-	while (!TAILQ_EMPTY(&tqpair->queued_c2h_data_tcp_req) &&
+	while (!STAILQ_EMPTY(&tqpair->queued_c2h_data_tcp_req) &&
 	       (tqpair->c2h_data_pdu_cnt < NVMF_TCP_QPAIR_MAX_C2H_PDU_NUM)) {
-		tcp_req = TAILQ_FIRST(&tqpair->queued_c2h_data_tcp_req);
+		tcp_req = STAILQ_FIRST(&tqpair->queued_c2h_data_tcp_req);
 		spdk_nvmf_tcp_send_c2h_data(tqpair, tcp_req);
 	}
 }
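This drain loop, together with the assert added to spdk_nvmf_tcp_send_c2h_data() above, is what makes STAILQ_REMOVE_HEAD safe in the sender: the loop only ever passes the head of the queue onward. A hypothetical minimal model of that contract, reusing the req/req_queue types from the first sketch (again not SPDK code, and the real sender pops the head only once the request's final C2H PDU has been queued):

/* Hypothetical model of the peek-then-pop contract (not SPDK code). */
static void send_one(struct req_queue *queue, struct req *r)
{
	/* Caller guarantees r is the head, so no list walk is needed. */
	assert(r == STAILQ_FIRST(queue));
	/* ... build and queue one C2H data PDU for r ... */
	STAILQ_REMOVE_HEAD(queue, link);	/* O(1) pop of the head */
}

static void drain(struct req_queue *queue)
{
	/* The real loop is additionally bounded by the in-flight PDU cap
	 * (tqpair->c2h_data_pdu_cnt < NVMF_TCP_QPAIR_MAX_C2H_PDU_NUM). */
	while (!STAILQ_EMPTY(queue)) {
		send_one(queue, STAILQ_FIRST(queue));
	}
}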
@@ -2454,7 +2456,7 @@ spdk_nvmf_tcp_queue_c2h_data(struct spdk_nvmf_tcp_req *tcp_req,
 
 	assert(tcp_req->c2h_data_pdu_num < NVMF_TCP_QPAIR_MAX_C2H_PDU_NUM);
 
-	TAILQ_INSERT_TAIL(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
+	STAILQ_INSERT_TAIL(&tqpair->queued_c2h_data_tcp_req, tcp_req, link);
 	spdk_nvmf_tcp_handle_pending_c2h_data_queue(tqpair);
 }
 
@@ -2588,14 +2590,14 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		}
 
 		spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER);
-		TAILQ_INSERT_TAIL(&tqpair->group->pending_data_buf_queue, tcp_req, link);
+		STAILQ_INSERT_TAIL(&tqpair->group->pending_data_buf_queue, tcp_req, link);
 		break;
 	case TCP_REQUEST_STATE_NEED_BUFFER:
 		spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0);
 
 		assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-		if (tcp_req != TAILQ_FIRST(&tqpair->group->pending_data_buf_queue)) {
+		if (tcp_req != STAILQ_FIRST(&tqpair->group->pending_data_buf_queue)) {
 			SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP,
 				      "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n",
 				      tcp_req, tqpair);
@@ -2606,7 +2608,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 		/* Try to get a data buffer */
 		rc = spdk_nvmf_tcp_req_parse_sgl(ttransport, tcp_req);
 		if (rc < 0) {
-			TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
+			STAILQ_REMOVE_HEAD(&tqpair->group->pending_data_buf_queue, link);
 			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 			/* Reset the tqpair receving pdu state */
 			spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
@@ -2621,7 +2623,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			break;
 		}
 
-		TAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, link);
+		STAILQ_REMOVE_HEAD(&tqpair->group->pending_data_buf_queue, link);
 
 		/* If data is transferring from host to controller, we need to do a transfer from the host. */
 		if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
@@ -2833,7 +2835,7 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
 		return 0;
 	}
 
-	TAILQ_FOREACH_SAFE(tcp_req, &tgroup->pending_data_buf_queue, link, req_tmp) {
+	STAILQ_FOREACH_SAFE(tcp_req, &tgroup->pending_data_buf_queue, link, req_tmp) {
 		if (spdk_nvmf_tcp_req_process(ttransport, tcp_req) == false) {
 			break;
 		}
test/unit/lib/nvmf/tcp.c/tcp_ut.c

@@ -424,7 +424,7 @@ test_nvmf_tcp_send_c2h_data(void)
 	tqpair.qpair.transport = &ttransport.transport;
 	TAILQ_INIT(&tqpair.free_queue);
 	TAILQ_INIT(&tqpair.send_queue);
-	TAILQ_INIT(&tqpair.queued_c2h_data_tcp_req);
+	STAILQ_INIT(&tqpair.queued_c2h_data_tcp_req);
 
 	/* Set qpair state to make unrelated operations NOP */
 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
@@ -446,7 +446,7 @@ test_nvmf_tcp_send_c2h_data(void)
 
 	CU_ASSERT(spdk_nvmf_tcp_calc_c2h_data_pdu_num(&tcp_req) == 3);
 
-	TAILQ_INSERT_TAIL(&tqpair.queued_c2h_data_tcp_req, &tcp_req, link);
+	STAILQ_INSERT_TAIL(&tqpair.queued_c2h_data_tcp_req, &tcp_req, link);
 
 	tcp_req.c2h_data_offset = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2;
 
@@ -471,7 +471,7 @@ test_nvmf_tcp_send_c2h_data(void)
 	CU_ASSERT(pdu.data_iov[1].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
 
 	CU_ASSERT(tcp_req.c2h_data_offset == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 3);
-	CU_ASSERT(TAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);
+	CU_ASSERT(STAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);
 
 	/* 2nd C2H */
 	spdk_nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
@@ -494,7 +494,7 @@ test_nvmf_tcp_send_c2h_data(void)
 	CU_ASSERT(pdu.data_iov[1].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
 
 	CU_ASSERT(tcp_req.c2h_data_offset == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 5);
-	CU_ASSERT(TAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);
+	CU_ASSERT(STAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);
 
 	/* 3rd C2H */
 	spdk_nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
@@ -515,7 +515,7 @@ test_nvmf_tcp_send_c2h_data(void)
 
 	CU_ASSERT(tcp_req.c2h_data_offset == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE * 3);
 	CU_ASSERT(tqpair.c2h_data_pdu_cnt == 3);
-	CU_ASSERT(TAILQ_EMPTY(&tqpair.queued_c2h_data_tcp_req));
+	CU_ASSERT(STAILQ_EMPTY(&tqpair.queued_c2h_data_tcp_req));
 
 	spdk_poller_unregister(&tqpair.flush_poller);