diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 9f7efd742..b76301e91 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -269,7 +269,6 @@ struct spdk_nvmf_rdma_request {
 
 	uint32_t				num_outstanding_data_wr;
 
-	TAILQ_ENTRY(spdk_nvmf_rdma_request)	link;
 	STAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
 };
 
@@ -417,7 +416,7 @@ struct spdk_nvmf_rdma_poll_group {
 	struct spdk_nvmf_transport_poll_group	group;
 
 	/* Requests that are waiting to obtain a data buffer */
-	TAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
+	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
 
 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
 };
@@ -1816,7 +1815,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 	 * to release resources. */
 	if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
-			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
+			STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
 			STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
@@ -1860,7 +1859,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		}
 
 		rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
-		TAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, link);
+		STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
 		break;
 	case RDMA_REQUEST_STATE_NEED_BUFFER:
 		spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
@@ -1868,7 +1867,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 		assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-		if (rdma_req != TAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
+		if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
 			/* This request needs to wait in line to obtain a buffer */
 			break;
 		}
@@ -1876,7 +1875,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 		/* Try to get a data buffer */
 		rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
 		if (rc < 0) {
-			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
+			STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
 			rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
 			break;
@@ -1887,7 +1886,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			break;
 		}
 
-		TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
+		STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
 
 		/* If data is transferring from host to controller and the data didn't
 		 * arrive using in capsule data, we need to do a transfer from the host.
@@ -2526,8 +2525,8 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 	}
 
 	/* The second highest priority is I/O waiting on memory buffers. */
-	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, link,
-			   req_tmp) {
+	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
+			    req_tmp) {
 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
 			break;
 		}
 	}
@@ -2902,7 +2901,7 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&rgroup->pollers);
-	TAILQ_INIT(&rgroup->pending_data_buf_queue);
+	STAILQ_INIT(&rgroup->pending_data_buf_queue);
 
 	pthread_mutex_lock(&rtransport->lock);
 	TAILQ_FOREACH(device, &rtransport->devices, link) {
@@ -3008,7 +3007,7 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 		free(poller);
 	}
 
-	if (!TAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
+	if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
 		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
 	}
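
For context on the queue macros involved, here is a minimal, hypothetical sketch (plain <sys/queue.h>, not SPDK code; pending_req, pending_queue, and q are invented names) of the singly-linked tail queue pattern that pending_data_buf_queue is converted to. An STAILQ entry carries only a forward pointer, so the per-request TAILQ_ENTRY link field can be dropped in favor of the existing state_link; the trade-off is that removing anything other than the head requires a list walk, which is why the FIFO paths above use STAILQ_REMOVE_HEAD while the qpair error-cleanup path uses the four-argument STAILQ_REMOVE.

/*
 * Illustrative only -- not SPDK code.
 */
#include <stdio.h>
#include <sys/queue.h>

struct pending_req {
	int				id;
	STAILQ_ENTRY(pending_req)	link;	/* single forward pointer, no prev pointer */
};

STAILQ_HEAD(pending_queue, pending_req);

int main(void)
{
	struct pending_queue q;
	struct pending_req reqs[3];
	int i;

	STAILQ_INIT(&q);

	for (i = 0; i < 3; i++) {
		reqs[i].id = i;
		STAILQ_INSERT_TAIL(&q, &reqs[i], link);
	}

	/* FIFO fast path: only the head element is ever dequeued, so
	 * STAILQ_REMOVE_HEAD is O(1) and needs no back pointer. */
	while (!STAILQ_EMPTY(&q)) {
		struct pending_req *r = STAILQ_FIRST(&q);

		STAILQ_REMOVE_HEAD(&q, link);
		printf("dequeued request %d\n", r->id);
	}

	/* Removing an arbitrary element (the error-cleanup case) has to
	 * walk the list, hence the four-argument form:
	 *   STAILQ_REMOVE(&q, r, pending_req, link);
	 */
	return 0;
}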