nvmf/rdma: Simplify spdk_nvmf_rdma_qp_drained

spdk_nvmf_rdma_qp_drained() was the only call point of two very small
static functions, so merge their bodies into it.
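
For illustration, a minimal standalone sketch of the same refactor (hypothetical drained/drain_pending/drain_completed names and struct ctx, not the SPDK code): when static helpers have exactly one call point, folding their bodies into the caller removes the indirection with no change in behavior.

#include <stdio.h>

struct ctx {
	int pending;
	int completed;
};

/*
 * Before the refactor, two single-use static helpers existed:
 *
 *     static void drain_pending(struct ctx *c)   { c->pending = 0; }
 *     static void drain_completed(struct ctx *c) { c->completed = 0; }
 *
 * and drained() did nothing but call each of them once.
 */

/* After the refactor: both helper bodies live at their only call point. */
static void
drained(struct ctx *c)
{
	c->pending = 0;		/* former drain_pending() body */
	c->completed = 0;	/* former drain_completed() body */
}

int
main(void)
{
	struct ctx c = { 3, 1 };

	drained(&c);
	printf("pending=%d completed=%d\n", c.pending, c.completed);
	return 0;
}

The diff below applies exactly this pattern to spdk_nvmf_rdma_qp_drained().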

Change-Id: Ifdd3355ffd500ac5ad4fcf69feace65b35132906
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/420935
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Ben Walker, 2018-07-31 14:39:16 -07:00 (committed by Jim Harris)
parent c3756ae387
commit 13a887f1e2

@@ -2003,37 +2003,29 @@ spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
 	}
 }
 
-static void
-spdk_nvmf_rdma_drain_rw_reqs(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
-}
-
-static void
-spdk_nvmf_rdma_drain_pending_reqs(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
-
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
-
-	/* First wipe the requests waiting for buffer from the global list */
-	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
-		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
-	}
-
-	/* Then drain the requests through the rdma queue */
-	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
-}
-
 static void
 spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 {
+	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
+
 	SPDK_NOTICELOG("IBV QP#%u drained\n", rqpair->qpair.qid);
 
 	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
 		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
 	}
 
-	spdk_nvmf_rdma_drain_pending_reqs(rqpair);
-	spdk_nvmf_rdma_drain_rw_reqs(rqpair);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
+
+	/* First wipe the requests waiting for buffer from the global list */
+	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
+		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
+	}
+
+	/* Then drain the requests through the rdma queue */
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
+
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
 
 	if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
 		/* There must be outstanding requests down to media.