nvmf/rdma: clean up qpairs and reqs on poll group deletion

Change-Id: I6dedf295b80148f37f75ebd5553f18dae76b2ab8
Signed-off-by: Seth Howell <seth.howell@intel.com>
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/421166
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by Seth Howell on 2018-08-02 09:55:13 -07:00; committed by Jim Harris
parent ed60507d5e
commit 54c394c483

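For context before the diff: the core of this change is a teardown pass that, when a poll group is destroyed, drains every request state on each remaining queue pair and then destroys the qpair while walking the poller's list. The standalone sketch below models only that loop with placeholder types (poller, qpair) and a stub qpair_destroy(); it illustrates the pattern, it is not the SPDK implementation.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* glibc's sys/queue.h does not ship the _SAFE iterators, so define one if needed. */
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)		\
	for ((var) = TAILQ_FIRST((head));			\
	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);	\
	     (var) = (tvar))
#endif

struct qpair {
	int qid;
	TAILQ_ENTRY(qpair) link;
};

struct poller {
	TAILQ_HEAD(, qpair) qpairs;
};

/* Stand-in for draining all request states and releasing the qpair. */
static void
qpair_destroy(struct poller *p, struct qpair *qp)
{
	TAILQ_REMOVE(&p->qpairs, qp, link);
	printf("drained and destroyed qpair %d\n", qp->qid);
	free(qp);
}

int
main(void)
{
	struct poller p;
	struct qpair *qp, *tmp_qp;
	int i;

	TAILQ_INIT(&p.qpairs);
	for (i = 0; i < 3; i++) {
		qp = calloc(1, sizeof(*qp));
		qp->qid = i;
		TAILQ_INSERT_TAIL(&p.qpairs, qp, link);
	}

	/* The _SAFE variant caches the next element up front, so freeing the
	 * current qpair inside the body does not break the iteration. */
	TAILQ_FOREACH_SAFE(qp, &p.qpairs, link, tmp_qp) {
		qpair_destroy(&p, qp);
	}

	return 0;
}

The _SAFE iterator matters here because the loop body unlinks and frees the very element it is visiting; the same consideration applies to the qpair walk added in the last hunk below.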

@@ -2004,11 +2004,31 @@ error:
	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
}

static void
_spdk_nvmf_rdma_qp_cleanup_all_states(struct spdk_nvmf_rdma_qpair *rqpair)
{
	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEW);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);

	/* First wipe the requests waiting for buffer from the global list */
	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
	}

	/* Then drain the requests through the rdma queue */
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTING);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
}

static void
_spdk_nvmf_rdma_qp_error(void *arg)
{
	struct spdk_nvmf_rdma_qpair *rqpair = arg;
	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
	enum ibv_qp_state state;

	state = spdk_nvmf_rdma_update_ibv_state(rqpair);
@@ -2020,20 +2040,7 @@ _spdk_nvmf_rdma_qp_error(void *arg)
	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
	}

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);

	/* First wipe the requests waiting for buffer from the global list */
	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
	}

	/* Then drain the requests through the rdma queue */
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);

	_spdk_nvmf_rdma_qp_cleanup_all_states(rqpair);
	spdk_nvmf_rdma_qpair_recover(rqpair);
}
@@ -2216,6 +2223,7 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_rdma_poll_group *rgroup;
	struct spdk_nvmf_rdma_poller *poller, *tmp;
	struct spdk_nvmf_rdma_qpair *qpair, *tmp_qpair;

	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
@@ -2229,6 +2237,10 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
		if (poller->cq) {
			ibv_destroy_cq(poller->cq);
		}

		TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) {
			_spdk_nvmf_rdma_qp_cleanup_all_states(qpair);
			spdk_nvmf_rdma_qpair_destroy(qpair);
		}

		free(poller);
	}
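
One detail worth spelling out from the hunks above: a request in the NEED_BUFFER state is tracked in two places at once, its qpair's per-state queue and the channel's shared pending_data_buf_queue, which is why the cleanup helper first unlinks those requests from the shared queue and only then drains the state queue. The standalone sketch below models that two-step teardown with made-up types (struct req, state_queue, pending_queue) and two separate TAILQ_ENTRY fields; the field layout is an assumption for illustration, not the actual spdk_nvmf_rdma_request definition.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* glibc's sys/queue.h lacks the _SAFE iterators, so provide one if needed. */
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)		\
	for ((var) = TAILQ_FIRST((head));			\
	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);	\
	     (var) = (tvar))
#endif

/* A request that sits on a per-qpair state queue and, while it waits for a
 * buffer, on a shared pending queue as well (hence two entry fields). */
struct req {
	int id;
	TAILQ_ENTRY(req) state_link;	/* membership in the state queue */
	TAILQ_ENTRY(req) pending_link;	/* membership in the shared pending queue */
};

TAILQ_HEAD(req_list, req);

int
main(void)
{
	struct req_list state_queue = TAILQ_HEAD_INITIALIZER(state_queue);
	struct req_list pending_queue = TAILQ_HEAD_INITIALIZER(pending_queue);
	struct req *r, *tmp;
	int i;

	/* Requests waiting for a buffer live on both lists. */
	for (i = 0; i < 3; i++) {
		r = calloc(1, sizeof(*r));
		r->id = i;
		TAILQ_INSERT_TAIL(&state_queue, r, state_link);
		TAILQ_INSERT_TAIL(&pending_queue, r, pending_link);
	}

	/* Step 1: unlink every waiting request from the shared pending queue,
	 * so nothing else can still reach a request about to be torn down. */
	TAILQ_FOREACH_SAFE(r, &state_queue, state_link, tmp) {
		TAILQ_REMOVE(&pending_queue, r, pending_link);
	}

	/* Step 2: now the per-qpair state queue can be drained and freed. */
	TAILQ_FOREACH_SAFE(r, &state_queue, state_link, tmp) {
		TAILQ_REMOVE(&state_queue, r, state_link);
		printf("completed/freed request %d\n", r->id);
		free(r);
	}

	return 0;
}

In the sketch, skipping step 1 would leave pending_queue pointing at freed memory; in the driver, the same ordering keeps the shared queue from referencing requests that have already been torn down.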