nvmf/rdma: Combine spdk_nvmf_rdma_qp_drained and spdk_nvmf_rdma_recover
spdk_nvmf_rdma_recover was only called from spdk_nvmf_rdma_qp_drained, and both functions are relatively small, so combine them.

Change-Id: I65002cfe13d0045a37609be5b85be087402b4a65
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/421043
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
This commit is contained in:
parent 12444f400d
commit 65a512c6cd
@@ -1918,12 +1918,45 @@ _spdk_nvmf_rdma_qpair_process_pending(void *arg)
     spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
 }
 
-static int
-spdk_nvmf_rdma_recover(struct spdk_nvmf_rdma_qpair *rqpair)
+static void
+spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
+                                 enum spdk_nvmf_rdma_request_state state)
+{
+    struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
+    struct spdk_nvmf_rdma_transport *rtransport;
+
+    TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[state], state_link, req_tmp) {
+        rtransport = SPDK_CONTAINEROF(rdma_req->req.qpair->transport,
+                                      struct spdk_nvmf_rdma_transport, transport);
+        spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
+        spdk_nvmf_rdma_request_process(rtransport, rdma_req);
+    }
+}
+
+static void
+spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
 {
     int recovered;
     enum ibv_qp_state state, next_state;
 
+    SPDK_NOTICELOG("IBV QP#%u drained\n", rqpair->qpair.qid);
+
+    if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
+        /* There must be outstanding requests down to media.
+         * If so, wait till they're complete.
+         */
+        assert(!TAILQ_EMPTY(&rqpair->qpair.outstanding));
+        SPDK_DEBUGLOG(SPDK_LOG_RDMA,
+                      "QP#%u (%p): wait for outstanding requests...\n",
+                      rqpair->qpair.qid, &rqpair->qpair);
+        return;
+    }
+
+    if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ERROR) {
+        /* Do not start recovery if qp is not in error state. */
+        return;
+    }
+
     state = spdk_nvmf_rdma_get_ibv_state(rqpair);
     next_state = state;
 
@@ -1935,7 +1968,8 @@ spdk_nvmf_rdma_recover(struct spdk_nvmf_rdma_qpair *rqpair)
         SPDK_ERRLOG("Can't recover IBV qp#%u from the state: %s\n",
                     rqpair->qpair.qid,
                     str_ibv_qp_state[state]);
-        return -1;
+        spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
+        return;
     }
 
     rqpair->qpair.state = SPDK_NVMF_QPAIR_INACTIVE;
@@ -1977,56 +2011,13 @@ spdk_nvmf_rdma_recover(struct spdk_nvmf_rdma_qpair *rqpair)
     rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
     spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qpair_process_pending, rqpair);
 
-    return 0;
+    return;
 error:
     SPDK_ERRLOG("IBV qp#%u recovery failed\n", rqpair->qpair.qid);
     /* Put NVMf qpair back into error state so recovery
        will trigger disconnect */
     rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;
-    return -1;
-}
-
-static void
-spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
-                                 enum spdk_nvmf_rdma_request_state state)
-{
-    struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
-    struct spdk_nvmf_rdma_transport *rtransport;
-
-    TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[state], state_link, req_tmp) {
-        rtransport = SPDK_CONTAINEROF(rdma_req->req.qpair->transport,
-                                      struct spdk_nvmf_rdma_transport, transport);
-        spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
-        spdk_nvmf_rdma_request_process(rtransport, rdma_req);
-    }
-}
-
-static void
-spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
-{
-    SPDK_NOTICELOG("IBV QP#%u drained\n", rqpair->qpair.qid);
-
-    if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
-        /* There must be outstanding requests down to media.
-         * If so, wait till they're complete.
-         */
-        assert(!TAILQ_EMPTY(&rqpair->qpair.outstanding));
-        SPDK_DEBUGLOG(SPDK_LOG_RDMA,
-                      "QP#%u (%p): wait for outstanding requests...\n",
-                      rqpair->qpair.qid, &rqpair->qpair);
-        return;
-    }
-
-    if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ERROR) {
-        /* Do not start recovery if qp is not in error state. */
-        return;
-    }
-
-    if (spdk_nvmf_rdma_recover(rqpair) != 0) {
-        SPDK_NOTICELOG("QP#%u (%p): recovery failed, disconnecting...\n",
-                       rqpair->qpair.qid, &rqpair->qpair);
-        spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
-    }
+    spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
 }
 
 static void
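For readers skimming the diff, the sketch below is a minimal, self-contained model of the control flow the combined drained handler ends up with: wait while requests are outstanding, do nothing unless the qpair is in the error state, then attempt recovery and disconnect directly if it fails. All names here (fake_qpair, qpair_is_idle, try_recover_ibv_state, disconnect) are hypothetical stand-ins, not the SPDK definitions shown in the diff.

/* Hypothetical, simplified model of the combined "drained" handler above.
 * The types and helpers are stand-ins for the real SPDK internals. */
#include <stdbool.h>
#include <stdio.h>

enum qpair_state { QPAIR_ACTIVE, QPAIR_INACTIVE, QPAIR_ERROR };

struct fake_qpair {
    unsigned int qid;
    enum qpair_state state;
    int outstanding_reqs;   /* requests still pending at the device */
};

/* Stand-ins for the idle check, the IBV state recovery steps, and the
 * qpair disconnect call used in the real code. */
static bool qpair_is_idle(const struct fake_qpair *qp) { return qp->outstanding_reqs == 0; }
static int try_recover_ibv_state(struct fake_qpair *qp) { (void)qp; return 0; /* pretend success */ }
static void disconnect(struct fake_qpair *qp) { printf("QP#%u: disconnecting\n", qp->qid); }

static void qp_drained(struct fake_qpair *qp)
{
    if (!qpair_is_idle(qp)) {
        /* Outstanding requests: wait until they complete. */
        return;
    }

    if (qp->state != QPAIR_ERROR) {
        /* Only an errored qpair needs recovery. */
        return;
    }

    if (try_recover_ibv_state(qp) != 0) {
        /* Recovery failed: stay in the error state and disconnect,
         * mirroring the error path of the merged function. */
        qp->state = QPAIR_ERROR;
        disconnect(qp);
        return;
    }

    qp->state = QPAIR_ACTIVE;
}

int main(void)
{
    struct fake_qpair qp = { .qid = 1, .state = QPAIR_ERROR, .outstanding_reqs = 0 };
    qp_drained(&qp);
    printf("QP#%u state after drain handling: %d\n", qp.qid, (int)qp.state);
    return 0;
}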