nvmf,rdma: only call spdk_nvmf_rdma_conn_destroy to free rdma_conn

Previously, we mixed calls to free() and spdk_nvmf_rdma_conn_destroy() when
releasing an allocated spdk_nvmf_rdma_conn structure, so the paths that used
plain free() did not release all of the connection's resources. Use
spdk_nvmf_rdma_conn_destroy() everywhere instead.
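
For context, consolidating on a single teardown helper means that helper must
be safe to call on a partially initialized connection (for example when the
completion queue creation fails before any requests are allocated). Below is a
minimal sketch of that pattern; the struct and names are illustrative, not the
real struct spdk_nvmf_rdma_conn or the actual SPDK implementation.

#include <stdlib.h>
#include <infiniband/verbs.h>

/* Illustrative stand-in for the connection object; the real SPDK struct
 * has more members, but the cleanup idea is the same. */
struct example_rdma_conn {
	struct ibv_cq	*cq;	/* may still be NULL on early failure */
	void		*reqs;	/* request pool, may be NULL */
};

/* Single teardown path: every member is checked before it is released,
 * so the function works both for fully constructed connections and for
 * objects abandoned partway through the create path. */
static void
example_rdma_conn_destroy(struct example_rdma_conn *conn)
{
	if (conn == NULL) {
		return;
	}
	free(conn->reqs);
	if (conn->cq) {
		ibv_destroy_cq(conn->cq);
	}
	free(conn);
}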

Change-Id: I2917b442c34d63ba5c014add58f429ae4b831595
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Ziye Yang 2016-10-13 12:36:04 +08:00 committed by Daniel Verkamp
parent 769468fc52
commit 379ebca018

@@ -238,7 +238,7 @@ spdk_nvmf_rdma_conn_create(struct rdma_cm_id *id, struct ibv_comp_channel *chann
 		SPDK_ERRLOG("Unable to create completion queue\n");
 		SPDK_ERRLOG("Completion Channel: %p Id: %p Verbs: %p\n", channel, id, id->verbs);
 		SPDK_ERRLOG("Errno %d: %s\n", errno, strerror(errno));
-		free(rdma_conn);
+		spdk_nvmf_rdma_conn_destroy(rdma_conn);
 		return NULL;
 	}
@@ -691,7 +691,6 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
 	SPDK_TRACELOG(SPDK_TRACE_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
 		      max_queue_depth, max_rw_depth);
 	/* Init the NVMf rdma transport connection */
 	rdma_conn = spdk_nvmf_rdma_conn_create(event->id, addr->comp_channel, max_queue_depth,
 					       max_rw_depth);
@@ -700,10 +699,6 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
 		goto err1;
 	}
-	/* Add this RDMA connection to the global list until a CONNECT capsule
-	 * is received. */
-	TAILQ_INSERT_TAIL(&g_pending_conns, rdma_conn, link);
 	accept_data.recfmt = 0;
 	accept_data.crqsize = max_queue_depth;
 	ctrlr_event_data = *rdma_param;
@@ -717,18 +712,24 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
 	rc = rdma_accept(event->id, &ctrlr_event_data);
 	if (rc) {
 		SPDK_ERRLOG("Error on rdma_accept\n");
-		goto err1;
+		goto err2;
 	}
 	SPDK_TRACELOG(SPDK_TRACE_RDMA, "Sent back the accept\n");
+	/* Add this RDMA connection to the global list until a CONNECT capsule
+	 * is received. */
+	TAILQ_INSERT_TAIL(&g_pending_conns, rdma_conn, link);
 	return 0;
+err2:
+	spdk_nvmf_rdma_conn_destroy(rdma_conn);
 err1: {
 		struct spdk_nvmf_rdma_reject_private_data rej_data;
 		rej_data.status.sc = sts;
 		rdma_reject(event->id, &ctrlr_event_data, sizeof(rej_data));
-		free(rdma_conn);
 	}
 err0:
 	return -1;
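
The last hunk also delays the TAILQ_INSERT_TAIL into g_pending_conns until
after rdma_accept() succeeds, so the new err2 label only has to destroy the
connection and never has to unlink it from the pending list. A compressed,
self-contained sketch of the resulting goto-ladder follows; the helpers are
placeholders, not SPDK APIs.

#include <stdlib.h>

struct example_conn { int accepted; };

/* Placeholder helpers standing in for create/accept/publish/reject/destroy. */
static struct example_conn *example_conn_create(void) { return calloc(1, sizeof(struct example_conn)); }
static void example_conn_destroy(struct example_conn *c) { free(c); }
static int example_accept(struct example_conn *c) { c->accepted = 1; return 0; }
static void example_publish(struct example_conn *c) { (void)c; }
static void example_reject(void) { }

/* Each error label undoes only the work completed before the jump,
 * in reverse order of acquisition. */
static int
example_connect(void)
{
	struct example_conn *conn;

	conn = example_conn_create();		/* allocate the connection */
	if (conn == NULL) {
		goto err1;			/* nothing to free: reject only */
	}
	if (example_accept(conn) != 0) {
		goto err2;			/* accept failed: destroy before rejecting */
	}
	example_publish(conn);			/* make visible only after accept succeeds */
	return 0;

err2:
	example_conn_destroy(conn);		/* single teardown path, as in the diff */
err1:
	example_reject();
	return -1;
}

int
main(void)
{
	return example_connect() == 0 ? 0 : 1;
}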