From 5518a327a86a44741de8221d072c77a6e4281e31 Mon Sep 17 00:00:00 2001
From: Daniel Verkamp
Date: Tue, 10 Jul 2018 10:23:11 -0700
Subject: [PATCH] nvmf/rdma: fix error paths in spdk_nvmf_rdma_create

Most of the error paths in this function leaked resources. Make them
all use spdk_nvmf_rdma_destroy() so all resources are consistently
freed.

The spdk_io_device_register() call is moved to the top of the function
so that the io_device is always valid when calling the destroy
function.

Change-Id: Ic92f09f157ee8245fb962d8bc3330aadd87b294a
Signed-off-by: Daniel Verkamp
Reviewed-on: https://review.gerrithub.io/418869
Tested-by: SPDK Automated Test System
Reviewed-by: Xiaodong Liu
Reviewed-by: Changpeng Liu
Reviewed-by: Shuhei Matsumoto
Reviewed-by: Ziye Yang
Reviewed-by: Ben Walker
Reviewed-by: Seth Howell
---
 lib/nvmf/rdma.c | 75 +++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 36 deletions(-)

diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c
index 3dd63e359..7f86b65b8 100644
--- a/lib/nvmf/rdma.c
+++ b/lib/nvmf/rdma.c
@@ -1237,6 +1237,8 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 /* Public API callbacks begin here */
 
+static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
+
 static struct spdk_nvmf_transport *
 spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 {
@@ -1253,7 +1255,16 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 		return NULL;
 	}
 
-	pthread_mutex_init(&rtransport->lock, NULL);
+	if (pthread_mutex_init(&rtransport->lock, NULL)) {
+		SPDK_ERRLOG("pthread_mutex_init() failed\n");
+		free(rtransport);
+		return NULL;
+	}
+
+	spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create,
+				spdk_nvmf_rdma_mgmt_channel_destroy,
+				sizeof(struct spdk_nvmf_rdma_mgmt_channel));
+
 	TAILQ_INIT(&rtransport->devices);
 	TAILQ_INIT(&rtransport->ports);
 
@@ -1275,14 +1286,14 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 	sge_count = rtransport->max_io_size / rtransport->io_unit_size;
 	if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", rtransport->io_unit_size);
-		free(rtransport);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
 
 	rtransport->event_channel = rdma_create_event_channel();
 	if (rtransport->event_channel == NULL) {
 		SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
-		free(rtransport);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
 
@@ -1290,7 +1301,7 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 	if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n",
 			    rtransport->event_channel->fd, spdk_strerror(errno));
-		free(rtransport);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
 
@@ -1301,21 +1312,14 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 				    SPDK_ENV_SOCKET_ID_ANY);
 	if (!rtransport->data_buf_pool) {
 		SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
-		free(rtransport);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
 
-	spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create,
-				spdk_nvmf_rdma_mgmt_channel_destroy,
-				sizeof(struct spdk_nvmf_rdma_mgmt_channel));
-
 	contexts = rdma_get_devices(NULL);
 	if (contexts == NULL) {
 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
-		rdma_destroy_event_channel(rtransport->event_channel);
-		spdk_mempool_free(rtransport->data_buf_pool);
-		spdk_io_device_unregister(rtransport, NULL);
-		free(rtransport);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
 	}
 
@@ -1351,34 +1355,32 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
 		TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
 		i++;
 	}
+	rdma_free_devices(contexts);
 
 	if (rc < 0) {
-		TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
-			TAILQ_REMOVE(&rtransport->devices, device, link);
-			free(device);
-		}
-		spdk_mempool_free(rtransport->data_buf_pool);
-		rdma_destroy_event_channel(rtransport->event_channel);
-		free(rtransport);
-		rdma_free_devices(contexts);
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
 		return NULL;
-	} else {
-		/* Set up poll descriptor array to monitor events from RDMA and IB
-		 * in a single poll syscall
-		 */
-		rtransport->npoll_fds = i + 1;
-		i = 0;
-		rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
-		rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
-		rtransport->poll_fds[i++].events = POLLIN;
-
-		TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
-			rtransport->poll_fds[i].fd = device->context->async_fd;
-			rtransport->poll_fds[i++].events = POLLIN;
-		}
 	}
 
-	rdma_free_devices(contexts);
+	/* Set up poll descriptor array to monitor events from RDMA and IB
+	 * in a single poll syscall
+	 */
+	rtransport->npoll_fds = i + 1;
+	i = 0;
+	rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
+	if (rtransport->poll_fds == NULL) {
+		SPDK_ERRLOG("poll_fds allocation failed\n");
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
+		return NULL;
+	}
+
+	rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
+	rtransport->poll_fds[i++].events = POLLIN;
+
+	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
+		rtransport->poll_fds[i].fd = device->context->async_fd;
+		rtransport->poll_fds[i++].events = POLLIN;
+	}
 
 	return &rtransport->transport;
 }
@@ -1422,6 +1424,7 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 
 	spdk_mempool_free(rtransport->data_buf_pool);
 	spdk_io_device_unregister(rtransport, NULL);
+	pthread_mutex_destroy(&rtransport->lock);
 	free(rtransport);
 
 	return 0;
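
For reference, a minimal standalone sketch (not part of the patch) of the error-path pattern adopted above: every failure inside the create function funnels through a destroy function that tolerates a partially initialized object, instead of each error path repeating its own frees. The struct thing, thing_create(), and thing_destroy() names below are hypothetical and unrelated to SPDK.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical object used only to illustrate the pattern. */
struct thing {
	int *a;
	int *b;
};

/* Tolerates a partially initialized object: free(NULL) is a no-op. */
static void
thing_destroy(struct thing *t)
{
	if (t == NULL) {
		return;
	}
	free(t->b);
	free(t->a);
	free(t);
}

static struct thing *
thing_create(size_t len)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (t == NULL) {
		return NULL;
	}

	t->a = calloc(len, sizeof(*t->a));
	if (t->a == NULL) {
		thing_destroy(t);	/* one cleanup path for every failure */
		return NULL;
	}

	t->b = calloc(len, sizeof(*t->b));
	if (t->b == NULL) {
		thing_destroy(t);
		return NULL;
	}

	return t;
}

int
main(void)
{
	struct thing *t = thing_create(16);

	if (t == NULL) {
		fprintf(stderr, "thing_create() failed\n");
		return 1;
	}
	thing_destroy(t);
	return 0;
}

Because the object is zero-initialized and free(NULL) is a no-op, thing_destroy() is safe to call at any point during construction. That is the same reason the patch moves spdk_io_device_register() to the top of spdk_nvmf_rdma_create(): the io_device is then always valid when spdk_nvmf_rdma_destroy() is invoked from an error path.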