From cd1b7ab0e7709a117ed6d0a727695e0d2749eda6 Mon Sep 17 00:00:00 2001 From: liuqinfei <18138800392@163.com> Date: Fri, 12 Aug 2022 10:43:24 +0800 Subject: [PATCH] nvmf: balance the get optimal poll group Fixes issue #2636. The existing allocation method (nvmf_rdma_get_optimal_poll_group()) assigns poll groups by plain round-robin traversal and is unaware of link disconnections. A fairer method is implemented that considers the real-time number of connections on each poll group when allocating one. Signed-off-by: liuqinfei <18138800392@163.com> Signed-off-by: luo rixin Change-Id: Ic1e6283e386dbb0dd6655bedebe26aeedb16c333 Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14002 Community-CI: Mellanox Build Bot Tested-by: SPDK CI Jenkins Reviewed-by: Aleksey Marchuk Reviewed-by: Ben Walker --- lib/nvmf/rdma.c | 24 ++++++++++++++++++++++++ test/unit/lib/nvmf/rdma.c/rdma_ut.c | 2 ++ 2 files changed, 26 insertions(+) diff --git a/lib/nvmf/rdma.c b/lib/nvmf/rdma.c index 38a9881a1..b7a614b41 100644 --- a/lib/nvmf/rdma.c +++ b/lib/nvmf/rdma.c @@ -3508,7 +3508,31 @@ nvmf_rdma_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) if (qpair->qid == 0) { pg = &rtransport->conn_sched.next_admin_pg; } else { + struct spdk_nvmf_rdma_poll_group *pg_min, *pg_start, *pg_current; + uint32_t min_value; + pg = &rtransport->conn_sched.next_io_pg; + pg_min = *pg; + pg_start = *pg; + pg_current = *pg; + min_value = (*pg)->group.group->stat.current_io_qpairs; + + while (pg_current->group.group->stat.current_io_qpairs) { + pg_current = TAILQ_NEXT(pg_current, link); + if (pg_current == NULL) { + pg_current = TAILQ_FIRST(&rtransport->poll_groups); + } + + if (pg_current->group.group->stat.current_io_qpairs < min_value) { + min_value = pg_current->group.group->stat.current_io_qpairs; + pg_min = pg_current; + } + + if (pg_current == pg_start) { + break; + } + } + *pg = pg_min; } assert(*pg != NULL); diff --git a/test/unit/lib/nvmf/rdma.c/rdma_ut.c b/test/unit/lib/nvmf/rdma.c/rdma_ut.c index 06992fe13..ee16f8267 100644 --- 
a/test/unit/lib/nvmf/rdma.c/rdma_ut.c +++ b/test/unit/lib/nvmf/rdma.c/rdma_ut.c @@ -789,6 +789,7 @@ test_nvmf_rdma_get_optimal_poll_group(void) struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT]; struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT]; struct spdk_nvmf_transport_poll_group *result; + struct spdk_nvmf_poll_group group = {}; uint32_t i; rqpair.qpair.transport = transport; @@ -797,6 +798,7 @@ test_nvmf_rdma_get_optimal_poll_group(void) for (i = 0; i < TEST_GROUPS_COUNT; i++) { groups[i] = nvmf_rdma_poll_group_create(transport, NULL); CU_ASSERT(groups[i] != NULL); + groups[i]->group = &group; rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group); groups[i]->transport = transport; }