nvmf: make poll groups count unassociated qpairs

We decide which poll group to assign a new qpair to by
looking at each poll group's current_io_qpairs count.  But
this count isn't always accurate, since it doesn't get
updated until after the CONNECT has been received.

This means that if we accept a bunch of connections
all at once, they may all get assigned the same poll
group, because the target poll group's counter doesn't
get immediately incremented.
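
As a sketch only (this patch does not touch the selection
code itself, and the helper below is hypothetical), a
least-loaded picker that also counts not-yet-connected
qpairs could look roughly like this, assuming the nvmf
internal definitions touched by this patch:

static struct spdk_nvmf_poll_group *
pick_least_loaded_group(struct spdk_nvmf_poll_group **groups, uint32_t count)
{
	struct spdk_nvmf_poll_group *best = NULL;
	uint32_t i, load, best_load = UINT32_MAX;

	for (i = 0; i < count; i++) {
		/* current_unassociated_qpairs is the new counter added by
		 * this patch; stat.current_io_qpairs already exists. */
		pthread_mutex_lock(&groups[i]->mutex);
		load = groups[i]->stat.current_io_qpairs +
		       groups[i]->current_unassociated_qpairs;
		pthread_mutex_unlock(&groups[i]->mutex);

		if (load < best_load) {
			best_load = load;
			best = groups[i];
		}
	}

	return best;
}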

So add a new counter, current_unassociated_qpairs,
to account for these qpairs.  We protect this counter
with a lock, since the accept thread will increment
the counter, and the poll group thread will decrement
it when the qpair receives the CONNECT that allows it
to be associated with a subsystem/controller.
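
Condensed, the counter's lifecycle looks like this (the
full hunks are in the diff below):

	/* accept thread, in spdk_nvmf_tgt_new_qpair(),
	 * when handing the qpair to a group: */
	pthread_mutex_lock(&group->mutex);
	group->current_unassociated_qpairs++;
	pthread_mutex_unlock(&group->mutex);

	/* poll group thread, in _nvmf_ctrlr_connect(),
	 * once the CONNECT capsule is processed: */
	pthread_mutex_lock(&qpair->group->mutex);
	qpair->group->current_unassociated_qpairs--;
	pthread_mutex_unlock(&qpair->group->mutex);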

If the qpair gets destroyed before the CONNECT is
received, we use the qpair->connect_received flag to
know that current_unassociated_qpairs still needs to
be decremented.
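
Roughly, assuming the existing connect_received check in
_nvmf_qpair_destroy(), the teardown path ends up shaped
like this:

	if (qpair->connect_received) {
		/* existing admin/io qpair stat accounting */
	} else {
		/* CONNECT never arrived, so this qpair is still
		 * counted as unassociated; give the slot back. */
		pthread_mutex_lock(&qpair->group->mutex);
		qpair->group->current_unassociated_qpairs--;
		pthread_mutex_unlock(&qpair->group->mutex);
	}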

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I8bba8da2abfe225b3b9f981cd71b6f49e2b87391
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15693
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>

@@ -157,6 +157,12 @@ struct spdk_nvmf_poll_group {
 	struct spdk_nvmf_subsystem_poll_group *sgroups;
 	uint32_t num_sgroups;
+	/* Protected by mutex. Counts qpairs that have connected at a
+	 * transport level, but are not associated with a subsystem
+	 * or controller yet (because the CONNECT capsule hasn't
+	 * been received). */
+	uint32_t current_unassociated_qpairs;
 	/* All of the queue pairs that belong to this poll group */
 	TAILQ_HEAD(, spdk_nvmf_qpair) qpairs;
@@ -167,6 +173,8 @@ struct spdk_nvmf_poll_group {
 	void *destroy_cb_arg;
 	TAILQ_ENTRY(spdk_nvmf_poll_group) link;
+	pthread_mutex_t mutex;
 };
 struct spdk_nvmf_listener {

@@ -729,6 +729,10 @@ _nvmf_ctrlr_connect(struct spdk_nvmf_request *req)
 	qpair->qid = cmd->qid;
 	qpair->connect_received = true;
+	pthread_mutex_lock(&qpair->group->mutex);
+	qpair->group->current_unassociated_qpairs--;
+	pthread_mutex_unlock(&qpair->group->mutex);
 	if (0 == qpair->qid) {
 		qpair->group->stat.admin_qpairs++;
 		qpair->group->stat.current_admin_qpairs++;

@@ -175,6 +175,7 @@ nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
 	TAILQ_INIT(&group->tgroups);
 	TAILQ_INIT(&group->qpairs);
 	group->thread = thread;
+	pthread_mutex_init(&group->mutex, NULL);
 	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
@@ -887,6 +888,10 @@ spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair
 	ctx->qpair = qpair;
 	ctx->group = group;
+	pthread_mutex_lock(&group->mutex);
+	group->current_unassociated_qpairs++;
+	pthread_mutex_unlock(&group->mutex);
 	spdk_thread_send_msg(group->thread, _nvmf_poll_group_add, ctx);
 }
@@ -1054,6 +1059,10 @@ _nvmf_qpair_destroy(void *ctx, int status)
 			assert(qpair->group->stat.current_io_qpairs > 0);
 			qpair->group->stat.current_io_qpairs--;
 		}
+	} else {
+		pthread_mutex_lock(&qpair->group->mutex);
+		qpair->group->current_unassociated_qpairs--;
+		pthread_mutex_unlock(&qpair->group->mutex);
 	}
 	if (ctrlr) {