vfio-user: correct accept poller return code

Return the number of events handled as expected by the poller.

Signed-off-by: John Levon <john.levon@nutanix.com>
Change-Id: I70c4d32bf091b2c1a293eaa41f00869a3a7303f2
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8563
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Thanos Makatos <thanos.makatos@nutanix.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
commit 923bab5f12 (parent f1c8170632)
Author: John Levon <john.levon@nutanix.com>
Date:   2021-06-29 15:23:36 +00:00
Committer: Tomasz Zawadzki
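For context on the "as expected by the poller" wording: the transport accept callback is driven by an SPDK poller registered at SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US (both names appear in the diff below). A minimal registration sketch, assuming only the public poller API — spdk_poller_register() is a real SPDK function, while start_accept_poller() and the callback name are illustrative:

#include "spdk/nvmf.h"
#include "spdk/thread.h"

/* Illustrative callback; a sketch of its body follows the diff below. */
static int nvmf_tgt_accept_sketch(void *ctx);

/*
 * Register the accept poller. The framework then invokes the callback
 * every SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US microseconds on this thread.
 */
static struct spdk_poller *
start_accept_poller(struct spdk_nvmf_tgt *tgt)
{
	return spdk_poller_register(nvmf_tgt_accept_sketch, tgt,
				    SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US);
}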

@@ -2095,15 +2095,26 @@ nvmf_vfio_user_listen_associate(struct spdk_nvmf_transport *transport,
 }
 
 /*
- * Executed periodically.
+ * Executed periodically at a default SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US
+ * frequency.
+ *
+ * For each transport endpoint (which at the libvfio-user level corresponds to
+ * a socket), if we don't currently have a controller set up, peek to see if the
+ * socket is able to accept a new connection.
+ *
+ * This poller also takes care of handling the creation of any pending new
+ * qpairs.
+ *
+ * Returns the number of events handled.
  */
 static uint32_t
 nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)
 {
-	int err;
 	struct nvmf_vfio_user_transport *vu_transport;
 	struct nvmf_vfio_user_qpair *qp, *tmp_qp;
 	struct nvmf_vfio_user_endpoint *endpoint;
+	uint32_t count = 0;
+	int err;
 
 	vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
 					transport);
@@ -2111,7 +2122,6 @@ nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)
 	pthread_mutex_lock(&vu_transport->lock);
 
 	TAILQ_FOREACH(endpoint, &vu_transport->endpoints, link) {
-		/* try to attach a new controller */
 		if (endpoint->ctrlr != NULL) {
 			continue;
 		}
@@ -2123,21 +2133,24 @@ nvmf_vfio_user_accept(struct spdk_nvmf_transport *transport)
 			}
 
 			pthread_mutex_unlock(&vu_transport->lock);
-			return -EFAULT;
+			return 1;
 		}
 
+		count++;
+
 		/* Construct a controller */
 		nvmf_vfio_user_create_ctrlr(vu_transport, endpoint);
 	}
 
 	TAILQ_FOREACH_SAFE(qp, &vu_transport->new_qps, link, tmp_qp) {
+		count++;
 		TAILQ_REMOVE(&vu_transport->new_qps, qp, link);
 		spdk_nvmf_tgt_new_qpair(transport->tgt, &qp->qpair);
 	}
 
 	pthread_mutex_unlock(&vu_transport->lock);
 
-	return 0;
+	return count;
 }
 
 static void
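After this change, the generic layer can simply sum what each transport's accept callback reports. A hedged sketch of the consuming side — not the actual SPDK caller: nvmf_transport_accept() and the tgt->transports list are assumptions for illustration, while SPDK_POLLER_BUSY and SPDK_POLLER_IDLE are the real poller return values:

static int
nvmf_tgt_accept_sketch(void *ctx)
{
	struct spdk_nvmf_tgt *tgt = ctx;
	struct spdk_nvmf_transport *transport;
	uint32_t count = 0;

	/* Assumed iteration over the target's transports. */
	TAILQ_FOREACH(transport, &tgt->transports, link) {
		/*
		 * Each accept callback returns the number of events it
		 * handled. Before this fix, vfio-user could return -EFAULT
		 * from a uint32_t-returning callback, which wraps to a huge
		 * positive "event count".
		 */
		count += nvmf_transport_accept(transport);
	}

	/* A poller reports whether it did any work on this invocation. */
	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

This also explains the design choice visible in the diff's error path: it returns 1 (one event observed) rather than a negative errno, keeping the unsigned events-handled contract intact.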