nvmf/vfio-user: don't blindly drain poll group eventfd

This eventfd may be passed by libvfio-user to the remote process, which
might remove the EFD_NONBLOCK flag, in which case we would block
indefinitely.
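
To make the hazard concrete, here is a minimal standalone sketch (not SPDK
code; purely illustrative) of why an unconditional eventfd_read() is only
safe while the descriptor is still non-blocking:

/*
 * Sketch: an eventfd created with EFD_NONBLOCK can later become blocking,
 * because O_NONBLOCK lives in the shared file status flags. Any process
 * holding a copy of the descriptor can clear it, after which draining an
 * empty eventfd blocks until someone writes to it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int
main(void)
{
	eventfd_t val;
	int flags;
	int efd = eventfd(0, EFD_NONBLOCK);

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	/* Non-blocking drain of an empty eventfd just fails with EAGAIN... */
	if (eventfd_read(efd, &val) < 0) {
		perror("eventfd_read (non-blocking, empty)");
	}

	/*
	 * ...but once O_NONBLOCK is cleared (here locally, in the real case
	 * by the remote process that received the fd), the same blind drain
	 * would hang until the next write to the eventfd.
	 */
	flags = fcntl(efd, F_GETFL);
	fcntl(efd, F_SETFL, flags & ~O_NONBLOCK);

	printf("eventfd is now blocking; an unconditional drain would hang\n");

	close(efd);
	return 0;
}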

Signed-off-by: Thanos Makatos <thanos.makatos@nutanix.com>
Change-Id: If9826cd700b4a7b3458a0a8278a96322d99ac08e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15385
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: John Levon <levon@movementarian.org>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Thanos Makatos 2022-10-12 09:08:19 +00:00 committed by Tomasz Zawadzki
parent 7f23638550
commit 25440c3bdb

@@ -4810,20 +4810,13 @@ _post_completion_msg(void *ctx)
 static int nvmf_vfio_user_poll_group_poll(struct spdk_nvmf_transport_poll_group *group);
 
 static int
-vfio_user_poll_group_intr(void *ctx)
+vfio_user_poll_group_process(void *ctx)
 {
 	struct nvmf_vfio_user_poll_group *vu_group = ctx;
-	eventfd_t val;
 	int ret = 0;
 
 	SPDK_DEBUGLOG(vfio_user_db, "pg:%p got intr\n", vu_group);
 
-	/*
-	 * NB: this might fail if called from vfio_user_ctrlr_intr(), but it's
-	 * non-blocking, so not an issue.
-	 */
-	eventfd_read(vu_group->intr_fd, &val);
-
 	ret |= nvmf_vfio_user_poll_group_poll(&vu_group->group);
 
 	/*
@@ -4835,6 +4828,16 @@ vfio_user_poll_group_intr(void *ctx)
 	return ret != 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
 }
 
+static int
+vfio_user_poll_group_intr(void *ctx)
+{
+	struct nvmf_vfio_user_poll_group *vu_group = ctx;
+	eventfd_t val;
+
+	eventfd_read(vu_group->intr_fd, &val);
+
+	return vfio_user_poll_group_process(ctx);
+}
 /*
  * Handle an interrupt for the given controller: we must poll the vfu_ctx, and
  * the SQs assigned to our own poll group. Other poll groups are handled via
@@ -4880,7 +4883,7 @@ vfio_user_ctrlr_intr(void *ctx)
 		}
 	}
 
-	ret |= vfio_user_poll_group_intr(vu_ctrlr_group);
+	ret |= vfio_user_poll_group_process(vu_ctrlr_group);
 
 	return ret;
 }
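
For readability, this is roughly how the two functions fit together after the
change, pieced together from the hunks above (code elided by the diff is
marked with a comment, so details may differ from the actual file):

static int
vfio_user_poll_group_process(void *ctx)
{
	struct nvmf_vfio_user_poll_group *vu_group = ctx;
	int ret = 0;

	SPDK_DEBUGLOG(vfio_user_db, "pg:%p got intr\n", vu_group);

	ret |= nvmf_vfio_user_poll_group_poll(&vu_group->group);

	/* ... code elided by the diff ... */

	return ret != 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

/*
 * Only the interrupt handler, which runs because the poll group eventfd
 * actually fired, drains it. vfio_user_ctrlr_intr() now calls
 * vfio_user_poll_group_process() directly, so the controller interrupt path
 * never reads an eventfd that may have become blocking.
 */
static int
vfio_user_poll_group_intr(void *ctx)
{
	struct nvmf_vfio_user_poll_group *vu_group = ctx;
	eventfd_t val;

	eventfd_read(vu_group->intr_fd, &val);

	return vfio_user_poll_group_process(ctx);
}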