lib/vhost: move vhost_user related fields from spdk_vhost_dev

The spdk_vhost_dev structure should only contain generic fields
that are used by the vhost, vhost_blk, or vhost_scsi
layers.

The vhost_user backend can hold its properties in
spdk_vhost_user_dev, which is maintained within rte_vhost.

Both structures hold references to each other.
The reference in spdk_vhost_dev is a void pointer,
so that future transports can keep references
to their own structures.
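
In short, the new layout looks like this. This is only a condensed
sketch: the field lists are trimmed to what is described above and the
comment placeholders stand in for the remaining members; the complete
definitions and the to_user_dev() helper appear in the diff below.

    #include <assert.h>

    struct spdk_vhost_dev {
            char *name;
            /* ... generic fields shared by vhost, vhost_blk and vhost_scsi ... */
            void *ctxt;     /* transport-specific context, opaque to the generic layer */
    };

    struct spdk_vhost_user_dev {
            struct spdk_vhost_dev *vdev;    /* back-reference to the generic device */
            /* ... vhost_user-only state: sessions, backend ops, coalescing ... */
    };

    /* The vhost_user code recovers its context from the generic device: */
    static struct spdk_vhost_user_dev *
    to_user_dev(struct spdk_vhost_dev *vdev)
    {
            assert(vdev != NULL);
            return vdev->ctxt;
    }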

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I68640c524426d885c20242146365ba242fa9df8e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11813
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Tomasz Zawadzki 2022-03-03 10:31:02 +01:00
parent bfcfdb7903
commit 6f89388ed3
4 changed files with 101 additions and 57 deletions

View File

@@ -81,6 +81,13 @@ struct vhost_session_fn_ctx {
void *user_ctx;
};
static struct spdk_vhost_user_dev *
to_user_dev(struct spdk_vhost_dev *vdev)
{
assert(vdev != NULL);
return vdev->ctxt;
}
static void __attribute__((constructor))
_vhost_user_sem_init(void)
{
@@ -829,11 +836,12 @@ static int
_stop_session(struct spdk_vhost_session *vsession)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
struct spdk_vhost_user_dev *user_vdev = to_user_dev(vdev);
struct spdk_vhost_virtqueue *q;
int rc;
uint16_t i;
rc = vdev->user_backend->stop_session(vsession);
rc = user_vdev->user_backend->stop_session(vsession);
if (rc != 0) {
SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vsession->vid);
return rc;
@@ -872,6 +880,7 @@ static int
new_connection(int vid)
{
struct spdk_vhost_dev *vdev;
struct spdk_vhost_user_dev *user_dev;
struct spdk_vhost_session *vsession;
size_t dev_dirname_len;
char ifname[PATH_MAX];
@@ -896,28 +905,29 @@ new_connection(int vid)
spdk_vhost_unlock();
return -1;
}
user_dev = to_user_dev(vdev);
/* We expect sessions inside vdev->vsessions to be sorted in ascending
/* We expect sessions inside user_dev->vsessions to be sorted in ascending
* order in regard of vsession->id. For now we always set id = vsessions_cnt++
* and append each session to the very end of the vsessions list.
* This is required for vhost_user_dev_foreach_session() to work.
*/
if (vdev->vsessions_num == UINT_MAX) {
if (user_dev->vsessions_num == UINT_MAX) {
assert(false);
return -EINVAL;
}
if (posix_memalign((void **)&vsession, SPDK_CACHE_LINE_SIZE, sizeof(*vsession) +
vdev->user_backend->session_ctx_size)) {
user_dev->user_backend->session_ctx_size)) {
SPDK_ERRLOG("vsession alloc failed\n");
spdk_vhost_unlock();
return -1;
}
memset(vsession, 0, sizeof(*vsession) + vdev->user_backend->session_ctx_size);
memset(vsession, 0, sizeof(*vsession) + user_dev->user_backend->session_ctx_size);
vsession->vdev = vdev;
vsession->vid = vid;
vsession->id = vdev->vsessions_num++;
vsession->id = user_dev->vsessions_num++;
vsession->name = spdk_sprintf_alloc("%ss%u", vdev->name, vsession->vid);
if (vsession->name == NULL) {
SPDK_ERRLOG("vsession alloc failed\n");
@@ -930,7 +940,7 @@ new_connection(int vid)
vsession->next_stats_check_time = 0;
vsession->stats_check_interval = SPDK_VHOST_STATS_CHECK_INTERVAL_MS *
spdk_get_ticks_hz() / 1000UL;
TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
vhost_session_install_rte_compat_hooks(vsession);
spdk_vhost_unlock();
@@ -1058,7 +1068,7 @@ start_device(int vid)
vhost_user_session_set_coalescing(vdev, vsession, NULL);
vhost_session_mem_register(vsession->mem);
vsession->initialized = true;
rc = vdev->user_backend->start_session(vsession);
rc = to_user_dev(vdev)->user_backend->start_session(vsession);
if (rc != 0) {
vhost_session_mem_unregister(vsession->mem);
free(vsession->mem);
@@ -1115,7 +1125,7 @@ destroy_connection(int vid)
}
}
TAILQ_REMOVE(&vsession->vdev->vsessions, vsession, tailq);
TAILQ_REMOVE(&to_user_dev(vsession->vdev)->vsessions, vsession, tailq);
free(vsession->name);
free(vsession);
spdk_vhost_unlock();
@@ -1137,7 +1147,7 @@ vhost_session_find_by_id(struct spdk_vhost_dev *vdev, unsigned id)
{
struct spdk_vhost_session *vsession;
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
if (vsession->id == id) {
return vsession;
}
@@ -1154,7 +1164,7 @@ vhost_session_find_by_vid(int vid)
for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
vdev = spdk_vhost_dev_next(vdev)) {
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
if (vsession->vid == vid) {
return vsession;
}
@@ -1189,11 +1199,12 @@ vhost_session_cb_done(int rc)
void
vhost_user_session_start_done(struct spdk_vhost_session *vsession, int response)
{
struct spdk_vhost_user_dev *user_dev = to_user_dev(vsession->vdev);
if (response == 0) {
vsession->started = true;
assert(vsession->vdev->active_session_num < UINT32_MAX);
vsession->vdev->active_session_num++;
assert(user_dev->active_session_num < UINT32_MAX);
user_dev->active_session_num++;
}
vhost_session_cb_done(response);
@@ -1202,11 +1213,13 @@ vhost_user_session_start_done(struct spdk_vhost_session *vsession, int response)
void
vhost_user_session_stop_done(struct spdk_vhost_session *vsession, int response)
{
struct spdk_vhost_user_dev *user_dev = to_user_dev(vsession->vdev);
if (response == 0) {
vsession->started = false;
assert(vsession->vdev->active_session_num > 0);
vsession->vdev->active_session_num--;
assert(user_dev->active_session_num > 0);
user_dev->active_session_num--;
}
vhost_session_cb_done(response);
@@ -1254,6 +1267,7 @@ foreach_session_finish_cb(void *arg1)
{
struct vhost_session_fn_ctx *ev_ctx = arg1;
struct spdk_vhost_dev *vdev = ev_ctx->vdev;
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
if (spdk_vhost_trylock() != 0) {
spdk_thread_send_msg(spdk_get_thread(),
@@ -1261,8 +1275,8 @@ foreach_session_finish_cb(void *arg1)
return;
}
assert(vdev->pending_async_op_num > 0);
vdev->pending_async_op_num--;
assert(user_dev->pending_async_op_num > 0);
user_dev->pending_async_op_num--;
if (ev_ctx->cpl_fn != NULL) {
ev_ctx->cpl_fn(vdev, ev_ctx->user_ctx);
}
@@ -1284,7 +1298,7 @@ foreach_session(void *arg1)
return;
}
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
if (vsession->initialized) {
rc = ev_ctx->cb_fn(vdev, vsession, ev_ctx->user_ctx);
if (rc < 0) {
@@ -1306,6 +1320,7 @@ vhost_user_dev_foreach_session(struct spdk_vhost_dev *vdev,
void *arg)
{
struct vhost_session_fn_ctx *ev_ctx;
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
ev_ctx = calloc(1, sizeof(*ev_ctx));
if (ev_ctx == NULL) {
@@ -1319,8 +1334,8 @@ vhost_user_dev_foreach_session(struct spdk_vhost_dev *vdev,
ev_ctx->cpl_fn = cpl_fn;
ev_ctx->user_ctx = arg;
assert(vdev->pending_async_op_num < UINT32_MAX);
vdev->pending_async_op_num++;
assert(user_dev->pending_async_op_num < UINT32_MAX);
user_dev->pending_async_op_num++;
spdk_thread_send_msg(vdev->thread, foreach_session, ev_ctx);
}
@@ -1621,7 +1636,7 @@ vhost_get_negotiated_features(int vid, uint64_t *negotiated_features)
}
int
vhost_user_dev_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
uint32_t iops_threshold)
{
uint64_t delay_time_base = delay_base_us * spdk_get_ticks_hz() / 1000000ULL;
@@ -1636,8 +1651,8 @@ vhost_user_dev_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_u
return -EINVAL;
}
vdev->coalescing_delay_us = delay_base_us;
vdev->coalescing_iops_threshold = iops_threshold;
user_dev->coalescing_delay_us = delay_base_us;
user_dev->coalescing_iops_threshold = iops_threshold;
return 0;
}
@@ -1646,9 +1661,9 @@ vhost_user_session_set_coalescing(struct spdk_vhost_dev *vdev,
struct spdk_vhost_session *vsession, void *ctx)
{
vsession->coalescing_delay_time_base =
vdev->coalescing_delay_us * spdk_get_ticks_hz() / 1000000ULL;
to_user_dev(vdev)->coalescing_delay_us * spdk_get_ticks_hz() / 1000000ULL;
vsession->coalescing_io_rate_threshold =
vdev->coalescing_iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;
to_user_dev(vdev)->coalescing_iops_threshold * SPDK_VHOST_STATS_CHECK_INTERVAL_MS / 1000U;
return 0;
}
@@ -1658,7 +1673,7 @@ spdk_vhost_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
{
int rc;
rc = vhost_user_dev_set_coalescing(vdev, delay_base_us, iops_threshold);
rc = vhost_user_dev_set_coalescing(to_user_dev(vdev), delay_base_us, iops_threshold);
if (rc != 0) {
return rc;
}
@@ -1671,12 +1686,14 @@ void
spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
uint32_t *iops_threshold)
{
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
if (delay_base_us) {
*delay_base_us = vdev->coalescing_delay_us;
*delay_base_us = user_dev->coalescing_delay_us;
}
if (iops_threshold) {
*iops_threshold = vdev->coalescing_iops_threshold;
*iops_threshold = user_dev->coalescing_iops_threshold;
}
}
@@ -1715,6 +1732,7 @@ vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name, struct sp
const struct spdk_vhost_user_dev_backend *user_backend)
{
char path[PATH_MAX];
struct spdk_vhost_user_dev *user_dev;
if (snprintf(path, sizeof(path), "%s%s", g_vhost_user_dev_dirname, name) >= (int)sizeof(path)) {
SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n",
@@ -1727,23 +1745,33 @@ vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name, struct sp
return -EIO;
}
user_dev = calloc(1, sizeof(*user_dev));
if (user_dev == NULL) {
free(vdev->path);
return -ENOMEM;
}
vdev->ctxt = user_dev;
vdev->thread = spdk_thread_create(vdev->name, cpumask);
if (vdev->thread == NULL) {
free(user_dev);
free(vdev->path);
SPDK_ERRLOG("Failed to create thread for vhost controller %s.\n", name);
return -EIO;
}
vdev->registered = true;
vdev->user_backend = user_backend;
TAILQ_INIT(&vdev->vsessions);
user_dev->user_backend = user_backend;
user_dev->vdev = vdev;
TAILQ_INIT(&user_dev->vsessions);
vhost_user_dev_set_coalescing(vdev, SPDK_VHOST_COALESCING_DELAY_BASE_US,
vhost_user_dev_set_coalescing(user_dev, SPDK_VHOST_COALESCING_DELAY_BASE_US,
SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD);
if (vhost_register_unix_socket(path, name, vdev->virtio_features, vdev->disabled_features,
vdev->protocol_features)) {
spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
free(user_dev);
free(vdev->path);
return -EIO;
}
@@ -1754,11 +1782,13 @@ vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name, struct sp
int
vhost_user_dev_unregister(struct spdk_vhost_dev *vdev)
{
if (vdev->pending_async_op_num) {
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
if (user_dev->pending_async_op_num) {
return -EBUSY;
}
if (!TAILQ_EMPTY(&vdev->vsessions)) {
if (!TAILQ_EMPTY(&user_dev->vsessions)) {
SPDK_ERRLOG("Controller %s has still valid connection.\n", vdev->name);
return -EBUSY;
}
@@ -1771,6 +1801,7 @@ vhost_user_dev_unregister(struct spdk_vhost_dev *vdev)
}
spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
free(user_dev);
free(vdev->path);
return 0;
@@ -1818,7 +1849,7 @@ vhost_user_session_shutdown(void *arg)
for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
vdev = spdk_vhost_dev_next(vdev)) {
spdk_vhost_lock();
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
TAILQ_FOREACH(vsession, &to_user_dev(vdev)->vsessions, tailq) {
if (vsession->started) {
_stop_session(vsession);
}

View File

@@ -172,19 +172,9 @@ struct spdk_vhost_session {
TAILQ_ENTRY(spdk_vhost_session) tailq;
};
struct spdk_vhost_dev {
char *name;
char *path;
struct spdk_vhost_user_dev {
struct spdk_vhost_dev *vdev;
struct spdk_thread *thread;
bool registered;
uint64_t virtio_features;
uint64_t disabled_features;
uint64_t protocol_features;
bool packed_ring_recovery;
const struct spdk_vhost_dev_backend *backend;
const struct spdk_vhost_user_dev_backend *user_backend;
/* Saved original values used to setup coalescing to avoid integer
@@ -204,6 +194,24 @@ struct spdk_vhost_dev {
/* Number of pending asynchronous operations */
uint32_t pending_async_op_num;
};
struct spdk_vhost_dev {
char *name;
char *path;
struct spdk_thread *thread;
bool registered;
uint64_t virtio_features;
uint64_t disabled_features;
uint64_t protocol_features;
bool packed_ring_recovery;
const struct spdk_vhost_dev_backend *backend;
/* Context passed from transport */
void *ctxt;
TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
@@ -503,9 +511,9 @@ int remove_vhost_controller(struct spdk_vhost_dev *vdev);
/* Function calls from vhost.c to rte_vhost_user.c,
* shall removed once virtio transport abstraction is complete. */
int vhost_user_session_set_coalescing(struct spdk_vhost_dev *vdev,
int vhost_user_session_set_coalescing(struct spdk_vhost_dev *dev,
struct spdk_vhost_session *vsession, void *ctx);
int vhost_user_dev_set_coalescing(struct spdk_vhost_dev *vdev, uint32_t delay_base_us,
int vhost_user_dev_set_coalescing(struct spdk_vhost_user_dev *user_dev, uint32_t delay_base_us,
uint32_t iops_threshold);
int vhost_user_dev_register(struct spdk_vhost_dev *vdev, const char *name,
struct spdk_cpuset *cpumask, const struct spdk_vhost_user_dev_backend *user_backend);

View File

@@ -897,9 +897,10 @@ static int
vhost_scsi_dev_remove(struct spdk_vhost_dev *vdev)
{
struct spdk_vhost_scsi_dev *svdev = to_scsi_dev(vdev);
struct spdk_vhost_user_dev *user_dev = vdev->ctxt;
int rc, i;
if (vdev->pending_async_op_num) {
if (user_dev->pending_async_op_num) {
return -EBUSY;
}

View File

@@ -135,6 +135,7 @@ alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
struct rte_vhost_memory *mem;
struct spdk_vhost_session *vsession = NULL;
int rc;
@@ -149,7 +150,7 @@ start_vdev(struct spdk_vhost_dev *vdev)
mem->regions[1].size = 0x400000; /* 4 MB */
mem->regions[1].host_user_addr = 0x2000000;
assert(TAILQ_EMPTY(&vdev->vsessions));
assert(TAILQ_EMPTY(&user_dev->vsessions));
/* spdk_vhost_dev must be allocated on a cache line boundary. */
rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
CU_ASSERT(rc == 0);
@@ -157,15 +158,16 @@ start_vdev(struct spdk_vhost_dev *vdev)
vsession->started = true;
vsession->vid = 0;
vsession->mem = mem;
TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
TAILQ_INSERT_TAIL(&user_dev->vsessions, vsession, tailq);
}
static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
struct spdk_vhost_session *vsession = TAILQ_FIRST(&user_dev->vsessions);
TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
TAILQ_REMOVE(&user_dev->vsessions, vsession, tailq);
free(vsession->mem);
free(vsession);
}
@@ -173,7 +175,9 @@ stop_vdev(struct spdk_vhost_dev *vdev)
static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
if (!TAILQ_EMPTY(&vdev->vsessions)) {
struct spdk_vhost_user_dev *user_dev = to_user_dev(vdev);
if (!TAILQ_EMPTY(&user_dev->vsessions)) {
stop_vdev(vdev);
}
vhost_dev_unregister(vdev);
@@ -196,7 +200,7 @@ desc_to_iov_test(void)
SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
start_vdev(vdev);
vsession = TAILQ_FIRST(&vdev->vsessions);
vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);
/* Test simple case where iov falls fully within a 2MB page. */
desc.addr = 0x110000;
@@ -339,7 +343,7 @@ session_find_by_vid_test(void)
SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
start_vdev(vdev);
vsession = TAILQ_FIRST(&vdev->vsessions);
vsession = TAILQ_FIRST(&to_user_dev(vdev)->vsessions);
tmp = vhost_session_find_by_vid(vsession->vid);
CU_ASSERT(tmp == vsession);
@@ -362,7 +366,7 @@ remove_controller_test(void)
/* Remove device when controller is in use */
start_vdev(vdev);
SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&to_user_dev(vdev)->vsessions));
ret = vhost_dev_unregister(vdev);
CU_ASSERT(ret != 0);