2017-03-02 14:12:20 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
|
|
|
* Copyright(c) Intel Corporation. All rights reserved.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2017-05-02 18:18:25 +00:00
|
|
|
#include "spdk/stdinc.h"
|
|
|
|
|
2017-03-02 14:12:20 +00:00
|
|
|
#include "spdk/env.h"
|
2017-05-24 12:52:07 +00:00
|
|
|
#include "spdk/likely.h"
|
2017-08-11 23:27:04 +00:00
|
|
|
#include "spdk/string.h"
|
2017-05-26 12:00:56 +00:00
|
|
|
#include "spdk/util.h"
|
2020-02-27 13:38:02 +00:00
|
|
|
#include "spdk/memory.h"
|
2017-08-09 17:05:06 +00:00
|
|
|
#include "spdk/barrier.h"
|
2017-03-02 14:12:20 +00:00
|
|
|
#include "spdk/vhost.h"
|
2017-05-22 12:53:27 +00:00
|
|
|
#include "vhost_internal.h"
|
2017-03-02 14:12:20 +00:00
|
|
|
|
2020-08-19 10:02:07 +00:00
|
|
|
bool g_packed_ring_recovery = false;
|
|
|
|
|
2020-03-24 10:42:40 +00:00
|
|
|
static struct spdk_cpuset g_vhost_core_mask;
|
|
|
|
|
2019-06-24 07:00:19 +00:00
|
|
|
/* Thread performing all vhost management operations */
|
|
|
|
static struct spdk_thread *g_vhost_init_thread;
|
|
|
|
|
2019-04-29 09:42:29 +00:00
|
|
|
static spdk_vhost_fini_cb g_fini_cpl_cb;
|
|
|
|
|
2019-06-24 07:59:37 +00:00
|
|
|
/** Return code for the current DPDK callback */
|
|
|
|
static int g_dpdk_response;
|
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
/** Context for a call deferred onto a session's thread (and optionally
 *  completed back on the init thread) — see the event-dispatch helpers. */
struct vhost_session_fn_ctx {
	/** Device pointer obtained before enqueueing the event */
	struct spdk_vhost_dev *vdev;

	/** ID of the session to send event to. */
	uint32_t vsession_id;

	/** User provided function to be executed on session's thread. */
	spdk_vhost_session_fn cb_fn;

	/**
	 * User provided function to be called on the init thread
	 * after iterating through all sessions.
	 */
	spdk_vhost_dev_fn cpl_fn;

	/** Custom user context */
	void *user_ctx;
};
|
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
static TAILQ_HEAD(, spdk_vhost_dev) g_vhost_devices = TAILQ_HEAD_INITIALIZER(
|
|
|
|
g_vhost_devices);
|
|
|
|
static pthread_mutex_t g_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;
|
2017-05-18 13:57:09 +00:00
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
/*
 * Translate a guest-physical address range into a host virtual address.
 *
 * Returns NULL when the whole range [addr, addr + len) is not mapped
 * contiguously in the session's memory table.
 */
void *vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
{
	uint64_t mapped_len = len;
	void *vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &mapped_len);

	/* rte_vhost_va_from_guest_pa() shrinks mapped_len when the region is
	 * not contiguous; treat a partial mapping as a failed translation. */
	return (mapped_len == len) ? vva : NULL;
}
|
|
|
|
|
2018-01-03 13:24:38 +00:00
|
|
|
/*
 * Mark all writable descriptors of request @req_id as dirty in the
 * live-migration log.  No-op unless VHOST_F_LOG_ALL was negotiated.
 */
static void
vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		   uint16_t req_id)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;
	int rc;

	/* Dirty logging is only needed while migration tracks guest pages. */
	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	rc = vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Can't log used ring descriptors!\n");
		return;
	}

	do {
		if (vhost_vring_desc_is_wr(desc)) {
			/* To be honest, only pages really touched should be logged, but
			 * doing so would require tracking those changes in each backend.
			 * Also the backend will most likely touch all/most of those pages,
			 * so let's assume we touched all pages passed to us as writeable buffers. */
			rte_vhost_log_write(vsession->vid, desc->addr, desc->len);
		}
		vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	} while (desc);
}
|
|
|
|
|
|
|
|
/*
 * Mark the used-ring element at @idx as dirty in the live-migration log.
 * Handles both packed and split ring layouts.  No-op unless
 * VHOST_F_LOG_ALL was negotiated.
 */
static void
vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue,
			  uint16_t idx)
{
	uint64_t offset, len;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	if (spdk_unlikely(virtqueue->packed.packed_ring)) {
		/* Packed ring: the "used" element is the descriptor itself. */
		offset = idx * sizeof(struct vring_packed_desc);
		len = sizeof(struct vring_packed_desc);
	} else {
		/* Split ring: log the used->ring[idx] entry. */
		offset = offsetof(struct vring_used, ring[idx]);
		len = sizeof(virtqueue->vring.used->ring[idx]);
	}

	rte_vhost_log_used_vring(vsession->vid, virtqueue->vring_idx, offset, len);
}
|
|
|
|
|
|
|
|
static void
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
|
|
|
|
struct spdk_vhost_virtqueue *virtqueue)
|
2018-01-03 13:24:38 +00:00
|
|
|
{
|
|
|
|
uint64_t offset, len;
|
|
|
|
uint16_t vq_idx;
|
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
|
2018-01-03 13:24:38 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
offset = offsetof(struct vring_used, idx);
|
|
|
|
len = sizeof(virtqueue->vring.used->idx);
|
2018-12-13 10:51:34 +00:00
|
|
|
vq_idx = virtqueue - vsession->virtqueue;
|
2018-01-03 13:24:38 +00:00
|
|
|
|
2018-12-13 12:07:11 +00:00
|
|
|
rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len);
|
2018-01-03 13:24:38 +00:00
|
|
|
}
|
|
|
|
|
2017-05-24 12:52:07 +00:00
|
|
|
/*
|
|
|
|
* Get available requests from avail ring.
|
|
|
|
*/
|
|
|
|
uint16_t
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
|
|
|
|
uint16_t reqs_len)
|
2017-05-24 12:52:07 +00:00
|
|
|
{
|
2017-08-31 14:38:35 +00:00
|
|
|
struct rte_vhost_vring *vring = &virtqueue->vring;
|
|
|
|
struct vring_avail *avail = vring->avail;
|
|
|
|
uint16_t size_mask = vring->size - 1;
|
2019-02-24 23:46:37 +00:00
|
|
|
uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx;
|
2018-01-28 09:53:03 +00:00
|
|
|
uint16_t count, i;
|
2020-12-31 14:33:24 +00:00
|
|
|
int rc;
|
2021-01-06 08:26:18 +00:00
|
|
|
uint64_t u64_value;
|
2017-05-24 12:52:07 +00:00
|
|
|
|
2020-10-13 09:57:54 +00:00
|
|
|
spdk_smp_rmb();
|
|
|
|
|
2020-12-31 14:33:24 +00:00
|
|
|
if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
|
2021-01-06 08:26:18 +00:00
|
|
|
/* Read to clear vring's kickfd */
|
|
|
|
rc = read(vring->kickfd, &u64_value, sizeof(u64_value));
|
2020-12-31 14:33:24 +00:00
|
|
|
if (rc < 0) {
|
|
|
|
SPDK_ERRLOG("failed to acknowledge kickfd: %s.\n", spdk_strerror(errno));
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-28 09:53:03 +00:00
|
|
|
count = avail_idx - last_idx;
|
2017-05-24 12:52:07 +00:00
|
|
|
if (spdk_likely(count == 0)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-28 09:53:03 +00:00
|
|
|
if (spdk_unlikely(count > vring->size)) {
|
|
|
|
/* TODO: the queue is unrecoverably broken and should be marked so.
|
|
|
|
* For now we will fail silently and report there are no new avail entries.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
count = spdk_min(count, reqs_len);
|
2021-01-06 08:26:18 +00:00
|
|
|
|
|
|
|
virtqueue->last_avail_idx += count;
|
|
|
|
/* Check whether there are unprocessed reqs in vq, then kick vq manually */
|
2020-12-31 14:33:24 +00:00
|
|
|
if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
|
2021-01-06 08:26:18 +00:00
|
|
|
/* If avail_idx is larger than virtqueue's last_avail_idx, then there is unprocessed reqs.
|
|
|
|
* avail_idx should get updated here from memory, in case of race condition with guest.
|
|
|
|
*/
|
|
|
|
avail_idx = * (volatile uint16_t *) &avail->idx;
|
|
|
|
if (avail_idx > virtqueue->last_avail_idx) {
|
|
|
|
/* Write to notify vring's kickfd */
|
|
|
|
rc = write(vring->kickfd, &u64_value, sizeof(u64_value));
|
2020-10-24 04:41:30 +00:00
|
|
|
if (rc < 0) {
|
|
|
|
SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-24 12:52:07 +00:00
|
|
|
for (i = 0; i < count; i++) {
|
2017-08-31 14:38:35 +00:00
|
|
|
reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
|
2017-05-24 12:52:07 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(vhost_ring,
|
2017-05-24 12:52:07 +00:00
|
|
|
"AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
|
|
|
|
last_idx, avail_idx, count);
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2017-10-09 16:55:00 +00:00
|
|
|
static bool
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
|
2017-10-09 16:55:00 +00:00
|
|
|
{
|
|
|
|
return !!(cur_desc->flags & VRING_DESC_F_INDIRECT);
|
|
|
|
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
static bool
|
|
|
|
vhost_vring_packed_desc_is_indirect(struct vring_packed_desc *cur_desc)
|
|
|
|
{
|
|
|
|
return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
|
|
|
|
}
|
|
|
|
|
2020-08-18 14:30:31 +00:00
|
|
|
static bool
|
|
|
|
vhost_inflight_packed_desc_is_indirect(spdk_vhost_inflight_desc *cur_desc)
|
|
|
|
{
|
|
|
|
return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
|
|
|
|
}
|
|
|
|
|
2017-10-09 15:28:00 +00:00
|
|
|
/*
 * Resolve request @req_idx of a split ring into its first descriptor and
 * the table it should be walked in.
 *
 * For an indirect request, *desc_table points at the guest-provided
 * indirect table (translated to host memory) and *desc at its first
 * entry; otherwise *desc_table is the ring's own descriptor array.
 * Returns 0 on success, -1 on a bad index or failed translation.
 */
int
vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		  uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		  uint32_t *desc_table_size)
{
	if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
		return -1;
	}

	*desc = &virtqueue->vring.desc[req_idx];

	if (vhost_vring_desc_is_indirect(*desc)) {
		/* The indirect table length (in bytes) encodes its entry count. */
		*desc_table_size = (*desc)->len / sizeof(**desc);
		*desc_table = vhost_gpa_to_vva(vsession, (*desc)->addr,
					       sizeof(**desc) * *desc_table_size);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -1;
		}

		return 0;
	}

	*desc_table = virtqueue->vring.desc;
	*desc_table_size = virtqueue->vring.size;

	return 0;
}
|
|
|
|
|
2020-08-18 14:30:31 +00:00
|
|
|
static bool
|
|
|
|
vhost_packed_desc_indirect_to_desc_table(struct spdk_vhost_session *vsession,
|
|
|
|
uint64_t addr, uint32_t len,
|
|
|
|
struct vring_packed_desc **desc_table,
|
|
|
|
uint32_t *desc_table_size)
|
|
|
|
{
|
|
|
|
*desc_table_size = len / sizeof(struct vring_packed_desc);
|
|
|
|
|
|
|
|
*desc_table = vhost_gpa_to_vva(vsession, addr, len);
|
|
|
|
if (spdk_unlikely(*desc_table == NULL)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
/*
 * Resolve request @req_idx of a packed ring into its first descriptor.
 *
 * For an indirect request, *desc_table/*desc_table_size describe the
 * translated indirect table and *desc points at its first entry; for a
 * direct chain *desc_table is NULL and the chain is walked via F_NEXT.
 * Returns 0 on success, -1 on failed indirect-table translation.
 */
int
vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue,
			 uint16_t req_idx, struct vring_packed_desc **desc,
			 struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &virtqueue->vring.desc_packed[req_idx];

	/* In packed ring when the desc is non-indirect we get next desc
	 * by judging (desc->flag & VRING_DESC_F_NEXT) != 0. When the desc
	 * is indirect we get next desc by idx and desc_table_size. It's
	 * different from split ring.
	 */
	if (vhost_vring_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		*desc = *desc_table;
	} else {
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}
|
|
|
|
|
2020-08-18 14:30:31 +00:00
|
|
|
/*
 * Resolve inflight request @req_idx (replayed after a reconnect) into
 * its descriptor.  Mirrors vhost_vq_get_desc_packed() but reads from the
 * inflight descriptor array instead of the live ring.
 * Returns 0 on success, -1 on failed indirect-table translation.
 */
int
vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
			      spdk_vhost_inflight_desc *desc_array,
			      uint16_t req_idx, spdk_vhost_inflight_desc **desc,
			      struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &desc_array[req_idx];

	if (vhost_inflight_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		/* This desc is the inflight desc not the packed desc.
		 * When set the F_INDIRECT the table entry should be the packed desc
		 * so set the inflight desc NULL.
		 */
		*desc = NULL;
	} else {
		/* When not set the F_INDIRECT means there is no packed desc table */
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}
|
|
|
|
|
2017-09-11 17:45:56 +00:00
|
|
|
int
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_vq_used_signal(struct spdk_vhost_session *vsession,
|
|
|
|
struct spdk_vhost_virtqueue *virtqueue)
|
2017-09-11 17:45:56 +00:00
|
|
|
{
|
|
|
|
if (virtqueue->used_req_cnt == 0) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtqueue->req_cnt += virtqueue->used_req_cnt;
|
|
|
|
virtqueue->used_req_cnt = 0;
|
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_DEBUGLOG(vhost_ring,
|
2017-09-11 17:45:56 +00:00
|
|
|
"Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
|
2019-02-24 23:46:37 +00:00
|
|
|
virtqueue - vsession->virtqueue, virtqueue->last_used_idx);
|
2017-09-11 17:45:56 +00:00
|
|
|
|
2019-05-10 11:37:54 +00:00
|
|
|
if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) {
|
|
|
|
/* interrupt signalled */
|
|
|
|
return 1;
|
|
|
|
} else {
|
|
|
|
/* interrupt not signalled */
|
|
|
|
return 0;
|
|
|
|
}
|
2017-09-11 17:45:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Recompute the adaptive interrupt-coalescing delay for @virtqueue.
 *
 * When more requests than coalescing_io_rate_threshold completed in the
 * last stats interval, the IRQ delay is stretched proportionally to the
 * excess; the request counter is then reset and the next event time
 * anchored at @now.
 */
static void
session_vq_io_stats_update(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	uint32_t irq_delay_base = vsession->coalescing_delay_time_base;
	uint32_t io_threshold = vsession->coalescing_io_rate_threshold;
	int32_t irq_delay;
	uint32_t req_cnt;

	req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
	if (req_cnt <= io_threshold) {
		/* Below the IO-rate threshold: keep the current delay. */
		return;
	}

	/* Scale the delay by how far above the threshold we are. */
	irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold;
	virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay);

	virtqueue->req_cnt = 0;
	virtqueue->next_event_time = now;
}
|
|
|
|
|
|
|
|
static void
|
2020-10-24 04:56:24 +00:00
|
|
|
check_session_vq_io_stats(struct spdk_vhost_session *vsession,
|
|
|
|
struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
|
2020-10-20 12:31:17 +00:00
|
|
|
{
|
2018-12-17 02:45:35 +00:00
|
|
|
if (now < vsession->next_stats_check_time) {
|
2017-09-11 17:45:56 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-12-17 02:45:35 +00:00
|
|
|
vsession->next_stats_check_time = now + vsession->stats_check_interval;
|
2020-10-24 04:56:24 +00:00
|
|
|
session_vq_io_stats_update(vsession, virtqueue, now);
|
2017-09-11 17:45:56 +00:00
|
|
|
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
static inline bool
|
|
|
|
vhost_vq_event_is_suppressed(struct spdk_vhost_virtqueue *vq)
|
|
|
|
{
|
|
|
|
if (spdk_unlikely(vq->packed.packed_ring)) {
|
|
|
|
if (vq->vring.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-09-11 17:45:56 +00:00
|
|
|
/*
 * Signal the guest for @virtqueue if needed, applying the configured
 * interrupt-coalescing policy.  With coalescing disabled
 * (coalescing_delay_time_base == 0) an interrupt is raised immediately;
 * otherwise it is deferred until next_event_time has passed.
 */
void
vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue)
{
	struct spdk_vhost_session *vsession = virtqueue->vsession;
	uint64_t now;

	if (vsession->coalescing_delay_time_base == 0) {
		/* Queue not started yet - nothing to signal. */
		if (virtqueue->vring.desc == NULL) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	} else {
		now = spdk_get_ticks();
		check_session_vq_io_stats(vsession, virtqueue, now);

		/* No need for event right now */
		if (now < virtqueue->next_event_time) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		if (!vhost_vq_used_signal(vsession, virtqueue)) {
			return;
		}

		/* Syscall is quite long so update time */
		now = spdk_get_ticks();
		virtqueue->next_event_time = now + virtqueue->irq_delay_time;
	}
}
|
2017-09-11 17:45:56 +00:00
|
|
|
|
2020-10-24 04:56:24 +00:00
|
|
|
void
|
|
|
|
vhost_session_used_signal(struct spdk_vhost_session *vsession)
|
|
|
|
{
|
|
|
|
struct spdk_vhost_virtqueue *virtqueue;
|
|
|
|
uint16_t q_idx;
|
|
|
|
|
|
|
|
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
|
|
|
|
virtqueue = &vsession->virtqueue[q_idx];
|
|
|
|
vhost_session_vq_used_signal(virtqueue);
|
2017-09-11 17:45:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-24 12:52:07 +00:00
|
|
|
/*
 * Enqueue id and len to used ring.
 *
 * Posts one completion to the split used ring, updates the DPDK inflight
 * tracking and the migration dirty log, and — in interrupt mode — signals
 * the guest immediately unless interrupts are suppressed.
 */
void
vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue,
			   uint16_t id, uint32_t len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_used *used = vring->used;
	/* Ring size is a power of two for split rings, so mask instead of mod. */
	uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1);
	uint16_t vq_idx = virtqueue->vring_idx;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len);

	vhost_log_req_desc(vsession, virtqueue, id);

	virtqueue->last_used_idx++;
	used->ring[last_idx].id = id;
	used->ring[last_idx].len = len;

	/* Ensure the used ring is updated before we log it or increment used->idx. */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_split(vsession->vid, vq_idx, id);

	vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
	/* Publish the new used index to the guest in a single store. */
	* (volatile uint16_t *) &used->idx = virtqueue->last_used_idx;
	vhost_log_used_vring_idx(vsession, virtqueue);

	rte_vhost_clr_inflight_desc_split(vsession->vid, vq_idx, virtqueue->last_used_idx, id);

	virtqueue->used_req_cnt++;

	if (vsession->interrupt_mode) {
		if (virtqueue->vring.desc == NULL || vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	}
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
/*
 * Mark a packed-ring descriptor chain of @num_descs entries as used,
 * recording @buffer_id and the written @length, updating inflight
 * tracking, the dirty log, and advancing last_used_idx with wrap-phase
 * handling.
 */
void
vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t num_descs, uint16_t buffer_id,
			     uint32_t length, uint16_t inflight_head)
{
	struct vring_packed_desc *desc = &virtqueue->vring.desc_packed[virtqueue->last_used_idx];
	bool used, avail;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - RING: buffer_id=%"PRIu16"\n",
		      virtqueue - vsession->virtqueue, buffer_id);

	/* When the descriptor is used, two flags in descriptor
	 * avail flag and used flag are set to equal
	 * and used flag value == used_wrap_counter.
	 */
	used = !!(desc->flags & VRING_DESC_F_USED);
	avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	if (spdk_unlikely(used == virtqueue->packed.used_phase && used == avail)) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In used desc addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = length;

	/* This bit specifies whether any data has been written by the device */
	if (length != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_packed(vsession->vid, virtqueue->vring_idx, inflight_head);
	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (virtqueue->packed.used_phase) {
		desc->flags |= VRING_DESC_F_AVAIL_USED;
	} else {
		desc->flags &= ~VRING_DESC_F_AVAIL_USED;
	}
	rte_vhost_clr_inflight_desc_packed(vsession->vid, virtqueue->vring_idx, inflight_head);

	vhost_log_used_vring_elem(vsession, virtqueue, virtqueue->last_used_idx);
	virtqueue->last_used_idx += num_descs;
	/* Packed ring size need not be a power of two: wrap explicitly and
	 * flip the used phase on wrap-around. */
	if (virtqueue->last_used_idx >= virtqueue->vring.size) {
		virtqueue->last_used_idx -= virtqueue->vring.size;
		virtqueue->packed.used_phase = !virtqueue->packed.used_phase;
	}

	virtqueue->used_req_cnt++;
}
|
|
|
|
|
|
|
|
/*
 * True when the packed-ring descriptor at last_avail_idx has been made
 * available by the driver (its F_AVAIL bit matches our avail phase).
 */
bool
vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue)
{
	uint16_t flags = virtqueue->vring.desc_packed[virtqueue->last_avail_idx].flags;

	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
	 * to match the internal avail wrap counter. It also sets the F_USED bit to
	 * match the inverse value but it's not mandatory.
	 */
	return (!!(flags & VRING_DESC_F_AVAIL) == virtqueue->packed.avail_phase);
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc)
|
|
|
|
{
|
|
|
|
return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
|
|
|
|
}
|
|
|
|
|
2020-08-18 14:30:31 +00:00
|
|
|
bool
|
|
|
|
vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc)
|
|
|
|
{
|
|
|
|
return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
|
|
|
|
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
/*
 * Advance *desc/*req_idx to the next descriptor of a packed-ring chain.
 * Sets *desc to NULL at the end of the chain.  Always returns 0.
 */
int
vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				 struct spdk_vhost_virtqueue *vq,
				 struct vring_packed_desc *desc_table,
				 uint32_t desc_table_size)
{
	if (desc_table != NULL) {
		/* When the desc_table isn't NULL means it's indirect and we get the next
		 * desc by req_idx and desc_table_size. The return value is NULL means
		 * we reach the last desc of this request.
		 */
		(*req_idx)++;
		if (*req_idx < desc_table_size) {
			*desc = &desc_table[*req_idx];
		} else {
			*desc = NULL;
		}
	} else {
		/* When the desc_table is NULL means it's non-indirect and we get the next
		 * desc by req_idx and F_NEXT in flags. The return value is NULL means
		 * we reach the last desc of this request. When return new desc
		 * we update the req_idx too.
		 */
		if (((*desc)->flags & VRING_DESC_F_NEXT) == 0) {
			*desc = NULL;
			return 0;
		}

		*req_idx = (*req_idx + 1) % vq->vring.size;
		*desc = &vq->vring.desc_packed[*req_idx];
	}

	return 0;
}
|
|
|
|
|
2020-02-25 10:54:11 +00:00
|
|
|
/*
 * Translate the guest-physical payload [payload, payload + remaining)
 * into host iovecs, splitting across non-contiguous memory regions.
 *
 * Appends entries at *iov_index (incremented per entry).  Returns 0 on
 * success, -1 when translation fails or SPDK_VHOST_IOVS_MAX entries
 * would be exceeded.
 */
static int
vhost_vring_desc_payload_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				uint16_t *iov_index, uintptr_t payload, uint64_t remaining)
{
	uintptr_t vva;
	uint64_t len;

	do {
		if (*iov_index >= SPDK_VHOST_IOVS_MAX) {
			SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
			return -1;
		}
		/* Ask for the whole remainder; the call shrinks len to the
		 * contiguously-mapped prefix. */
		len = remaining;
		vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len);
		if (vva == 0 || len == 0) {
			SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
			return -1;
		}
		iov[*iov_index].iov_base = (void *)vva;
		iov[*iov_index].iov_len = len;
		remaining -= len;
		payload += len;
		(*iov_index)++;
	} while (remaining);

	return 0;
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
int
|
|
|
|
vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
|
|
|
|
uint16_t *iov_index, const struct vring_packed_desc *desc)
|
|
|
|
{
|
|
|
|
return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
|
|
|
|
desc->addr, desc->len);
|
|
|
|
}
|
|
|
|
|
2020-08-18 14:30:31 +00:00
|
|
|
int
|
|
|
|
vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
|
|
|
|
uint16_t *iov_index, const spdk_vhost_inflight_desc *desc)
|
|
|
|
{
|
|
|
|
return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
|
|
|
|
desc->addr, desc->len);
|
|
|
|
}
|
|
|
|
|
2019-12-06 12:45:41 +00:00
|
|
|
/* 1, Traverse the desc chain to get the buffer_id and return buffer_id as task_idx.
 * 2, Update the vq->last_avail_idx to point next available desc chain.
 * 3, Update the avail_wrap_counter if last_avail_idx overturn.
 */
uint16_t
vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
				      uint16_t *num_descs)
{
	struct vring_packed_desc *desc;
	uint16_t desc_head = req_idx;

	*num_descs = 1;

	desc = &vq->vring.desc_packed[req_idx];
	if (!vhost_vring_packed_desc_is_indirect(desc)) {
		/* Direct chain: walk F_NEXT links to the last descriptor,
		 * which carries the buffer ID. */
		while ((desc->flags & VRING_DESC_F_NEXT) != 0) {
			req_idx = (req_idx + 1) % vq->vring.size;
			desc = &vq->vring.desc_packed[req_idx];
			(*num_descs)++;
		}
	}

	/* Queue Size doesn't have to be a power of 2
	 * Device maintains last_avail_idx so we can make sure
	 * the value is valid(0 ~ vring.size - 1)
	 */
	vq->last_avail_idx = (req_idx + 1) % vq->vring.size;
	if (vq->last_avail_idx < desc_head) {
		/* We wrapped past the end of the ring - flip the avail phase. */
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	return desc->id;
}
|
|
|
|
|
|
|
|
int
|
|
|
|
vhost_vring_desc_get_next(struct vring_desc **desc,
|
|
|
|
struct vring_desc *desc_table, uint32_t desc_table_size)
|
|
|
|
{
|
|
|
|
struct vring_desc *old_desc = *desc;
|
|
|
|
uint16_t next_idx;
|
|
|
|
|
|
|
|
if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
|
|
|
|
*desc = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
next_idx = old_desc->next;
|
|
|
|
if (spdk_unlikely(next_idx >= desc_table_size)) {
|
|
|
|
*desc = NULL;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*desc = &desc_table[next_idx];
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-02-25 10:54:11 +00:00
|
|
|
/* Convert a single split-ring descriptor into iovec entries.
 *
 * Thin wrapper over vhost_vring_desc_payload_to_iov(): translates the
 * guest-physical payload region (desc->addr, desc->len) into host iovecs,
 * appending at iov[*iov_index] and advancing *iov_index.
 * Returns whatever the payload helper returns (0 on success).
 */
int
vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			uint16_t *iov_index, const struct vring_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}
|
|
|
|
|
2018-12-17 15:33:30 +00:00
|
|
|
static struct spdk_vhost_session *
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_session_find_by_id(struct spdk_vhost_dev *vdev, unsigned id)
|
2018-12-17 15:33:30 +00:00
|
|
|
{
|
|
|
|
struct spdk_vhost_session *vsession;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
|
|
|
|
if (vsession->id == id) {
|
|
|
|
return vsession;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-02-23 11:59:34 +00:00
|
|
|
struct spdk_vhost_session *
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_session_find_by_vid(int vid)
|
2017-05-18 13:57:09 +00:00
|
|
|
{
|
|
|
|
struct spdk_vhost_dev *vdev;
|
2018-12-17 15:33:30 +00:00
|
|
|
struct spdk_vhost_session *vsession;
|
2017-05-18 13:57:09 +00:00
|
|
|
|
2022-01-11 10:48:33 +00:00
|
|
|
for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
|
|
|
|
vdev = spdk_vhost_dev_next(vdev)) {
|
2018-12-17 15:33:30 +00:00
|
|
|
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
|
|
|
|
if (vsession->vid == vid) {
|
|
|
|
return vsession;
|
|
|
|
}
|
2017-05-18 13:57:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-01-13 14:13:46 +00:00
|
|
|
struct spdk_vhost_dev *
|
|
|
|
spdk_vhost_dev_next(struct spdk_vhost_dev *vdev)
|
|
|
|
{
|
|
|
|
if (vdev == NULL) {
|
2019-07-20 21:06:19 +00:00
|
|
|
return TAILQ_FIRST(&g_vhost_devices);
|
2019-01-13 14:13:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return TAILQ_NEXT(vdev, tailq);
|
|
|
|
}
|
|
|
|
|
2018-02-09 19:08:07 +00:00
|
|
|
struct spdk_vhost_dev *
|
|
|
|
spdk_vhost_dev_find(const char *ctrlr_name)
|
2017-03-02 14:12:20 +00:00
|
|
|
{
|
2018-02-12 20:02:15 +00:00
|
|
|
struct spdk_vhost_dev *vdev;
|
2017-03-02 14:12:20 +00:00
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
TAILQ_FOREACH(vdev, &g_vhost_devices, tailq) {
|
2018-02-12 20:02:15 +00:00
|
|
|
if (strcmp(vdev->name, ctrlr_name) == 0) {
|
|
|
|
return vdev;
|
2017-03-02 14:12:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-09 19:08:07 +00:00
|
|
|
return NULL;
|
2017-03-02 14:12:20 +00:00
|
|
|
}
|
|
|
|
|
2017-08-10 18:11:14 +00:00
|
|
|
/* Parse a user-supplied CPU mask string and intersect it with the global
 * vhost core mask.
 *
 * On success, *cpumask holds the effective set of cores. Rules:
 *   - mask == NULL  -> use the full g_vhost_core_mask;
 *   - any core in the mask outside g_vhost_core_mask is an error;
 *   - an empty resulting set is an error.
 * Returns 0 on success, -1 on any validation failure.
 */
static int
vhost_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
{
	int rc;
	struct spdk_cpuset negative_vhost_mask;

	if (cpumask == NULL) {
		return -1;
	}

	/* No mask given: default to every core vhost is allowed to run on. */
	if (mask == NULL) {
		spdk_cpuset_copy(cpumask, &g_vhost_core_mask);
		return 0;
	}

	rc = spdk_cpuset_parse(cpumask, mask);
	if (rc < 0) {
		SPDK_ERRLOG("invalid cpumask %s\n", mask);
		return -1;
	}

	/* Compute (requested AND NOT allowed): any bit set here is a core the
	 * user asked for that lies outside the vhost core mask. */
	spdk_cpuset_copy(&negative_vhost_mask, &g_vhost_core_mask);
	spdk_cpuset_negate(&negative_vhost_mask);
	spdk_cpuset_and(&negative_vhost_mask, cpumask);

	if (spdk_cpuset_count(&negative_vhost_mask) != 0) {
		SPDK_ERRLOG("one of selected cpu is outside of core mask(=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	/* Clamp the request to the allowed set. */
	spdk_cpuset_and(cpumask, &g_vhost_core_mask);

	if (spdk_cpuset_count(cpumask) == 0) {
		SPDK_ERRLOG("no cpu is selected among core mask(=%s)\n",
			    spdk_cpuset_fmt(&g_vhost_core_mask));
		return -1;
	}

	return 0;
}
|
|
|
|
|
2020-03-05 21:36:13 +00:00
|
|
|
/* Message handler executed on a controller's dedicated thread; tears that
 * thread down. Sent via spdk_thread_send_msg() so the exit happens on the
 * thread itself. arg1 is unused. */
static void
vhost_dev_thread_exit(void *arg1)
{
	spdk_thread_exit(spdk_get_thread());
}
|
|
|
|
|
2017-05-22 12:53:27 +00:00
|
|
|
/* Register a new vhost controller.
 *
 * Validates the name and cpumask, allocates the controller's name/path,
 * creates its dedicated SPDK thread, applies default coalescing settings,
 * and exposes the vhost-user unix socket at g_vhost_user_dev_dirname/name.
 * On success the device is appended to g_vhost_devices.
 *
 * Returns 0 on success; -EINVAL on bad name/cpumask/path, -EEXIST if a
 * controller with this name already exists, -EIO on allocation, thread
 * creation, or socket registration failure.
 */
int
vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
		   const struct spdk_vhost_dev_backend *backend)
{
	char path[PATH_MAX];
	struct spdk_cpuset cpumask = {};
	int rc;

	assert(vdev);
	if (name == NULL) {
		SPDK_ERRLOG("Can't register controller with no name\n");
		return -EINVAL;
	}

	if (vhost_parse_core_mask(mask_str, &cpumask) != 0) {
		SPDK_ERRLOG("cpumask %s is invalid (core mask is 0x%s)\n",
			    mask_str, spdk_cpuset_fmt(&g_vhost_core_mask));
		return -EINVAL;
	}

	if (spdk_vhost_dev_find(name)) {
		SPDK_ERRLOG("vhost controller %s already exists.\n", name);
		return -EEXIST;
	}

	/* snprintf returns the would-be length; >= sizeof(path) means truncation. */
	if (snprintf(path, sizeof(path), "%s%s", g_vhost_user_dev_dirname, name) >= (int)sizeof(path)) {
		SPDK_ERRLOG("Resulting socket path for controller %s is too long: %s%s\n",
			    name, g_vhost_user_dev_dirname, name);
		return -EINVAL;
	}

	vdev->name = strdup(name);
	vdev->path = strdup(path);
	if (vdev->name == NULL || vdev->path == NULL) {
		rc = -EIO;
		goto out;
	}

	/* Each controller gets its own SPDK thread pinned to the parsed cpumask. */
	vdev->thread = spdk_thread_create(vdev->name, &cpumask);
	if (vdev->thread == NULL) {
		SPDK_ERRLOG("Failed to create thread for vhost controller %s.\n", name);
		rc = -EIO;
		goto out;
	}

	vdev->registered = true;
	vdev->backend = backend;
	TAILQ_INIT(&vdev->vsessions);

	vhost_user_dev_set_coalescing(vdev, SPDK_VHOST_COALESCING_DELAY_BASE_US,
				      SPDK_VHOST_VQ_IOPS_COALESCING_THRESHOLD);

	if (vhost_register_unix_socket(path, name, vdev->virtio_features, vdev->disabled_features,
				       vdev->protocol_features)) {
		/* Undo the thread creation on the thread itself. */
		spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);
		rc = -EIO;
		goto out;
	}

	TAILQ_INSERT_TAIL(&g_vhost_devices, vdev, tailq);

	SPDK_INFOLOG(vhost, "Controller %s: new controller added\n", vdev->name);
	return 0;

out:
	/* free(NULL) is a no-op, so partially-allocated state is safe here. */
	free(vdev->name);
	free(vdev->path);
	return rc;
}
|
|
|
|
|
|
|
|
/* Unregister a vhost controller: tear down its unix socket, schedule its
 * dedicated thread to exit, free its name/path, and unlink it from
 * g_vhost_devices.
 *
 * Returns 0 on success, -EBUSY if any session (connection) is still alive,
 * -EIO if the vhost library refuses to unregister the socket.
 */
int
vhost_dev_unregister(struct spdk_vhost_dev *vdev)
{
	if (!TAILQ_EMPTY(&vdev->vsessions)) {
		SPDK_ERRLOG("Controller %s has still valid connection.\n", vdev->name);
		return -EBUSY;
	}

	/* Only unregister the socket if registration actually succeeded earlier. */
	if (vdev->registered && vhost_driver_unregister(vdev->path) != 0) {
		SPDK_ERRLOG("Could not unregister controller %s with vhost library\n"
			    "Check if domain socket %s still exists\n",
			    vdev->name, vdev->path);
		return -EIO;
	}

	SPDK_INFOLOG(vhost, "Controller %s: removed\n", vdev->name);

	/* The thread must exit on itself; send it the exit message. */
	spdk_thread_send_msg(vdev->thread, vhost_dev_thread_exit, NULL);

	free(vdev->name);
	free(vdev->path);
	TAILQ_REMOVE(&g_vhost_devices, vdev, tailq);
	return 0;
}
|
|
|
|
|
2017-03-02 14:12:20 +00:00
|
|
|
/* Return the controller's name. The returned string is owned by the device
 * and remains valid until the device is unregistered. */
const char *
spdk_vhost_dev_get_name(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return vdev->name;
}
|
|
|
|
|
2018-02-12 20:58:50 +00:00
|
|
|
/* Return the cpumask of the controller's dedicated thread (the effective
 * mask computed at registration time). */
const struct spdk_cpuset *
spdk_vhost_dev_get_cpumask(struct spdk_vhost_dev *vdev)
{
	assert(vdev != NULL);
	return spdk_thread_get_cpumask(vdev->thread);
}
|
|
|
|
|
2019-08-06 06:57:41 +00:00
|
|
|
/* Block on g_dpdk_sem for up to timeout_sec seconds.
 *
 * If the timed wait expires, log errmsg and fall back to an UNBOUNDED
 * sem_wait(): the post is still expected to arrive eventually, and the
 * semaphore count must be consumed exactly once per event or subsequent
 * waiters would be unbalanced.
 */
static void
wait_for_semaphore(int timeout_sec, const char *errmsg)
{
	struct timespec timeout;
	int rc;

	/* sem_timedwait() takes an absolute CLOCK_REALTIME deadline. */
	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += timeout_sec;
	rc = sem_timedwait(&g_dpdk_sem, &timeout);
	if (rc != 0) {
		SPDK_ERRLOG("Timeout waiting for event: %s.\n", errmsg);
		sem_wait(&g_dpdk_sem);
	}
}
|
|
|
|
|
|
|
|
/* Publish an event's result and wake the thread blocked in
 * wait_for_semaphore(). g_dpdk_response must be written BEFORE the post so
 * the waiter reads a valid value. */
static void
vhost_session_cb_done(int rc)
{
	g_dpdk_response = rc;
	sem_post(&g_dpdk_sem);
}
|
|
|
|
|
2019-03-17 12:08:25 +00:00
|
|
|
void
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_session_start_done(struct spdk_vhost_session *vsession, int response)
|
2019-03-17 12:08:25 +00:00
|
|
|
{
|
2019-03-17 11:13:01 +00:00
|
|
|
if (response == 0) {
|
2019-04-29 07:26:10 +00:00
|
|
|
vsession->started = true;
|
2019-07-30 09:23:06 +00:00
|
|
|
|
2019-03-17 11:13:01 +00:00
|
|
|
assert(vsession->vdev->active_session_num < UINT32_MAX);
|
|
|
|
vsession->vdev->active_session_num++;
|
|
|
|
}
|
2019-06-24 07:59:37 +00:00
|
|
|
|
2019-08-06 06:57:41 +00:00
|
|
|
vhost_session_cb_done(response);
|
2019-03-17 12:08:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_session_stop_done(struct spdk_vhost_session *vsession, int response)
|
2019-03-17 12:08:25 +00:00
|
|
|
{
|
2019-03-17 11:13:01 +00:00
|
|
|
if (response == 0) {
|
2019-04-29 07:26:10 +00:00
|
|
|
vsession->started = false;
|
2019-07-30 09:23:06 +00:00
|
|
|
|
2019-03-17 11:13:01 +00:00
|
|
|
assert(vsession->vdev->active_session_num > 0);
|
|
|
|
vsession->vdev->active_session_num--;
|
|
|
|
}
|
2019-06-24 07:59:37 +00:00
|
|
|
|
2019-08-06 06:57:41 +00:00
|
|
|
vhost_session_cb_done(response);
|
2019-03-17 12:08:25 +00:00
|
|
|
}
|
|
|
|
|
2017-09-04 18:48:49 +00:00
|
|
|
/* Runs on the device's thread on behalf of vhost_session_send_event().
 *
 * If the global vhost mutex is currently held elsewhere, re-queue this
 * message to the same thread instead of blocking it (spin-via-message).
 * The session is re-looked-up by id because it may have been torn down
 * between scheduling and execution; cb_fn must tolerate a NULL session.
 */
static void
vhost_event_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ctx = arg1;
	struct spdk_vhost_session *vsession;

	if (spdk_vhost_trylock() != 0) {
		/* Lock contended - retry later without blocking this thread. */
		spdk_thread_send_msg(spdk_get_thread(), vhost_event_cb, arg1);
		return;
	}

	vsession = vhost_session_find_by_id(ctx->vdev, ctx->vsession_id);
	ctx->cb_fn(ctx->vdev, vsession, NULL);
	spdk_vhost_unlock();
}
|
|
|
|
|
2019-06-24 10:54:24 +00:00
|
|
|
/* Execute cb_fn on the device's thread for the given session and wait
 * (synchronously) for its completion.
 *
 * Caller must hold the vhost mutex. The mutex is dropped while waiting so
 * the target thread's vhost_event_cb can acquire it, then re-taken before
 * returning. ev_ctx can live on the stack because this function does not
 * return until the callback has signalled g_dpdk_sem.
 *
 * Returns the callback's status as published in g_dpdk_response.
 */
int
vhost_session_send_event(struct spdk_vhost_session *vsession,
			 spdk_vhost_session_fn cb_fn, unsigned timeout_sec,
			 const char *errmsg)
{
	struct vhost_session_fn_ctx ev_ctx = {0};
	struct spdk_vhost_dev *vdev = vsession->vdev;

	ev_ctx.vdev = vdev;
	/* Pass the id, not the pointer - the session is re-resolved on the
	 * target thread in case it disappears in the meantime. */
	ev_ctx.vsession_id = vsession->id;
	ev_ctx.cb_fn = cb_fn;

	spdk_thread_send_msg(vdev->thread, vhost_event_cb, &ev_ctx);

	/* Release the lock so the event callback can take it, then wait. */
	spdk_vhost_unlock();
	wait_for_semaphore(timeout_sec, errmsg);
	spdk_vhost_lock();

	return g_dpdk_response;
}
|
|
|
|
|
2019-06-24 14:07:53 +00:00
|
|
|
/* Final stage of vhost_dev_foreach_session(), executed back on the vhost
 * init thread: drop the pending-async-op count, invoke the user's optional
 * completion callback, and free the context allocated by
 * vhost_dev_foreach_session().
 */
static void
foreach_session_finish_cb(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;

	if (spdk_vhost_trylock() != 0) {
		/* Lock contended - retry later without blocking this thread. */
		spdk_thread_send_msg(spdk_get_thread(),
				     foreach_session_finish_cb, arg1);
		return;
	}

	assert(vdev->pending_async_op_num > 0);
	vdev->pending_async_op_num--;
	if (ev_ctx->cpl_fn != NULL) {
		ev_ctx->cpl_fn(vdev, ev_ctx->user_ctx);
	}

	spdk_vhost_unlock();
	free(ev_ctx);
}
|
|
|
|
|
2017-08-08 16:52:53 +00:00
|
|
|
/* Runs on the device's thread on behalf of vhost_dev_foreach_session():
 * invoke the user callback for every initialized session. A negative
 * callback return aborts the iteration early. Afterwards, bounce to the
 * init thread to run the completion stage (foreach_session_finish_cb).
 */
static void
foreach_session(void *arg1)
{
	struct vhost_session_fn_ctx *ev_ctx = arg1;
	struct spdk_vhost_session *vsession;
	struct spdk_vhost_dev *vdev = ev_ctx->vdev;
	int rc;

	if (spdk_vhost_trylock() != 0) {
		/* Lock contended - retry later without blocking this thread. */
		spdk_thread_send_msg(spdk_get_thread(), foreach_session, arg1);
		return;
	}

	TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
		/* Skip sessions that have not completed startup initialization. */
		if (vsession->initialized) {
			rc = ev_ctx->cb_fn(vdev, vsession, ev_ctx->user_ctx);
			if (rc < 0) {
				goto out;
			}
		}
	}

out:
	spdk_vhost_unlock();

	spdk_thread_send_msg(g_vhost_init_thread, foreach_session_finish_cb, arg1);
}
|
|
|
|
|
|
|
|
/* Asynchronously apply fn to every initialized session of vdev on the
 * device's thread, then invoke cpl_fn (may be NULL) on the init thread.
 *
 * Allocates a context that is freed by foreach_session_finish_cb.
 * vdev->pending_async_op_num is incremented here and decremented in the
 * finish callback, keeping the device alive for the operation's duration.
 * Allocation failure is treated as fatal in debug builds (assert) and a
 * silent no-op otherwise.
 */
void
vhost_dev_foreach_session(struct spdk_vhost_dev *vdev,
			  spdk_vhost_session_fn fn,
			  spdk_vhost_dev_fn cpl_fn,
			  void *arg)
{
	struct vhost_session_fn_ctx *ev_ctx;

	ev_ctx = calloc(1, sizeof(*ev_ctx));
	if (ev_ctx == NULL) {
		SPDK_ERRLOG("Failed to alloc vhost event.\n");
		assert(false);
		return;
	}

	ev_ctx->vdev = vdev;
	ev_ctx->cb_fn = fn;
	ev_ctx->cpl_fn = cpl_fn;
	ev_ctx->user_ctx = arg;

	assert(vdev->pending_async_op_num < UINT32_MAX);
	vdev->pending_async_op_num++;

	spdk_thread_send_msg(vdev->thread, foreach_session, ev_ctx);
}
|
|
|
|
|
2019-10-14 10:01:41 +00:00
|
|
|
/* Stop a running session: ask the backend to quiesce it, push each queue's
 * last_avail/last_used indexes back to rte_vhost (so the guest can resume
 * after reconnect), and release the registered guest memory table.
 *
 * Returns 0 on success or the backend's error code if it refuses to stop.
 */
static int
_stop_session(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_dev *vdev = vsession->vdev;
	struct spdk_vhost_virtqueue *q;
	int rc;
	uint16_t i;

	rc = vdev->backend->stop_session(vsession);
	if (rc != 0) {
		SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vsession->vid);
		return rc;
	}

	for (i = 0; i < vsession->max_queues; i++) {
		q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL) {
			continue;
		}

		/* Packed virtqueues support up to 2^15 entries each
		 * so left one bit can be used as wrap counter.
		 */
		if (q->packed.packed_ring) {
			/* Re-encode the phase (wrap counter) into bit 15 of each
			 * index before handing it back to rte_vhost. */
			q->last_avail_idx = q->last_avail_idx |
					    ((uint16_t)q->packed.avail_phase << 15);
			q->last_used_idx = q->last_used_idx |
					   ((uint16_t)q->packed.used_phase << 15);
		}

		rte_vhost_set_vring_base(vsession->vid, i, q->last_avail_idx, q->last_used_idx);
	}

	vhost_session_mem_unregister(vsession->mem);
	free(vsession->mem);

	return 0;
}
|
|
|
|
|
2019-10-14 10:01:41 +00:00
|
|
|
int
|
2019-09-27 14:13:04 +00:00
|
|
|
vhost_stop_device_cb(int vid)
|
2019-03-04 22:44:43 +00:00
|
|
|
{
|
|
|
|
struct spdk_vhost_session *vsession;
|
2019-10-14 10:01:41 +00:00
|
|
|
int rc;
|
2019-03-04 22:44:43 +00:00
|
|
|
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_lock();
|
2019-07-20 21:06:19 +00:00
|
|
|
vsession = vhost_session_find_by_vid(vid);
|
2019-03-04 22:44:43 +00:00
|
|
|
if (vsession == NULL) {
|
|
|
|
SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2019-10-14 10:01:41 +00:00
|
|
|
return -EINVAL;
|
2019-03-04 22:44:43 +00:00
|
|
|
}
|
|
|
|
|
2019-04-29 07:26:10 +00:00
|
|
|
if (!vsession->started) {
|
2019-03-04 22:44:43 +00:00
|
|
|
/* already stopped, nothing to do */
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2019-10-14 10:01:41 +00:00
|
|
|
return -EALREADY;
|
2019-03-04 22:44:43 +00:00
|
|
|
}
|
|
|
|
|
2019-10-14 10:01:41 +00:00
|
|
|
rc = _stop_session(vsession);
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2019-10-14 10:01:41 +00:00
|
|
|
|
|
|
|
return rc;
|
2017-05-25 14:12:31 +00:00
|
|
|
}
|
|
|
|
|
2019-09-27 14:13:04 +00:00
|
|
|
/* rte_vhost callback: bring up the session identified by vid.
 *
 * Collects the negotiated features, initializes every virtqueue (vring
 * pointers, saved indexes, packed-ring phase bits, notification flags),
 * registers the guest memory table, and finally asks the backend to start
 * the session. Called with work happening under the global vhost mutex.
 *
 * Returns 0 on success (or if already started), -1 / backend error on
 * failure.
 */
int
vhost_start_device_cb(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct spdk_vhost_session *vsession;
	int rc = -1;
	uint16_t i;
	bool packed_ring;

	spdk_vhost_lock();

	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		goto out;
	}

	vdev = vsession->vdev;
	if (vsession->started) {
		/* already started, nothing to do */
		rc = 0;
		goto out;
	}

	if (vhost_get_negotiated_features(vid, &vsession->negotiated_features) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
		goto out;
	}

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	vsession->max_queues = 0;
	memset(vsession->virtqueue, 0, sizeof(vsession->virtqueue));
	for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		q->vsession = vsession;
		q->vring_idx = -1;
		if (rte_vhost_get_vhost_vring(vid, i, &q->vring)) {
			continue;
		}
		q->vring_idx = i;
		rte_vhost_get_vhost_ring_inflight(vid, i, &q->vring_inflight);

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (rte_vhost_get_vring_base(vsession->vid, i, &q->last_avail_idx, &q->last_used_idx)) {
			/* Mark the queue unusable; later loops skip NULL desc. */
			q->vring.desc = NULL;
			continue;
		}

		if (packed_ring) {
			/* Use the inflight mem to restore the last_avail_idx and last_used_idx.
			 * When the vring format is packed, there is no used_idx in the
			 * used ring, so VM can't resend the used_idx to VHOST when reconnect.
			 * QEMU version 5.2.0 supports the packed inflight before that it only
			 * supports split ring inflight because it doesn't send negotiated features
			 * before get inflight fd. Users can use RPC to enable this function.
			 */
			if (spdk_unlikely(g_packed_ring_recovery)) {
				rte_vhost_get_vring_base_from_inflight(vsession->vid, i,
								       &q->last_avail_idx,
								       &q->last_used_idx);
			}

			/* Packed virtqueues support up to 2^15 entries each
			 * so left one bit can be used as wrap counter.
			 */
			q->packed.avail_phase = q->last_avail_idx >> 15;
			q->last_avail_idx = q->last_avail_idx & 0x7FFF;
			q->packed.used_phase = q->last_used_idx >> 15;
			q->last_used_idx = q->last_used_idx & 0x7FFF;

			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			}
		} else {
			if (!vsession->interrupt_mode) {
				/* Disable I/O submission notifications, we'll be polling. */
				q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}
		}

		q->packed.packed_ring = packed_ring;
		vsession->max_queues = i + 1;
	}

	if (vhost_get_mem_table(vid, &vsession->mem) != 0) {
		SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid);
		goto out;
	}

	/*
	 * Not sure right now but this look like some kind of QEMU bug and guest IO
	 * might be frozed without kicking all queues after live-migration. This look like
	 * the previous vhost instance failed to effectively deliver all interrupts before
	 * the GET_VRING_BASE message. This shouldn't harm guest since spurious interrupts
	 * should be ignored by guest virtio driver.
	 *
	 * Tested on QEMU 2.10.91 and 2.11.50.
	 */
	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc != NULL && q->vring.size > 0) {
			rte_vhost_vring_call(vsession->vid, q->vring_idx);
		}
	}

	vhost_user_session_set_coalescing(vdev, vsession, NULL);
	vhost_session_mem_register(vsession->mem);
	vsession->initialized = true;
	rc = vdev->backend->start_session(vsession);
	if (rc != 0) {
		/* Backend refused the session - roll back the memory registration. */
		vhost_session_mem_unregister(vsession->mem);
		free(vsession->mem);
		goto out;
	}

out:
	spdk_vhost_unlock();
	return rc;
}
|
|
|
|
|
2020-12-31 13:07:31 +00:00
|
|
|
/* Switch every active queue of a session between interrupt mode (guest
 * kick notifications enabled) and polling mode (notifications suppressed).
 *
 * The flag stores go through volatile casts so the write is not elided or
 * reordered - the guest driver reads these flags concurrently from shared
 * memory. When enabling interrupts, each vring is also kicked once to
 * close the race where a request arrived while notifications were off.
 */
void
vhost_session_set_interrupt_mode(struct spdk_vhost_session *vsession, bool interrupt_mode)
{
	uint16_t i;
	bool packed_ring;
	int rc = 0;

	packed_ring = ((vsession->negotiated_features & (1ULL << VIRTIO_F_RING_PACKED)) != 0);

	for (i = 0; i < vsession->max_queues; i++) {
		struct spdk_vhost_virtqueue *q = &vsession->virtqueue[i];
		uint64_t num_events = 1;

		/* vring.desc and vring.desc_packed are in a union struct
		 * so q->vring.desc can replace q->vring.desc_packed.
		 */
		if (q->vring.desc == NULL || q->vring.size == 0) {
			continue;
		}

		if (interrupt_mode) {
			/* Enable I/O submission notifications, we'll be interrupting. */
			if (packed_ring) {
				* (volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_ENABLE;
			} else {
				* (volatile uint16_t *) &q->vring.used->flags = 0;
			}

			/* In case of race condition, always kick vring when switch to intr */
			rc = write(q->vring.kickfd, &num_events, sizeof(num_events));
			if (rc < 0) {
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
			}

			vsession->interrupt_mode = true;
		} else {
			/* Disable I/O submission notifications, we'll be polling. */
			if (packed_ring) {
				* (volatile uint16_t *) &q->vring.device_event->flags = VRING_PACKED_EVENT_FLAG_DISABLE;
			} else {
				* (volatile uint16_t *) &q->vring.used->flags = VRING_USED_F_NO_NOTIFY;
			}

			vsession->interrupt_mode = false;
		}
	}
}
|
|
|
|
|
2017-07-13 14:37:50 +00:00
|
|
|
/* Delegate JSON info dumping to the backend-specific implementation.
 * Every backend is required to provide dump_info_json. */
void
vhost_dump_info_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w)
{
	assert(vdev->backend->dump_info_json != NULL);
	vdev->backend->dump_info_json(vdev, w);
}
|
|
|
|
|
2017-09-05 18:23:43 +00:00
|
|
|
int
|
2018-01-26 13:09:39 +00:00
|
|
|
spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev)
|
2017-09-05 18:23:43 +00:00
|
|
|
{
|
2019-01-07 23:30:57 +00:00
|
|
|
if (vdev->pending_async_op_num) {
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
2018-01-26 13:09:39 +00:00
|
|
|
return vdev->backend->remove_device(vdev);
|
2017-09-05 18:23:43 +00:00
|
|
|
}
|
|
|
|
|
2019-09-27 14:13:04 +00:00
|
|
|
int
|
|
|
|
vhost_new_connection_cb(int vid, const char *ifname)
|
2017-08-01 16:08:32 +00:00
|
|
|
{
|
|
|
|
struct spdk_vhost_dev *vdev;
|
2018-12-13 12:07:11 +00:00
|
|
|
struct spdk_vhost_session *vsession;
|
2021-11-26 09:15:01 +00:00
|
|
|
size_t dev_dirname_len;
|
2017-08-01 16:08:32 +00:00
|
|
|
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_lock();
|
2018-12-17 15:33:30 +00:00
|
|
|
|
2021-12-08 10:47:32 +00:00
|
|
|
dev_dirname_len = strlen(g_vhost_user_dev_dirname);
|
|
|
|
if (strncmp(ifname, g_vhost_user_dev_dirname, dev_dirname_len) == 0) {
|
2021-11-26 09:15:01 +00:00
|
|
|
ifname += dev_dirname_len;
|
|
|
|
}
|
|
|
|
|
2017-08-01 16:08:32 +00:00
|
|
|
vdev = spdk_vhost_dev_find(ifname);
|
|
|
|
if (vdev == NULL) {
|
|
|
|
SPDK_ERRLOG("Couldn't find device with vid %d to create connection for.\n", vid);
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2017-08-01 16:08:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-12-17 15:33:30 +00:00
|
|
|
/* We expect sessions inside vdev->vsessions to be sorted in ascending
|
|
|
|
* order in regard of vsession->id. For now we always set id = vsessions_cnt++
|
|
|
|
* and append each session to the very end of the vsessions list.
|
|
|
|
* This is required for spdk_vhost_dev_foreach_session() to work.
|
|
|
|
*/
|
|
|
|
if (vdev->vsessions_num == UINT_MAX) {
|
|
|
|
assert(false);
|
|
|
|
return -EINVAL;
|
2018-12-14 13:44:46 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 21:39:30 +00:00
|
|
|
if (posix_memalign((void **)&vsession, SPDK_CACHE_LINE_SIZE, sizeof(*vsession) +
|
|
|
|
vdev->backend->session_ctx_size)) {
|
|
|
|
SPDK_ERRLOG("vsession alloc failed\n");
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2017-08-01 16:08:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2019-04-08 21:39:30 +00:00
|
|
|
memset(vsession, 0, sizeof(*vsession) + vdev->backend->session_ctx_size);
|
2017-08-01 16:08:32 +00:00
|
|
|
|
2018-12-13 12:07:11 +00:00
|
|
|
vsession->vdev = vdev;
|
|
|
|
vsession->vid = vid;
|
2019-07-20 09:55:19 +00:00
|
|
|
vsession->id = vdev->vsessions_num++;
|
|
|
|
vsession->name = spdk_sprintf_alloc("%ss%u", vdev->name, vsession->vid);
|
|
|
|
if (vsession->name == NULL) {
|
|
|
|
SPDK_ERRLOG("vsession alloc failed\n");
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2019-07-20 09:55:19 +00:00
|
|
|
free(vsession);
|
|
|
|
return -1;
|
|
|
|
}
|
2019-04-29 07:26:10 +00:00
|
|
|
vsession->started = false;
|
2019-03-27 13:59:45 +00:00
|
|
|
vsession->initialized = false;
|
2018-12-17 02:45:35 +00:00
|
|
|
vsession->next_stats_check_time = 0;
|
|
|
|
vsession->stats_check_interval = SPDK_VHOST_STATS_CHECK_INTERVAL_MS *
|
|
|
|
spdk_get_ticks_hz() / 1000UL;
|
2018-12-17 15:33:30 +00:00
|
|
|
TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
|
2019-02-23 11:59:34 +00:00
|
|
|
|
2019-07-20 21:06:19 +00:00
|
|
|
vhost_session_install_rte_compat_hooks(vsession);
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2017-08-01 16:08:32 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-14 10:01:41 +00:00
|
|
|
/* rte_vhost callback: the vhost-user connection identified by vid closed.
 *
 * Stops the session first if it is still running, then unlinks and frees
 * the session object. Returns 0 on success, -EINVAL for an unknown vid,
 * or _stop_session()'s error code (in which case the session is kept so
 * a later retry is possible).
 */
int
vhost_destroy_connection_cb(int vid)
{
	struct spdk_vhost_session *vsession;
	int rc = 0;

	spdk_vhost_lock();
	vsession = vhost_session_find_by_vid(vid);
	if (vsession == NULL) {
		SPDK_ERRLOG("Couldn't find session with vid %d.\n", vid);
		spdk_vhost_unlock();
		return -EINVAL;
	}

	if (vsession->started) {
		rc = _stop_session(vsession);
		if (rc != 0) {
			/* Stop failed - leave the session in place and report. */
			spdk_vhost_unlock();
			return rc;
		}
	}

	TAILQ_REMOVE(&vsession->vdev->vsessions, vsession, tailq);
	free(vsession->name);
	free(vsession);
	spdk_vhost_unlock();

	return 0;
}
|
|
|
|
|
2017-09-08 11:06:41 +00:00
|
|
|
/*
 * Acquire the global vhost mutex, serializing access to the vhost
 * device and session lists.  Pairs with spdk_vhost_unlock().
 */
void
spdk_vhost_lock(void)
{
	pthread_mutex_lock(&g_vhost_mutex);
}
|
|
|
|
|
2019-03-17 00:57:42 +00:00
|
|
|
int
|
|
|
|
spdk_vhost_trylock(void)
|
|
|
|
{
|
2019-07-20 21:06:19 +00:00
|
|
|
return -pthread_mutex_trylock(&g_vhost_mutex);
|
2019-03-17 00:57:42 +00:00
|
|
|
}
|
|
|
|
|
2017-09-08 11:06:41 +00:00
|
|
|
/*
 * Release the global vhost mutex acquired by spdk_vhost_lock()
 * or a successful spdk_vhost_trylock().
 */
void
spdk_vhost_unlock(void)
{
	pthread_mutex_unlock(&g_vhost_mutex);
}
|
|
|
|
|
2019-04-25 16:11:24 +00:00
|
|
|
void
|
|
|
|
spdk_vhost_init(spdk_vhost_init_cb init_cb)
|
2017-11-29 06:47:59 +00:00
|
|
|
{
|
2018-07-12 13:29:18 +00:00
|
|
|
size_t len;
|
2021-01-29 19:04:48 +00:00
|
|
|
uint32_t i;
|
|
|
|
int ret = 0;
|
2017-12-24 23:43:41 +00:00
|
|
|
|
2019-06-24 07:00:19 +00:00
|
|
|
g_vhost_init_thread = spdk_get_thread();
|
|
|
|
assert(g_vhost_init_thread != NULL);
|
|
|
|
|
2021-12-08 10:47:32 +00:00
|
|
|
if (g_vhost_user_dev_dirname[0] == '\0') {
|
|
|
|
if (getcwd(g_vhost_user_dev_dirname, sizeof(g_vhost_user_dev_dirname) - 1) == NULL) {
|
2018-07-12 13:29:18 +00:00
|
|
|
SPDK_ERRLOG("getcwd failed (%d): %s\n", errno, spdk_strerror(errno));
|
2022-01-12 13:10:09 +00:00
|
|
|
init_cb(-1);
|
|
|
|
return;
|
2018-07-12 13:29:18 +00:00
|
|
|
}
|
|
|
|
|
2021-12-08 10:47:32 +00:00
|
|
|
len = strlen(g_vhost_user_dev_dirname);
|
|
|
|
if (g_vhost_user_dev_dirname[len - 1] != '/') {
|
|
|
|
g_vhost_user_dev_dirname[len] = '/';
|
|
|
|
g_vhost_user_dev_dirname[len + 1] = '\0';
|
2018-07-12 13:29:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-24 10:42:40 +00:00
|
|
|
spdk_cpuset_zero(&g_vhost_core_mask);
|
2021-01-29 19:04:48 +00:00
|
|
|
SPDK_ENV_FOREACH_CORE(i) {
|
|
|
|
spdk_cpuset_set_cpu(&g_vhost_core_mask, i, true);
|
|
|
|
}
|
2019-04-25 16:11:24 +00:00
|
|
|
init_cb(ret);
|
2017-11-29 06:47:59 +00:00
|
|
|
}
|
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
/*
 * Final teardown step, run on the vhost init thread: remove every
 * remaining vhost device, then invoke the stored fini completion
 * callback.
 */
static void
vhost_fini(void *arg1)
{
	struct spdk_vhost_dev *cur, *next;

	spdk_vhost_lock();
	for (cur = spdk_vhost_dev_next(NULL); cur != NULL; cur = next) {
		/* Fetch the successor before removal invalidates cur. */
		next = spdk_vhost_dev_next(cur);
		/* don't care if it fails, there's nothing we can do for now */
		spdk_vhost_dev_remove(cur);
	}
	spdk_vhost_unlock();

	g_fini_cpl_cb();
}
|
|
|
|
|
2019-04-29 09:44:40 +00:00
|
|
|
static void *
|
|
|
|
session_shutdown(void *arg)
|
|
|
|
{
|
|
|
|
struct spdk_vhost_dev *vdev = NULL;
|
vhost: stop started session in session_shutdown
DPDK vhost will call `new_device` when the VRINGs are
queue paired(virtio-net) or all the VRINGs are started.
However, for virtio-blk/scsi, SeaBIOS will only use one
VRING queue, DPDK added a workaround patch to add
`pre_msg_handle` and `post_msg_handle` callbacks to let
devices other than virtio-net to process such scenarios.
In SPDK, we will start the device when there is one valid
VRING, so there is a case that SPDK and DPDK have different
state for one device. For a virtio-scsi device, SeaBIOS will
only start the request queue, and in the BIOS stage, SPDK will
start the device but DPDK doesn't think so. If users killed
SPDK vhost target at the moment, in `session_shutdown`, SPDK
will expect DPDK to call `destroy_device` to do the cleanup,
but DPDK won't do that as it thinks the device isn't started.
Here in `session_shutdown`, SPDK will do this first, it's OK
that DPDK will call another `destroy_device` for devices that
have the same state both in SPDK and DPDK.
Fix issue #2228.
Change-Id: Ib76dd54c8fa302ffe6da9b13498312b7d344bbfe
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10143
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
2021-11-08 12:13:54 +00:00
|
|
|
struct spdk_vhost_session *vsession;
|
2019-04-29 09:44:40 +00:00
|
|
|
|
2022-01-11 10:48:33 +00:00
|
|
|
for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
|
|
|
|
vdev = spdk_vhost_dev_next(vdev)) {
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_lock();
|
vhost: stop started session in session_shutdown
DPDK vhost will call `new_device` when the VRINGs are
queue paired(virtio-net) or all the VRINGs are started.
However, for virtio-blk/scsi, SeaBIOS will only use one
VRING queue, DPDK added a workaround patch to add
`pre_msg_handle` and `post_msg_handle` callbacks to let
devices other than virtio-net to process such scenarios.
In SPDK, we will start the device when there is one valid
VRING, so there is a case that SPDK and DPDK have different
state for one device. For a virtio-scsi device, SeaBIOS will
only start the request queue, and in the BIOS stage, SPDK will
start the device but DPDK doesn't think so. If users killed
SPDK vhost target at the moment, in `session_shutdown`, SPDK
will expect DPDK to call `destroy_device` to do the cleanup,
but DPDK won't do that as it thinks the device isn't started.
Here in `session_shutdown`, SPDK will do this first, it's OK
that DPDK will call another `destroy_device` for devices that
have the same state both in SPDK and DPDK.
Fix issue #2228.
Change-Id: Ib76dd54c8fa302ffe6da9b13498312b7d344bbfe
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10143
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
2021-11-08 12:13:54 +00:00
|
|
|
TAILQ_FOREACH(vsession, &vdev->vsessions, tailq) {
|
|
|
|
if (vsession->started) {
|
|
|
|
_stop_session(vsession);
|
|
|
|
}
|
|
|
|
}
|
2022-01-11 10:10:07 +00:00
|
|
|
spdk_vhost_unlock();
|
2019-10-03 16:36:30 +00:00
|
|
|
vhost_driver_unregister(vdev->path);
|
2019-04-29 09:44:40 +00:00
|
|
|
vdev->registered = false;
|
|
|
|
}
|
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
SPDK_INFOLOG(vhost, "Exiting\n");
|
2020-05-15 16:29:10 +00:00
|
|
|
spdk_thread_send_msg(g_vhost_init_thread, vhost_fini, NULL);
|
2019-04-29 09:44:40 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-01-26 10:55:05 +00:00
|
|
|
void
|
|
|
|
spdk_vhost_fini(spdk_vhost_fini_cb fini_cb)
|
|
|
|
{
|
|
|
|
pthread_t tid;
|
|
|
|
int rc;
|
|
|
|
|
2019-06-24 07:00:19 +00:00
|
|
|
assert(spdk_get_thread() == g_vhost_init_thread);
|
2019-04-29 09:42:29 +00:00
|
|
|
g_fini_cpl_cb = fini_cb;
|
2018-01-26 10:55:05 +00:00
|
|
|
|
|
|
|
/* rte_vhost API for removing sockets is not asynchronous. Since it may call SPDK
|
|
|
|
* ops for stopping a device or removing a connection, we need to call it from
|
|
|
|
* a separate thread to avoid deadlock.
|
|
|
|
*/
|
2019-04-29 09:42:29 +00:00
|
|
|
rc = pthread_create(&tid, NULL, &session_shutdown, NULL);
|
2018-01-26 10:55:05 +00:00
|
|
|
if (rc < 0) {
|
|
|
|
SPDK_ERRLOG("Failed to start session shutdown thread (%d): %s\n", rc, spdk_strerror(rc));
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
pthread_detach(tid);
|
2017-11-29 06:47:59 +00:00
|
|
|
}
|
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
void
|
2019-03-05 21:21:57 +00:00
|
|
|
spdk_vhost_config_json(struct spdk_json_write_ctx *w)
|
2018-03-19 18:01:57 +00:00
|
|
|
{
|
2019-01-13 17:24:52 +00:00
|
|
|
struct spdk_vhost_dev *vdev;
|
2018-06-15 15:10:58 +00:00
|
|
|
uint32_t delay_base_us;
|
|
|
|
uint32_t iops_threshold;
|
2018-03-19 18:01:57 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_json_write_array_begin(w);
|
2018-06-15 15:10:58 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_vhost_lock();
|
2022-01-11 10:48:33 +00:00
|
|
|
for (vdev = spdk_vhost_dev_next(NULL); vdev != NULL;
|
|
|
|
vdev = spdk_vhost_dev_next(vdev)) {
|
2019-01-13 17:24:52 +00:00
|
|
|
vdev->backend->write_config_json(vdev, w);
|
2018-06-15 15:10:58 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_vhost_get_coalescing(vdev, &delay_base_us, &iops_threshold);
|
|
|
|
if (delay_base_us) {
|
|
|
|
spdk_json_write_object_begin(w);
|
2019-09-30 12:16:22 +00:00
|
|
|
spdk_json_write_named_string(w, "method", "vhost_controller_set_coalescing");
|
2018-06-15 15:10:58 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_json_write_named_object_begin(w, "params");
|
|
|
|
spdk_json_write_named_string(w, "ctrlr", vdev->name);
|
|
|
|
spdk_json_write_named_uint32(w, "delay_base_us", delay_base_us);
|
|
|
|
spdk_json_write_named_uint32(w, "iops_threshold", iops_threshold);
|
|
|
|
spdk_json_write_object_end(w);
|
2018-04-18 20:36:40 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_json_write_object_end(w);
|
|
|
|
}
|
2018-04-18 20:36:40 +00:00
|
|
|
}
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_vhost_unlock();
|
2018-03-19 18:01:57 +00:00
|
|
|
|
2019-01-13 17:24:52 +00:00
|
|
|
spdk_json_write_array_end(w);
|
2018-03-19 18:01:57 +00:00
|
|
|
}
|
|
|
|
|
2020-09-04 11:27:29 +00:00
|
|
|
/* Register the "vhost" and "vhost_ring" log components so their
 * debug output can be enabled/disabled at runtime. */
SPDK_LOG_REGISTER_COMPONENT(vhost)
SPDK_LOG_REGISTER_COMPONENT(vhost_ring)