vhost: introduce device sessions

Grouped a few spdk_vhost_dev struct fields into a new
struct spdk_vhost_session. A session will represent the
connection between an SPDK vhost device (vhost-user slave)
and QEMU (vhost-user master).

This serves two purposes. The first is to allow multiple
simultaneous connections to a single vhost device. Each
connection (session) will have access to the same storage,
but will use its own virtqueues, its own separately
negotiated features, and possibly different guest memory.
For Vhost-SCSI, this could be used together with the
upcoming SCSI reservations feature.
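
As a rough sketch of the regrouping, with simplified stand-in
types rather than the exact SPDK definitions (the real struct is
in the vhost_internal.h hunk below): per-connection state moves
into the session, while shared controller state stays in the
device.

#include <stdint.h>

struct vhost_session {                 /* one per connection */
	struct vhost_dev *vdev;        /* back-pointer to the device */
	uint64_t negotiated_features;
	uint16_t max_queues;
};

struct vhost_dev {                     /* shared, one per controller */
	const char *name;
	int32_t lcore;
	struct vhost_session session;  /* embedded: one session, for now */
};

int main(void)
{
	struct vhost_dev vdev = { .name = "vhost.0", .lcore = -1 };

	/* new_connection() in the diff below sets the same back-pointer */
	vdev.session.vdev = &vdev;
	return vdev.session.vdev == &vdev ? 0 : 1;
}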

The other purpose is to untie devices from lcores and tie
sessions to them instead. This will potentially allow us
to modify the device struct from any thread, meaning we
will be able to get rid of the external events API and
simplify a lot of the code that manages vhost, the vhost
RPC handlers for instance. Device backends themselves
would be responsible for propagating device events to each
session, but this could remain completely transparent to
the upper layers.
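
A hypothetical sketch of that follow-up direction; the propagate
helper below does not exist in this patch, which still embeds
exactly one session per device:

struct vhost_session { int dummy; };
struct vhost_dev { struct vhost_session session; };

/* Hypothetical: fan a device-level event out to every session.
 * With the single embedded session of this patch, the "loop"
 * has one iteration; later it would walk a session list. */
static void
propagate_event(struct vhost_dev *vdev,
		void (*event_fn)(struct vhost_session *))
{
	event_fn(&vdev->session);
}

static void on_event(struct vhost_session *vsession) { (void)vsession; }

int main(void)
{
	struct vhost_dev vdev = { { 0 } };

	propagate_event(&vdev, on_event);
	return 0;
}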

Change-Id: I39984cc0a3ae2e76e0817d48fdaa5f43d3339607
Signed-off-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/437774
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
commit 73844cccf4 (parent 58f1624497)
Darek Stojaczyk, 2018-12-13 11:51:34 +01:00; committed by Jim Harris
6 changed files with 229 additions and 172 deletions

File: lib/vhost/vhost.c

@ -89,13 +89,13 @@ static TAILQ_HEAD(, spdk_vhost_dev) g_spdk_vhost_devices = TAILQ_HEAD_INITIALIZER(
g_spdk_vhost_devices);
static pthread_mutex_t g_spdk_vhost_mutex = PTHREAD_MUTEX_INITIALIZER;
void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len)
void *spdk_vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
{
void *vva;
uint64_t newlen;
newlen = len;
vva = (void *)rte_vhost_va_from_guest_pa(vdev->mem, addr, &newlen);
vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen);
if (newlen != len) {
return NULL;
}
@ -105,18 +105,19 @@ void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t
}
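
For reference, a self-contained model of what this translation
does. The real lookup is rte_vhost_va_from_guest_pa(); the newlen
check above rejects ranges that are unmapped or span a region
boundary. Region values are borrowed from the unit test at the end
of this diff.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mem_region {
	uint64_t guest_phys_addr;
	uint64_t size;
	uint64_t host_user_addr;
};

/* Translate a guest physical range to a host virtual address,
 * requiring the whole [addr, addr + len) range to fit in one region. */
static void *
gpa_to_vva(const struct mem_region *regions, uint32_t nregions,
	   uint64_t addr, uint64_t len)
{
	for (uint32_t i = 0; i < nregions; i++) {
		const struct mem_region *r = &regions[i];

		if (addr >= r->guest_phys_addr &&
		    addr + len <= r->guest_phys_addr + r->size) {
			return (void *)(uintptr_t)(r->host_user_addr +
						   (addr - r->guest_phys_addr));
		}
	}
	return NULL;
}

int main(void)
{
	struct mem_region regions[] = {
		{ .guest_phys_addr = 0, .size = 0x400000, .host_user_addr = 0x1000000 },
	};

	assert(gpa_to_vva(regions, 1, 0x1000, 0x100) == (void *)(uintptr_t)0x1001000);
	assert(gpa_to_vva(regions, 1, 0x3FFF00, 0x200) == NULL); /* crosses the region end */
	return 0;
}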
static void
spdk_vhost_log_req_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
spdk_vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
uint16_t req_id)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
struct vring_desc *desc, *desc_table;
uint32_t desc_table_size;
int rc;
if (spdk_likely(!spdk_vhost_dev_has_feature(vdev, VHOST_F_LOG_ALL))) {
if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
return;
}
rc = spdk_vhost_vq_get_desc(vdev, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
rc = spdk_vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
if (spdk_unlikely(rc != 0)) {
SPDK_ERRLOG("Can't log used ring descriptors!\n");
return;
@ -135,36 +136,40 @@ spdk_vhost_log_req_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue
}
static void
spdk_vhost_log_used_vring_elem(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
spdk_vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
struct spdk_vhost_virtqueue *virtqueue,
uint16_t idx)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
uint64_t offset, len;
uint16_t vq_idx;
if (spdk_likely(!spdk_vhost_dev_has_feature(vdev, VHOST_F_LOG_ALL))) {
if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
return;
}
offset = offsetof(struct vring_used, ring[idx]);
len = sizeof(virtqueue->vring.used->ring[idx]);
vq_idx = virtqueue - vdev->virtqueue;
vq_idx = virtqueue - vsession->virtqueue;
rte_vhost_log_used_vring(vdev->vid, vq_idx, offset, len);
}
static void
spdk_vhost_log_used_vring_idx(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue)
spdk_vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
struct spdk_vhost_virtqueue *virtqueue)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
uint64_t offset, len;
uint16_t vq_idx;
if (spdk_likely(!spdk_vhost_dev_has_feature(vdev, VHOST_F_LOG_ALL))) {
if (spdk_likely(!spdk_vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
return;
}
offset = offsetof(struct vring_used, idx);
len = sizeof(virtqueue->vring.used->idx);
vq_idx = virtqueue - vdev->virtqueue;
vq_idx = virtqueue - vsession->virtqueue;
rte_vhost_log_used_vring(vdev->vid, vq_idx, offset, len);
}
@ -214,7 +219,7 @@ spdk_vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
}
int
spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
spdk_vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size)
{
@ -225,9 +230,9 @@ spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue
*desc = &virtqueue->vring.desc[req_idx];
if (spdk_vhost_vring_desc_is_indirect(*desc)) {
assert(spdk_vhost_dev_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC));
assert(spdk_vhost_dev_has_feature(vsession, VIRTIO_RING_F_INDIRECT_DESC));
*desc_table_size = (*desc)->len / sizeof(**desc);
*desc_table = spdk_vhost_gpa_to_vva(vdev, (*desc)->addr,
*desc_table = spdk_vhost_gpa_to_vva(vsession, (*desc)->addr,
sizeof(**desc) * *desc_table_size);
*desc = *desc_table;
if (*desc == NULL) {
@ -244,7 +249,8 @@ spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue
}
int
spdk_vhost_vq_used_signal(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue)
spdk_vhost_vq_used_signal(struct spdk_vhost_session *vsession,
struct spdk_vhost_virtqueue *virtqueue)
{
if (virtqueue->used_req_cnt == 0) {
return 0;
@ -255,7 +261,7 @@ spdk_vhost_vq_used_signal(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtque
SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING,
"Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
virtqueue - vdev->virtqueue, virtqueue->vring.last_used_idx);
virtqueue - vsession->virtqueue, virtqueue->vring.last_used_idx);
eventfd_write(virtqueue->vring.callfd, (eventfd_t)1);
return 1;
@ -263,8 +269,9 @@ spdk_vhost_vq_used_signal(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtque
static void
check_dev_io_stats(struct spdk_vhost_dev *vdev, uint64_t now)
check_session_io_stats(struct spdk_vhost_session *vsession, uint64_t now)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
struct spdk_vhost_virtqueue *virtqueue;
uint32_t irq_delay_base = vdev->coalescing_delay_time_base;
uint32_t io_threshold = vdev->coalescing_io_rate_threshold;
@ -277,8 +284,8 @@ check_dev_io_stats(struct spdk_vhost_dev *vdev, uint64_t now)
}
vdev->next_stats_check_time = now + vdev->stats_check_interval;
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
virtqueue = &vsession->virtqueue[q_idx];
req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
if (req_cnt <= io_threshold) {
@ -294,29 +301,30 @@ check_dev_io_stats(struct spdk_vhost_dev *vdev, uint64_t now)
}
void
spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
spdk_vhost_session_used_signal(struct spdk_vhost_session *vsession)
{
struct spdk_vhost_dev *vdev = vsession->vdev;
struct spdk_vhost_virtqueue *virtqueue;
uint64_t now;
uint16_t q_idx;
if (vdev->coalescing_delay_time_base == 0) {
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
virtqueue = &vsession->virtqueue[q_idx];
if (virtqueue->vring.desc == NULL ||
(virtqueue->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
continue;
}
spdk_vhost_vq_used_signal(vdev, virtqueue);
spdk_vhost_vq_used_signal(vsession, virtqueue);
}
} else {
now = spdk_get_ticks();
check_dev_io_stats(vdev, now);
check_session_io_stats(vsession, now);
for (q_idx = 0; q_idx < vdev->max_queues; q_idx++) {
virtqueue = &vdev->virtqueue[q_idx];
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
virtqueue = &vsession->virtqueue[q_idx];
/* No need for event right now */
if (now < virtqueue->next_event_time ||
@ -324,7 +332,7 @@ spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev)
continue;
}
if (!spdk_vhost_vq_used_signal(vdev, virtqueue)) {
if (!spdk_vhost_vq_used_signal(vsession, virtqueue)) {
continue;
}
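
A toy model of the coalescing decision in this loop; an assumed
simplification, since the real code derives the per-queue delay
from the measured request rate and vdev->coalescing_delay_time_base:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct vq_state {
	uint64_t next_event_time;   /* earliest tick for the next IRQ */
	uint64_t irq_delay;         /* delay added after each signalled IRQ */
};

static bool
maybe_signal(struct vq_state *vq, uint64_t now)
{
	if (now < vq->next_event_time) {
		return false;       /* coalesce: IRQ suppressed */
	}
	vq->next_event_time = now + vq->irq_delay;
	return true;                /* the real code calls eventfd_write() */
}

int main(void)
{
	struct vq_state vq = { .next_event_time = 0, .irq_delay = 100 };

	assert(maybe_signal(&vq, 10));    /* first IRQ goes out */
	assert(!maybe_signal(&vq, 50));   /* within the delay window */
	assert(maybe_signal(&vq, 120));   /* delay elapsed */
	return 0;
}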
@ -376,7 +384,8 @@ spdk_vhost_get_coalescing(struct spdk_vhost_dev *vdev, uint32_t *delay_base_us,
* Enqueue id and len to used ring.
*/
void
spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *virtqueue,
spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
struct spdk_vhost_virtqueue *virtqueue,
uint16_t id, uint32_t len)
{
struct rte_vhost_vring *vring = &virtqueue->vring;
@ -385,9 +394,9 @@ spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_v
SPDK_DEBUGLOG(SPDK_LOG_VHOST_RING,
"Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
virtqueue - vdev->virtqueue, vring->last_used_idx, id, len);
virtqueue - vsession->virtqueue, vring->last_used_idx, id, len);
spdk_vhost_log_req_desc(vdev, virtqueue, id);
spdk_vhost_log_req_desc(vsession, virtqueue, id);
vring->last_used_idx++;
used->ring[last_idx].id = id;
@ -396,9 +405,9 @@ spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_v
/* Ensure the used ring is updated before we log it or increment used->idx. */
spdk_smp_wmb();
spdk_vhost_log_used_vring_elem(vdev, virtqueue, last_idx);
spdk_vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
* (volatile uint16_t *) &used->idx = vring->last_used_idx;
spdk_vhost_log_used_vring_idx(vdev, virtqueue);
spdk_vhost_log_used_vring_idx(vsession, virtqueue);
/* Ensure all our used ring changes are visible to the guest at the time
* of interrupt.
@ -441,7 +450,7 @@ spdk_vhost_vring_desc_is_wr(struct vring_desc *cur_desc)
#define _2MB_OFFSET(ptr) ((ptr) & (0x200000 - 1))
int
spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
spdk_vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
uint16_t *iov_index, const struct vring_desc *desc)
{
uint32_t remaining = desc->len;
@ -455,7 +464,7 @@ spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
return -1;
}
vva = (uintptr_t)rte_vhost_gpa_to_vva(vdev->mem, payload);
vva = (uintptr_t)rte_vhost_gpa_to_vva(vsession->mem, payload);
if (vva == 0) {
SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
return -1;
@ -474,7 +483,7 @@ spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
*/
len = to_boundary;
while (len < remaining) {
if (vva + len != (uintptr_t)rte_vhost_gpa_to_vva(vdev->mem, payload + len)) {
if (vva + len != (uintptr_t)rte_vhost_gpa_to_vva(vsession->mem, payload + len)) {
break;
}
len += spdk_min(remaining - len, 0x200000);
@ -524,14 +533,14 @@ spdk_vhost_dev_find_by_vid(int vid)
#define CEIL_2MB(x) ((((uintptr_t)x) + SIZE_2MB - 1) / SIZE_2MB) << SHIFT_2MB
static void
spdk_vhost_dev_mem_register(struct spdk_vhost_dev *vdev)
spdk_vhost_session_mem_register(struct spdk_vhost_session *vsession)
{
struct rte_vhost_mem_region *region;
uint32_t i;
for (i = 0; i < vdev->mem->nregions; i++) {
for (i = 0; i < vsession->mem->nregions; i++) {
uint64_t start, end, len;
region = &vdev->mem->regions[i];
region = &vsession->mem->regions[i];
start = FLOOR_2MB(region->mmap_addr);
end = CEIL_2MB(region->mmap_addr + region->mmap_size);
len = end - start;
@ -547,14 +556,14 @@ spdk_vhost_dev_mem_register(struct spdk_vhost_dev *vdev)
}
static void
spdk_vhost_dev_mem_unregister(struct spdk_vhost_dev *vdev)
spdk_vhost_session_mem_unregister(struct spdk_vhost_session *vsession)
{
struct rte_vhost_mem_region *region;
uint32_t i;
for (i = 0; i < vdev->mem->nregions; i++) {
for (i = 0; i < vsession->mem->nregions; i++) {
uint64_t start, end, len;
region = &vdev->mem->regions[i];
region = &vsession->mem->regions[i];
start = FLOOR_2MB(region->mmap_addr);
end = CEIL_2MB(region->mmap_addr + region->mmap_size);
len = end - start;
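
The 2 MB rounding used by both helpers, spelled out. FLOOR_2MB is
not shown in this hunk; the mask-based definitions below are an
equivalent assumption (the diff's CEIL_2MB divides and shifts
instead):

#include <assert.h>
#include <stdint.h>

#define SIZE_2MB (2ULL * 1024 * 1024)

static uint64_t floor_2mb(uint64_t x) { return x & ~(SIZE_2MB - 1); }
static uint64_t ceil_2mb(uint64_t x)  { return (x + SIZE_2MB - 1) & ~(SIZE_2MB - 1); }

int main(void)
{
	/* A 4 KiB mmap'ed region in the middle of a 2 MB hugepage
	 * still registers the full 2 MB that contains it. */
	uint64_t start = floor_2mb(0x7f0000201000ULL);
	uint64_t end   = ceil_2mb(0x7f0000201000ULL + 0x1000);

	assert(start == 0x7f0000200000ULL);
	assert(end - start == SIZE_2MB);
	return 0;
}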
@ -1015,6 +1024,7 @@ static void
stop_device(int vid)
{
struct spdk_vhost_dev *vdev;
struct spdk_vhost_session *vsession;
struct rte_vhost_vring *q;
int rc;
uint16_t i;
@ -1040,16 +1050,17 @@ stop_device(int vid)
return;
}
for (i = 0; i < vdev->max_queues; i++) {
q = &vdev->virtqueue[i].vring;
vsession = &vdev->session;
for (i = 0; i < vsession->max_queues; i++) {
q = &vsession->virtqueue[i].vring;
if (q->desc == NULL) {
continue;
}
rte_vhost_set_vhost_vring_last_idx(vdev->vid, i, q->last_avail_idx, q->last_used_idx);
}
spdk_vhost_dev_mem_unregister(vdev);
free(vdev->mem);
spdk_vhost_session_mem_unregister(vsession);
free(vsession->mem);
spdk_vhost_free_reactor(vdev->lcore);
vdev->lcore = -1;
pthread_mutex_unlock(&g_spdk_vhost_mutex);
@ -1059,6 +1070,7 @@ static int
start_device(int vid)
{
struct spdk_vhost_dev *vdev;
struct spdk_vhost_session *vsession;
int rc = -1;
uint16_t i;
@ -1075,15 +1087,17 @@ start_device(int vid)
goto out;
}
vdev->max_queues = 0;
memset(vdev->virtqueue, 0, sizeof(vdev->virtqueue));
vsession = &vdev->session;
vsession->max_queues = 0;
memset(vsession->virtqueue, 0, sizeof(vsession->virtqueue));
for (i = 0; i < SPDK_VHOST_MAX_VQUEUES; i++) {
if (rte_vhost_get_vhost_vring(vid, i, &vdev->virtqueue[i].vring)) {
if (rte_vhost_get_vhost_vring(vid, i, &vsession->virtqueue[i].vring)) {
continue;
}
if (vdev->virtqueue[i].vring.desc == NULL ||
vdev->virtqueue[i].vring.size == 0) {
if (vsession->virtqueue[i].vring.desc == NULL ||
vsession->virtqueue[i].vring.size == 0) {
continue;
}
@ -1093,15 +1107,15 @@ start_device(int vid)
goto out;
}
vdev->max_queues = i + 1;
vsession->max_queues = i + 1;
}
if (rte_vhost_get_negotiated_features(vid, &vdev->negotiated_features) != 0) {
if (rte_vhost_get_negotiated_features(vid, &vsession->negotiated_features) != 0) {
SPDK_ERRLOG("vhost device %d: Failed to get negotiated driver features\n", vid);
goto out;
}
if (rte_vhost_get_mem_table(vid, &vdev->mem) != 0) {
if (rte_vhost_get_mem_table(vid, &vsession->mem) != 0) {
SPDK_ERRLOG("vhost device %d: Failed to get guest memory table\n", vid);
goto out;
}
@ -1115,18 +1129,18 @@ start_device(int vid)
*
* Tested on QEMU 2.10.91 and 2.11.50.
*/
for (i = 0; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.callfd != -1) {
eventfd_write(vdev->virtqueue[i].vring.callfd, (eventfd_t)1);
for (i = 0; i < vsession->max_queues; i++) {
if (vsession->virtqueue[i].vring.callfd != -1) {
eventfd_write(vsession->virtqueue[i].vring.callfd, (eventfd_t)1);
}
}
vdev->lcore = spdk_vhost_allocate_reactor(vdev->cpumask);
spdk_vhost_dev_mem_register(vdev);
spdk_vhost_session_mem_register(vsession);
rc = _spdk_vhost_event_send(vdev, vdev->backend->start_device, 3, "start device");
if (rc != 0) {
spdk_vhost_dev_mem_unregister(vdev);
free(vdev->mem);
spdk_vhost_session_mem_unregister(vsession);
free(vsession->mem);
spdk_vhost_free_reactor(vdev->lcore);
vdev->lcore = -1;
}
@ -1260,6 +1274,7 @@ new_connection(int vid)
}
vdev->vid = vid;
vdev->session.vdev = vdev;
pthread_mutex_unlock(&g_spdk_vhost_mutex);
return 0;
}

File: lib/vhost/vhost_blk.c

@ -85,8 +85,8 @@ process_blk_request(struct spdk_vhost_blk_task *task, struct spdk_vhost_blk_dev
static void
blk_task_finish(struct spdk_vhost_blk_task *task)
{
assert(task->bvdev->vdev.task_cnt > 0);
task->bvdev->vdev.task_cnt--;
assert(task->bvdev->vdev.session.task_cnt > 0);
task->bvdev->vdev.session.task_cnt--;
task->used = false;
}
@ -97,7 +97,7 @@ invalid_blk_request(struct spdk_vhost_blk_task *task, uint8_t status)
*task->status = status;
}
spdk_vhost_vq_used_ring_enqueue(&task->bvdev->vdev, task->vq, task->req_idx,
spdk_vhost_vq_used_ring_enqueue(&task->bvdev->vdev.session, task->vq, task->req_idx,
task->used_len);
blk_task_finish(task);
SPDK_DEBUGLOG(SPDK_LOG_VHOST_BLK_DATA, "Invalid request (status=%" PRIu8")\n", status);
@ -119,7 +119,7 @@ blk_iovs_setup(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq, uin
uint32_t desc_table_size, len = 0;
int rc;
rc = spdk_vhost_vq_get_desc(vdev, vq, req_idx, &desc, &desc_table, &desc_table_size);
rc = spdk_vhost_vq_get_desc(&vdev->session, vq, req_idx, &desc, &desc_table, &desc_table_size);
if (rc != 0) {
SPDK_ERRLOG("%s: Invalid descriptor at index %"PRIu16".\n", vdev->name, req_idx);
return -1;
@ -136,7 +136,7 @@ blk_iovs_setup(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq, uin
return -1;
}
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &cnt, desc))) {
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(&vdev->session, iovs, &cnt, desc))) {
SPDK_DEBUGLOG(SPDK_LOG_VHOST_BLK, "Invalid descriptor %" PRIu16" (req_idx = %"PRIu16").\n",
req_idx, cnt);
return -1;
@ -174,7 +174,7 @@ static void
blk_request_finish(bool success, struct spdk_vhost_blk_task *task)
{
*task->status = success ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR;
spdk_vhost_vq_used_ring_enqueue(&task->bvdev->vdev, task->vq, task->req_idx,
spdk_vhost_vq_used_ring_enqueue(&task->bvdev->vdev.session, task->vq, task->req_idx,
task->used_len);
SPDK_DEBUGLOG(SPDK_LOG_VHOST_BLK, "Finished task (%p) req_idx=%d\n status: %s\n", task,
task->req_idx, success ? "OK" : "FAIL");
@ -327,6 +327,7 @@ static void
process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_blk_task *task;
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
int rc;
uint16_t reqs[32];
uint16_t reqs_cnt, i;
@ -343,7 +344,7 @@ process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
bvdev->vdev.name, reqs[i], vq->vring.size);
spdk_vhost_vq_used_ring_enqueue(&bvdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
@ -351,11 +352,11 @@ process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
if (spdk_unlikely(task->used)) {
SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
bvdev->vdev.name, reqs[i]);
spdk_vhost_vq_used_ring_enqueue(&bvdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
bvdev->vdev.task_cnt++;
vsession->task_cnt++;
task->used = true;
task->iovcnt = SPDK_COUNTOF(task->iovs);
@ -376,13 +377,14 @@ static int
vdev_worker(void *arg)
{
struct spdk_vhost_blk_dev *bvdev = arg;
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
uint16_t q_idx;
for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
process_vq(bvdev, &vsession->virtqueue[q_idx]);
}
spdk_vhost_dev_used_signal(&bvdev->vdev);
spdk_vhost_session_used_signal(vsession);
return -1;
}
@ -390,6 +392,7 @@ vdev_worker(void *arg)
static void
no_bdev_process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
struct iovec iovs[SPDK_VHOST_IOVS_MAX];
uint32_t length;
uint16_t iovcnt, req_idx;
@ -404,22 +407,23 @@ no_bdev_process_vq(struct spdk_vhost_blk_dev *bvdev, struct spdk_vhost_virtqueue
SPDK_DEBUGLOG(SPDK_LOG_VHOST_BLK_DATA, "Aborting request %" PRIu16"\n", req_idx);
}
spdk_vhost_vq_used_ring_enqueue(&bvdev->vdev, vq, req_idx, 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, req_idx, 0);
}
static int
no_bdev_vdev_worker(void *arg)
{
struct spdk_vhost_blk_dev *bvdev = arg;
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
uint16_t q_idx;
for (q_idx = 0; q_idx < bvdev->vdev.max_queues; q_idx++) {
no_bdev_process_vq(bvdev, &bvdev->vdev.virtqueue[q_idx]);
for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
no_bdev_process_vq(bvdev, &vsession->virtqueue[q_idx]);
}
spdk_vhost_dev_used_signal(&bvdev->vdev);
spdk_vhost_session_used_signal(vsession);
if (bvdev->vdev.task_cnt == 0 && bvdev->bdev_io_channel) {
if (vsession->task_cnt == 0 && bvdev->bdev_io_channel) {
spdk_put_io_channel(bvdev->bdev_io_channel);
bvdev->bdev_io_channel = NULL;
}
@ -480,11 +484,12 @@ bdev_remove_cb(void *remove_ctx)
static void
free_task_pool(struct spdk_vhost_blk_dev *bvdev)
{
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
struct spdk_vhost_virtqueue *vq;
uint16_t i;
for (i = 0; i < bvdev->vdev.max_queues; i++) {
vq = &bvdev->vdev.virtqueue[i];
for (i = 0; i < vsession->max_queues; i++) {
vq = &vsession->virtqueue[i];
if (vq->tasks == NULL) {
continue;
}
@ -497,14 +502,15 @@ free_task_pool(struct spdk_vhost_blk_dev *bvdev)
static int
alloc_task_pool(struct spdk_vhost_blk_dev *bvdev)
{
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
struct spdk_vhost_virtqueue *vq;
struct spdk_vhost_blk_task *task;
uint32_t task_cnt;
uint16_t i;
uint32_t j;
for (i = 0; i < bvdev->vdev.max_queues; i++) {
vq = &bvdev->vdev.virtqueue[i];
for (i = 0; i < vsession->max_queues; i++) {
vq = &vsession->virtqueue[i];
if (vq->vring.desc == NULL) {
continue;
}
@ -546,6 +552,7 @@ static int
spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
{
struct spdk_vhost_blk_dev *bvdev;
struct spdk_vhost_session *vsession = &vdev->session;
int i, rc = 0;
bvdev = to_blk_dev(vdev);
@ -556,8 +563,8 @@ spdk_vhost_blk_start(struct spdk_vhost_dev *vdev, void *event_ctx)
}
/* validate all I/O queues are in a contiguous index range */
for (i = 0; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.desc == NULL) {
for (i = 0; i < vsession->max_queues; i++) {
if (vsession->virtqueue[i].vring.desc == NULL) {
SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
rc = -1;
goto out;
@ -593,15 +600,16 @@ static int
destroy_device_poller_cb(void *arg)
{
struct spdk_vhost_blk_dev *bvdev = arg;
struct spdk_vhost_session *vsession = &bvdev->vdev.session;
int i;
if (bvdev->vdev.task_cnt > 0) {
if (vsession->task_cnt > 0) {
return -1;
}
for (i = 0; i < bvdev->vdev.max_queues; i++) {
bvdev->vdev.virtqueue[i].next_event_time = 0;
spdk_vhost_vq_used_signal(&bvdev->vdev, &bvdev->vdev.virtqueue[i]);
for (i = 0; i < vsession->max_queues; i++) {
vsession->virtqueue[i].next_event_time = 0;
spdk_vhost_vq_used_signal(vsession, &vsession->virtqueue[i]);
}
SPDK_INFOLOG(SPDK_LOG_VHOST, "Stopping poller for vhost controller %s\n", bvdev->vdev.name);

File: lib/vhost/vhost_internal.h

@ -134,7 +134,6 @@ struct spdk_vhost_dev_backend {
};
struct spdk_vhost_dev {
struct rte_vhost_memory *mem;
char *name;
char *path;
@ -143,7 +142,6 @@ struct spdk_vhost_dev {
/* rte_vhost device ID. */
int vid;
int task_cnt;
int32_t lcore;
struct spdk_cpuset *cpumask;
bool registered;
@ -167,11 +165,20 @@ struct spdk_vhost_dev {
/* Interval used for event coalescing checking. */
uint64_t stats_check_interval;
/* Active connection to the device */
struct spdk_vhost_session {
struct spdk_vhost_dev *vdev;
struct rte_vhost_memory *mem;
int task_cnt;
uint16_t max_queues;
uint64_t negotiated_features;
struct spdk_vhost_virtqueue virtqueue[SPDK_VHOST_MAX_VQUEUES];
} session;
TAILQ_ENTRY(spdk_vhost_dev) tailq;
};
@ -183,7 +190,7 @@ struct spdk_vhost_dev_destroy_ctx {
struct spdk_vhost_dev *spdk_vhost_dev_find(const char *ctrlr_name);
void *spdk_vhost_gpa_to_vva(struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len);
void *spdk_vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len);
uint16_t spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t *reqs,
uint16_t reqs_len);
@ -193,7 +200,7 @@ uint16_t spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t
* The descriptor will provide access to the entire descriptor
* chain. The subsequent descriptors are accessible via
* \c spdk_vhost_vring_desc_get_next.
* \param vdev vhost device
* \param vsession vhost session
* \param vq virtqueue
* \param req_idx descriptor index
* \param desc pointer to be set to the descriptor
@ -205,29 +212,30 @@ uint16_t spdk_vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *vq, uint16_t
* \return 0 on success, -1 if given index is invalid.
* If -1 is returned, the content of params is undefined.
*/
int spdk_vhost_vq_get_desc(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq,
int spdk_vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq,
uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
uint32_t *desc_table_size);
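
A self-contained illustration of the chain walk this API enables.
The simplified helper below mirrors, as an assumption, the
semantics of spdk_vhost_vring_desc_get_next(): advance to the next
descriptor, set *desc to NULL at the end of the chain, and fail on
an out-of-range next index.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

static int
desc_get_next(struct vring_desc **desc, struct vring_desc *table, uint32_t size)
{
	if (!((*desc)->flags & VRING_DESC_F_NEXT)) {
		*desc = NULL;           /* end of chain */
		return 0;
	}
	if ((*desc)->next >= size) {
		return -1;              /* malformed chain */
	}
	*desc = &table[(*desc)->next];
	return 0;
}

int main(void)
{
	struct vring_desc table[2] = {
		{ .addr = 0x1000, .len = 16, .flags = VRING_DESC_F_NEXT, .next = 1 },
		{ .addr = 0x2000, .len = 32, .flags = 0, .next = 0 },
	};
	struct vring_desc *desc = &table[0];
	uint32_t total = 0;

	while (desc != NULL) {
		total += desc->len;
		if (desc_get_next(&desc, table, 2) != 0) {
			return 1;
		}
	}
	assert(total == 48);
	return 0;
}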
/**
* Send IRQ/call client (if pending) for \c vq.
* \param vdev vhost device
* \param vsession vhost session
* \param vq virtqueue
* \return
* 0 - if no interrupt was signalled
* 1 - if interrupt was signalled
*/
int spdk_vhost_vq_used_signal(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq);
int spdk_vhost_vq_used_signal(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *vq);
/**
* Send IRQs for all queues that need to be signaled.
* \param vdev vhost device
* \param vsession vhost session
*/
void spdk_vhost_dev_used_signal(struct spdk_vhost_dev *vdev);
void spdk_vhost_session_used_signal(struct spdk_vhost_session *vsession);
void spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_dev *vdev, struct spdk_vhost_virtqueue *vq,
void spdk_vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
struct spdk_vhost_virtqueue *vq,
uint16_t id, uint32_t len);
/**
@ -245,13 +253,13 @@ int spdk_vhost_vring_desc_get_next(struct vring_desc **desc,
struct vring_desc *desc_table, uint32_t desc_table_size);
bool spdk_vhost_vring_desc_is_wr(struct vring_desc *cur_desc);
int spdk_vhost_vring_desc_to_iov(struct spdk_vhost_dev *vdev, struct iovec *iov,
int spdk_vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
uint16_t *iov_index, const struct vring_desc *desc);
static inline bool __attribute__((always_inline))
spdk_vhost_dev_has_feature(struct spdk_vhost_dev *vdev, unsigned feature_id)
spdk_vhost_dev_has_feature(struct spdk_vhost_session *vsession, unsigned feature_id)
{
return vdev->negotiated_features & (1ULL << feature_id);
return vsession->negotiated_features & (1ULL << feature_id);
}
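
The feature check is a plain bit test against the features
negotiated for this session; a minimal stand-alone version
(VHOST_F_LOG_ALL is bit 26 in the vhost protocol):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define VHOST_F_LOG_ALL 26

static bool
has_feature(uint64_t negotiated_features, unsigned feature_id)
{
	return negotiated_features & (1ULL << feature_id);
}

int main(void)
{
	assert(!has_feature(0, VHOST_F_LOG_ALL));
	assert(has_feature(1ULL << VHOST_F_LOG_ALL, VHOST_F_LOG_ALL));
	return 0;
}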
int spdk_vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,

File: lib/vhost/vhost_nvme.c

@ -248,6 +248,7 @@ static int
spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
struct spdk_vhost_nvme_task *task, uint32_t len)
{
struct spdk_vhost_session *vsession = &nvme->vdev.session;
uint64_t prp1, prp2;
void *vva;
uint32_t i;
@ -261,7 +262,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
residue_len = mps - (prp1 % mps);
residue_len = spdk_min(len, residue_len);
vva = spdk_vhost_gpa_to_vva(&nvme->vdev, prp1, residue_len);
vva = spdk_vhost_gpa_to_vva(vsession, prp1, residue_len);
if (spdk_unlikely(vva == NULL)) {
SPDK_ERRLOG("GPA to VVA failed\n");
return -1;
@ -279,7 +280,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
if (len <= mps) {
/* 2 PRP used */
task->iovcnt = 2;
vva = spdk_vhost_gpa_to_vva(&nvme->vdev, prp2, len);
vva = spdk_vhost_gpa_to_vva(vsession, prp2, len);
if (spdk_unlikely(vva == NULL)) {
return -1;
}
@ -288,7 +289,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
} else {
/* PRP list used */
nents = (len + mps - 1) / mps;
vva = spdk_vhost_gpa_to_vva(&nvme->vdev, prp2, nents * sizeof(*prp_list));
vva = spdk_vhost_gpa_to_vva(vsession, prp2, nents * sizeof(*prp_list));
if (spdk_unlikely(vva == NULL)) {
return -1;
}
@ -296,7 +297,7 @@ spdk_nvme_map_prps(struct spdk_vhost_nvme_dev *nvme, struct spdk_nvme_cmd *cmd,
i = 0;
while (len != 0) {
residue_len = spdk_min(len, mps);
vva = spdk_vhost_gpa_to_vva(&nvme->vdev, prp_list[i], residue_len);
vva = spdk_vhost_gpa_to_vva(vsession, prp_list[i], residue_len);
if (spdk_unlikely(vva == NULL)) {
return -1;
}
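
A stand-alone model of the PRP accounting above; an assumed
simplification that only counts entries, while the real code also
translates every entry through spdk_vhost_gpa_to_vva():

#include <assert.h>
#include <stdint.h>

/* PRP1 covers the tail of its page; the rest is either PRP2
 * directly (at most one more page) or a PRP list entry per page.
 * mps = memory page size. */
static uint32_t
prp_entries_needed(uint32_t len, uint32_t mps, uint64_t prp1)
{
	uint32_t covered = mps - (uint32_t)(prp1 % mps); /* PRP1 residue */

	if (len <= covered) {
		return 1;                         /* PRP1 alone */
	}
	len -= covered;
	if (len <= mps) {
		return 2;                         /* PRP1 + PRP2 */
	}
	return 1 + (len + mps - 1) / mps;         /* PRP1 + PRP list */
}

int main(void)
{
	assert(prp_entries_needed(0x100,  0x1000, 0x2000) == 1);
	assert(prp_entries_needed(0x1800, 0x1000, 0x2000) == 2);
	assert(prp_entries_needed(0x4000, 0x1000, 0x2800) == 5);
	return 0;
}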
@ -698,6 +699,7 @@ static int
vhost_nvme_doorbell_buffer_config(struct spdk_vhost_nvme_dev *nvme,
struct spdk_nvme_cmd *cmd, struct spdk_nvme_cpl *cpl)
{
struct spdk_vhost_session *vsession = &nvme->vdev.session;
uint64_t dbs_dma_addr, eis_dma_addr;
dbs_dma_addr = cmd->dptr.prp.prp1;
@ -707,8 +709,8 @@ vhost_nvme_doorbell_buffer_config(struct spdk_vhost_nvme_dev *nvme,
return -1;
}
/* Guest Physical Address to Host Virtual Address */
nvme->dbbuf_dbs = spdk_vhost_gpa_to_vva(&nvme->vdev, dbs_dma_addr, 4096);
nvme->dbbuf_eis = spdk_vhost_gpa_to_vva(&nvme->vdev, eis_dma_addr, 4096);
nvme->dbbuf_dbs = spdk_vhost_gpa_to_vva(vsession, dbs_dma_addr, 4096);
nvme->dbbuf_eis = spdk_vhost_gpa_to_vva(vsession, eis_dma_addr, 4096);
if (!nvme->dbbuf_dbs || !nvme->dbbuf_eis) {
return -1;
}
@ -763,7 +765,7 @@ vhost_nvme_create_io_sq(struct spdk_vhost_nvme_dev *nvme,
sq->size = qsize + 1;
sq->sq_head = sq->sq_tail = 0;
requested_len = sizeof(struct spdk_nvme_cmd) * sq->size;
sq->sq_cmd = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
sq->sq_cmd = spdk_vhost_gpa_to_vva(&nvme->vdev.session, dma_addr, requested_len);
if (!sq->sq_cmd) {
return -1;
}
@ -846,7 +848,7 @@ vhost_nvme_create_io_cq(struct spdk_vhost_nvme_dev *nvme,
cq->guest_signaled_cq_head = 0;
cq->need_signaled_cnt = 0;
requested_len = sizeof(struct spdk_nvme_cpl) * cq->size;
cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev, dma_addr, requested_len);
cq->cq_cqe = spdk_vhost_gpa_to_vva(&nvme->vdev.session, dma_addr, requested_len);
if (!cq->cq_cqe) {
return -1;
}

File: lib/vhost/vhost_scsi.c

@ -134,9 +134,10 @@ static void
spdk_vhost_scsi_task_free_cb(struct spdk_scsi_task *scsi_task)
{
struct spdk_vhost_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_vhost_scsi_task, scsi);
struct spdk_vhost_session *vsession = &task->svdev->vdev.session;
assert(task->svdev->vdev.task_cnt > 0);
task->svdev->vdev.task_cnt--;
assert(vsession->task_cnt > 0);
vsession->task_cnt--;
task->used = false;
}
@ -169,6 +170,7 @@ static void
eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_t event,
uint32_t reason)
{
struct spdk_vhost_session *vsession = &svdev->vdev.session;
struct spdk_vhost_virtqueue *vq;
struct vring_desc *desc, *desc_table;
struct virtio_scsi_event *desc_ev;
@ -177,7 +179,7 @@ eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_
int rc;
assert(scsi_dev_num < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS);
vq = &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ];
vq = &vsession->virtqueue[VIRTIO_SCSI_EVENTQ];
if (spdk_vhost_vq_avail_ring_get(vq, &req, 1) != 1) {
SPDK_ERRLOG("Controller %s: Failed to send virtio event (no avail ring entries?).\n",
@ -185,14 +187,14 @@ eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_
return;
}
rc = spdk_vhost_vq_get_desc(&svdev->vdev, vq, req, &desc, &desc_table, &desc_table_size);
rc = spdk_vhost_vq_get_desc(vsession, vq, req, &desc, &desc_table, &desc_table_size);
if (rc != 0 || desc->len < sizeof(*desc_ev)) {
SPDK_ERRLOG("Controller %s: Invalid eventq descriptor at index %"PRIu16".\n",
svdev->vdev.name, req);
goto out;
}
desc_ev = spdk_vhost_gpa_to_vva(&svdev->vdev, desc->addr, sizeof(*desc_ev));
desc_ev = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*desc_ev));
if (desc_ev == NULL) {
SPDK_ERRLOG("Controller %s: Eventq descriptor at index %"PRIu16" points to unmapped guest memory address %p.\n",
svdev->vdev.name, req, (void *)(uintptr_t)desc->addr);
@ -215,13 +217,15 @@ eventq_enqueue(struct spdk_vhost_scsi_dev *svdev, unsigned scsi_dev_num, uint32_
req_size = sizeof(*desc_ev);
out:
spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, req, req_size);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, req, req_size);
}
static void
submit_completion(struct spdk_vhost_scsi_task *task)
{
spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
struct spdk_vhost_session *vsession = &task->svdev->vdev.session;
spdk_vhost_vq_used_ring_enqueue(vsession, task->vq, task->req_idx,
task->used_len);
SPDK_DEBUGLOG(SPDK_LOG_VHOST_SCSI, "Finished task (%p) req_idx=%d\n", task, task->req_idx);
@ -276,7 +280,9 @@ mgmt_task_submit(struct spdk_vhost_scsi_task *task, enum spdk_scsi_task_func fun
static void
invalid_request(struct spdk_vhost_scsi_task *task)
{
spdk_vhost_vq_used_ring_enqueue(&task->svdev->vdev, task->vq, task->req_idx,
struct spdk_vhost_session *vsession = &task->svdev->vdev.session;
spdk_vhost_vq_used_ring_enqueue(vsession, task->vq, task->req_idx,
task->used_len);
spdk_vhost_scsi_task_put(task);
@ -315,6 +321,7 @@ static void
process_ctrl_request(struct spdk_vhost_scsi_task *task)
{
struct spdk_vhost_dev *vdev = &task->svdev->vdev;
struct spdk_vhost_session *vsession = &vdev->session;
struct vring_desc *desc, *desc_table;
struct virtio_scsi_ctrl_tmf_req *ctrl_req;
struct virtio_scsi_ctrl_an_resp *an_resp;
@ -322,14 +329,15 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
int rc;
spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_mgmt_cpl, spdk_vhost_scsi_task_free_cb);
rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_size);
rc = spdk_vhost_vq_get_desc(vsession, task->vq, task->req_idx, &desc, &desc_table,
&desc_table_size);
if (spdk_unlikely(rc != 0)) {
SPDK_ERRLOG("%s: Invalid controlq descriptor at index %d.\n",
vdev->name, task->req_idx);
goto out;
}
ctrl_req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*ctrl_req));
ctrl_req = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*ctrl_req));
if (ctrl_req == NULL) {
SPDK_ERRLOG("%s: Invalid task management request at index %d.\n",
vdev->name, task->req_idx);
@ -354,7 +362,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
/* Process the TMF request */
switch (ctrl_req->type) {
case VIRTIO_SCSI_T_TMF:
task->tmf_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->tmf_resp));
task->tmf_resp = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*task->tmf_resp));
if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_tmf_resp) || task->tmf_resp == NULL)) {
SPDK_ERRLOG("%s: TMF response descriptor at index %d points to invalid guest memory region\n",
vdev->name, task->req_idx);
@ -383,7 +391,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
break;
case VIRTIO_SCSI_T_AN_QUERY:
case VIRTIO_SCSI_T_AN_SUBSCRIBE: {
an_resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*an_resp));
an_resp = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*an_resp));
if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_ctrl_an_resp) || an_resp == NULL)) {
SPDK_WARNLOG("%s: Asynchronous response descriptor points to invalid guest memory region\n",
vdev->name);
@ -400,7 +408,7 @@ process_ctrl_request(struct spdk_vhost_scsi_task *task)
used_len = sizeof(struct virtio_scsi_ctrl_tmf_resp);
out:
spdk_vhost_vq_used_ring_enqueue(vdev, task->vq, task->req_idx, used_len);
spdk_vhost_vq_used_ring_enqueue(vsession, task->vq, task->req_idx, used_len);
spdk_vhost_scsi_task_put(task);
}
@ -415,6 +423,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
struct virtio_scsi_cmd_req **req)
{
struct spdk_vhost_dev *vdev = &task->svdev->vdev;
struct spdk_vhost_session *vsession = &vdev->session;
struct vring_desc *desc, *desc_table;
struct iovec *iovs = task->iovs;
uint16_t iovcnt = 0;
@ -423,7 +432,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
spdk_scsi_task_construct(&task->scsi, spdk_vhost_scsi_task_cpl, spdk_vhost_scsi_task_free_cb);
rc = spdk_vhost_vq_get_desc(vdev, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
rc = spdk_vhost_vq_get_desc(vsession, task->vq, task->req_idx, &desc, &desc_table, &desc_table_len);
/* First descriptor must be readable */
if (spdk_unlikely(rc != 0 || spdk_vhost_vring_desc_is_wr(desc) ||
desc->len < sizeof(struct virtio_scsi_cmd_req))) {
@ -432,7 +441,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
goto invalid_task;
}
*req = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(**req));
*req = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(**req));
if (spdk_unlikely(*req == NULL)) {
SPDK_WARNLOG("%s: Request descriptor at index %d points to invalid guest memory region\n",
vdev->name, task->req_idx);
@ -454,7 +463,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
/*
* FROM_DEV (READ): [RD_req][WR_resp][WR_buf0]...[WR_bufN]
*/
task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
task->resp = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*task->resp));
if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
vdev->name, task->req_idx);
@ -489,7 +498,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
goto invalid_task;
}
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vsession, iovs, &iovcnt, desc))) {
goto invalid_task;
}
len += desc->len;
@ -512,7 +521,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
/* Process descriptors up to response. */
while (!spdk_vhost_vring_desc_is_wr(desc)) {
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vdev, iovs, &iovcnt, desc))) {
if (spdk_unlikely(spdk_vhost_vring_desc_to_iov(vsession, iovs, &iovcnt, desc))) {
goto invalid_task;
}
len += desc->len;
@ -524,7 +533,7 @@ task_data_setup(struct spdk_vhost_scsi_task *task,
}
}
task->resp = spdk_vhost_gpa_to_vva(vdev, desc->addr, sizeof(*task->resp));
task->resp = spdk_vhost_gpa_to_vva(vsession, desc->addr, sizeof(*task->resp));
if (spdk_unlikely(desc->len < sizeof(struct virtio_scsi_cmd_resp) || task->resp == NULL)) {
SPDK_WARNLOG("%s: Response descriptor at index %d points to invalid guest memory region\n",
vdev->name, task->req_idx);
@ -577,6 +586,7 @@ process_request(struct spdk_vhost_scsi_task *task)
static void
process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_session *vsession = &svdev->vdev.session;
struct spdk_vhost_scsi_task *task;
uint16_t reqs[32];
uint16_t reqs_cnt, i;
@ -586,7 +596,7 @@ process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue
if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' exceeds virtqueue size (%"PRIu16")\n",
svdev->vdev.name, reqs[i], vq->vring.size);
spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
@ -594,11 +604,11 @@ process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue
if (spdk_unlikely(task->used)) {
SPDK_ERRLOG("%s: invalid entry in avail ring. Buffer '%"PRIu16"' is still in use!\n",
svdev->vdev.name, reqs[i]);
spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
svdev->vdev.task_cnt++;
vsession->task_cnt++;
memset(&task->scsi, 0, sizeof(task->scsi));
task->tmf_resp = NULL;
task->used = true;
@ -609,6 +619,7 @@ process_controlq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue
static void
process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue *vq)
{
struct spdk_vhost_session *vsession = &svdev->vdev.session;
struct spdk_vhost_scsi_task *task;
uint16_t reqs[32];
uint16_t reqs_cnt, i;
@ -624,7 +635,7 @@ process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue
if (spdk_unlikely(reqs[i] >= vq->vring.size)) {
SPDK_ERRLOG("%s: request idx '%"PRIu16"' exceeds virtqueue size (%"PRIu16").\n",
svdev->vdev.name, reqs[i], vq->vring.size);
spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
@ -632,11 +643,11 @@ process_requestq(struct spdk_vhost_scsi_dev *svdev, struct spdk_vhost_virtqueue
if (spdk_unlikely(task->used)) {
SPDK_ERRLOG("%s: request with idx '%"PRIu16"' is already pending.\n",
svdev->vdev.name, reqs[i]);
spdk_vhost_vq_used_ring_enqueue(&svdev->vdev, vq, reqs[i], 0);
spdk_vhost_vq_used_ring_enqueue(vsession, vq, reqs[i], 0);
continue;
}
svdev->vdev.task_cnt++;
vsession->task_cnt++;
memset(&task->scsi, 0, sizeof(task->scsi));
task->resp = NULL;
task->used = true;
@ -662,12 +673,13 @@ static int
vdev_mgmt_worker(void *arg)
{
struct spdk_vhost_scsi_dev *svdev = arg;
struct spdk_vhost_session *vsession = &svdev->vdev.session;
process_removed_devs(svdev);
spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_EVENTQ]);
spdk_vhost_vq_used_signal(vsession, &vsession->virtqueue[VIRTIO_SCSI_EVENTQ]);
process_controlq(svdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[VIRTIO_SCSI_CONTROLQ]);
process_controlq(svdev, &vsession->virtqueue[VIRTIO_SCSI_CONTROLQ]);
spdk_vhost_vq_used_signal(vsession, &vsession->virtqueue[VIRTIO_SCSI_CONTROLQ]);
return -1;
}
@ -676,13 +688,14 @@ static int
vdev_worker(void *arg)
{
struct spdk_vhost_scsi_dev *svdev = arg;
struct spdk_vhost_session *vsession = &svdev->vdev.session;
uint32_t q_idx;
for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < svdev->vdev.max_queues; q_idx++) {
process_requestq(svdev, &svdev->vdev.virtqueue[q_idx]);
for (q_idx = VIRTIO_SCSI_REQUESTQ; q_idx < vsession->max_queues; q_idx++) {
process_requestq(svdev, &vsession->virtqueue[q_idx]);
}
spdk_vhost_dev_used_signal(&svdev->vdev);
spdk_vhost_session_used_signal(vsession);
return -1;
}
@ -780,7 +793,7 @@ spdk_vhost_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg)
assert(lun != NULL);
assert(svdev != NULL);
if (svdev->vdev.lcore != -1 &&
!spdk_vhost_dev_has_feature(&svdev->vdev, VIRTIO_SCSI_F_HOTPLUG)) {
!spdk_vhost_dev_has_feature(&svdev->vdev.session, VIRTIO_SCSI_F_HOTPLUG)) {
SPDK_WARNLOG("%s: hotremove is not enabled for this controller.\n", svdev->vdev.name);
return;
}
@ -860,7 +873,7 @@ spdk_vhost_scsi_dev_add_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_num,
spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[scsi_tgt_num]);
if (spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
if (spdk_vhost_dev_has_feature(&vdev->session, VIRTIO_SCSI_F_HOTPLUG)) {
eventq_enqueue(svdev, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET,
VIRTIO_SCSI_EVT_RESET_RESCAN);
} else {
@ -909,7 +922,7 @@ spdk_vhost_scsi_dev_remove_tgt(struct spdk_vhost_dev *vdev, unsigned scsi_tgt_nu
return rc;
}
if (!spdk_vhost_dev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
if (!spdk_vhost_dev_has_feature(&vdev->session, VIRTIO_SCSI_F_HOTPLUG)) {
SPDK_WARNLOG("%s: 'Target %u' is in use and hot-detach is not enabled for this controller.\n",
svdev->vdev.name, scsi_tgt_num);
return -ENOTSUP;
@ -1002,11 +1015,12 @@ spdk_vhost_scsi_controller_construct(void)
static void
free_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
struct spdk_vhost_session *vsession = &svdev->vdev.session;
struct spdk_vhost_virtqueue *vq;
uint16_t i;
for (i = 0; i < svdev->vdev.max_queues; i++) {
vq = &svdev->vdev.virtqueue[i];
for (i = 0; i < vsession->max_queues; i++) {
vq = &vsession->virtqueue[i];
if (vq->tasks == NULL) {
continue;
}
@ -1019,14 +1033,15 @@ free_task_pool(struct spdk_vhost_scsi_dev *svdev)
static int
alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
{
struct spdk_vhost_session *vsession = &svdev->vdev.session;
struct spdk_vhost_virtqueue *vq;
struct spdk_vhost_scsi_task *task;
uint32_t task_cnt;
uint16_t i;
uint32_t j;
for (i = 0; i < svdev->vdev.max_queues; i++) {
vq = &svdev->vdev.virtqueue[i];
for (i = 0; i < vsession->max_queues; i++) {
vq = &vsession->virtqueue[i];
if (vq->vring.desc == NULL) {
continue;
}
@ -1067,6 +1082,7 @@ static int
spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
{
struct spdk_vhost_scsi_dev *svdev;
struct spdk_vhost_session *vsession = &vdev->session;
uint32_t i;
int rc;
@ -1078,8 +1094,8 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
}
/* validate all I/O queues are in a contiguous index range */
for (i = VIRTIO_SCSI_REQUESTQ; i < vdev->max_queues; i++) {
if (vdev->virtqueue[i].vring.desc == NULL) {
for (i = VIRTIO_SCSI_REQUESTQ; i < vsession->max_queues; i++) {
if (vsession->virtqueue[i].vring.desc == NULL) {
SPDK_ERRLOG("%s: queue %"PRIu32" is empty\n", vdev->name, i);
rc = -1;
goto out;
@ -1102,8 +1118,8 @@ spdk_vhost_scsi_start(struct spdk_vhost_dev *vdev, void *event_ctx)
vdev->name, vdev->lcore);
svdev->requestq_poller = spdk_poller_register(vdev_worker, svdev, 0);
if (vdev->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
vdev->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
if (vsession->virtqueue[VIRTIO_SCSI_CONTROLQ].vring.desc &&
vsession->virtqueue[VIRTIO_SCSI_EVENTQ].vring.desc) {
svdev->mgmt_poller = spdk_poller_register(vdev_mgmt_worker, svdev,
MGMT_POLL_PERIOD_US);
}
@ -1116,15 +1132,16 @@ static int
destroy_device_poller_cb(void *arg)
{
struct spdk_vhost_scsi_dev *svdev = arg;
struct spdk_vhost_session *vsession = &svdev->vdev.session;
uint32_t i;
if (svdev->vdev.task_cnt > 0) {
if (vsession->task_cnt > 0) {
return -1;
}
for (i = 0; i < svdev->vdev.max_queues; i++) {
spdk_vhost_vq_used_signal(&svdev->vdev, &svdev->vdev.virtqueue[i]);
for (i = 0; i < vsession->max_queues; i++) {
spdk_vhost_vq_used_signal(vsession, &vsession->virtqueue[i]);
}
for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {

File: test/unit/lib/vhost/vhost.c/vhost_ut.c

@ -151,24 +151,28 @@ alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask
static void
start_vdev(struct spdk_vhost_dev *vdev)
{
struct rte_vhost_memory *mem;
mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
SPDK_CU_ASSERT_FATAL(mem != NULL);
mem->nregions = 2;
mem->regions[0].guest_phys_addr = 0;
mem->regions[0].size = 0x400000; /* 4 MB */
mem->regions[0].host_user_addr = 0x1000000;
mem->regions[1].guest_phys_addr = 0x400000;
mem->regions[1].size = 0x400000; /* 4 MB */
mem->regions[1].host_user_addr = 0x2000000;
vdev->vid = 0;
vdev->lcore = 0;
vdev->mem = calloc(1, sizeof(*vdev->mem) + 2 * sizeof(struct rte_vhost_mem_region));
SPDK_CU_ASSERT_FATAL(vdev->mem != NULL);
vdev->mem->nregions = 2;
vdev->mem->regions[0].guest_phys_addr = 0;
vdev->mem->regions[0].size = 0x400000; /* 4 MB */
vdev->mem->regions[0].host_user_addr = 0x1000000;
vdev->mem->regions[1].guest_phys_addr = 0x400000;
vdev->mem->regions[1].size = 0x400000; /* 4 MB */
vdev->mem->regions[1].host_user_addr = 0x2000000;
vdev->session.mem = mem;
}
static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
free(vdev->mem);
vdev->mem = NULL;
free(vdev->session.mem);
vdev->session.mem = NULL;
vdev->vid = -1;
}
@ -184,6 +188,7 @@ static void
desc_to_iov_test(void)
{
struct spdk_vhost_dev *vdev;
struct spdk_vhost_session *vsession;
struct iovec iov[SPDK_VHOST_IOVS_MAX];
uint16_t iov_index;
struct vring_desc desc;
@ -193,11 +198,13 @@ desc_to_iov_test(void)
SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
start_vdev(vdev);
vsession = &vdev->session;
/* Test simple case where iov falls fully within a 2MB page. */
desc.addr = 0x110000;
desc.len = 0x1000;
iov_index = 0;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(iov_index == 1);
CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
@ -210,7 +217,7 @@ desc_to_iov_test(void)
/* Same test, but ensure it respects the non-zero starting iov_index. */
iov_index = SPDK_VHOST_IOVS_MAX - 1;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
@ -219,7 +226,7 @@ desc_to_iov_test(void)
/* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
iov_index = SPDK_VHOST_IOVS_MAX;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc != 0);
memset(iov, 0, sizeof(iov));
@ -227,7 +234,7 @@ desc_to_iov_test(void)
desc.addr = 0x1F0000;
desc.len = 0x20000;
iov_index = 0;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(iov_index == 1);
CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
@ -236,7 +243,7 @@ desc_to_iov_test(void)
/* Same test, but ensure it respects the non-zero starting iov_index. */
iov_index = SPDK_VHOST_IOVS_MAX - 1;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
@ -247,7 +254,7 @@ desc_to_iov_test(void)
desc.addr = 0x3F0000;
desc.len = 0x20000;
iov_index = 0;
rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
rc = spdk_vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
CU_ASSERT(rc == 0);
CU_ASSERT(iov_index == 2);
CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);