module/vfu_device: add virtio-blk emulation
Here we use the vfu-tgt library and emulate a virtio-blk device as the first
use case of the vfu-tgt library.

Usage example with QEMU:

1. build/bin/spdk_tgt
2. scripts/rpc.py bdev_malloc_create -b malloc0 $((512)) 512
3. scripts/rpc.py vfu_virtio_create_blk_endpoint vfu.0 --bdev-name malloc0 \
       --cpumask=0x1 --num-queues=2 \
       --qsize=256 --packed-ring
4. Start QEMU with '-device vfio-user-pci,socket=/spdk/vfu.0'

Change-Id: I45e45360c669584583b0b8a3f83250ab6c48efec
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12315
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
parent da231290b2
commit 23ef63882c
@@ -8010,6 +8010,85 @@ Example response:
}
~~~

### vfu_virtio_delete_endpoint {#rpc_vfu_virtio_delete_endpoint}

Delete a PCI device via its endpoint name.

#### Parameters

Name                    | Optional | Type        | Description
----------------------- | -------- | ----------- | -----------
name                    | Required | string      | Endpoint name

#### Example

Example request:

~~~json
{
  "params": {
    "name": "vfu.0"
  },
  "jsonrpc": "2.0",
  "method": "vfu_virtio_delete_endpoint",
  "id": 1
}
~~~

Example response:

~~~json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": true
}
~~~

### vfu_virtio_create_blk_endpoint {#rpc_vfu_virtio_create_blk_endpoint}

Create a vfio-user virtio-blk PCI endpoint.

#### Parameters

Name                    | Optional | Type        | Description
----------------------- | -------- | ----------- | -----------
name                    | Required | string      | Endpoint name
bdev_name               | Required | string      | Block device name
cpumask                 | Optional | string      | CPU core mask
num_queues              | Optional | number      | Number of queues
qsize                   | Optional | number      | Queue size
packed_ring             | Optional | boolean     | Enable packed ring

#### Example

Example request:

~~~json
{
  "params": {
    "name": "vfu.0",
    "bdev_name": "Malloc0",
    "cpumask": "0x2",
    "num_queues": 4,
    "qsize": 256
  },
  "jsonrpc": "2.0",
  "method": "vfu_virtio_create_blk_endpoint",
  "id": 1
}
~~~

Example response:

~~~json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": true
}
~~~
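For reference, a minimal Python sketch of driving the two RPCs above through SPDK's Python JSON-RPC client (`spdk.rpc.client`). This snippet is not part of the patch itself; the socket path, bdev name and queue settings are illustrative assumptions rather than values mandated by this change.

~~~python
# Hedged sketch: exercise the new RPCs with SPDK's Python JSON-RPC client.
# Assumes a running spdk_tgt listening on /var/tmp/spdk.sock and an existing
# bdev named "Malloc0"; adjust both to your setup.
from spdk.rpc.client import JSONRPCClient

client = JSONRPCClient("/var/tmp/spdk.sock")

# Create a vfio-user virtio-blk endpoint backed by the bdev.
client.call("vfu_virtio_create_blk_endpoint", {
    "name": "vfu.0",
    "bdev_name": "Malloc0",
    "num_queues": 2,
    "qsize": 256,
})

# ...attach QEMU via '-device vfio-user-pci,socket=<base path>/vfu.0'...

# Tear the endpoint down when done.
client.call("vfu_virtio_delete_endpoint", {"name": "vfu.0"})
~~~
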
## Vhost Target {#jsonrpc_components_vhost_tgt}

The following common preconditions need to be met in all target types.
@@ -160,3 +160,9 @@ DEPDIRS-event_vhost_blk := init vhost
DEPDIRS-event_vhost_scsi := init vhost event_scheduler event_scsi
DEPDIRS-event_sock := init sock
DEPDIRS-event_vfu_tgt := init vfu_tgt

# module/vfu_device

ifeq ($(CONFIG_VFIO_USER),y)
DEPDIRS-vfu_device := $(BDEV_DEPS_THREAD) vfu_tgt
endif
@@ -107,7 +107,12 @@ ifeq (y,$(DPDK_POWER))
SCHEDULER_MODULES_LIST += env_dpdk scheduler_dpdk_governor scheduler_gscheduler
endif

ifeq ($(CONFIG_VFIO_USER),y)
VFU_DEVICE_MODULES_LIST = vfu_device
endif

EVENT_BDEV_SUBSYSTEM = event_bdev event_accel event_vmd event_sock

ALL_MODULES_LIST = $(BLOCKDEV_MODULES_LIST) $(ACCEL_MODULES_LIST) $(SCHEDULER_MODULES_LIST) $(SOCK_MODULES_LIST)
ALL_MODULES_LIST += $(VFU_DEVICE_MODULES_LIST)
SYS_LIBS += $(BLOCKDEV_MODULES_PRIVATE_LIBS)
@@ -12,6 +12,10 @@ ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV))
DIRS-y += env_dpdk
endif

ifeq ($(CONFIG_VFIO_USER), y)
DIRS-y += vfu_device
endif

DEPDIRS-blob :=
DEPDIRS-accel :=
DEPDIRS-env_dpdk :=
@@ -20,6 +24,7 @@ DEPDIRS-scheduler :=
DEPDIRS-bdev := blob
DEPDIRS-blobfs := blob
DEPDIRS-event := bdev blob
DEPDIRS-vfu_device :=

.PHONY: all clean $(DIRS-y)
module/vfu_device/Makefile (new file, 17 lines)
@@ -0,0 +1,17 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) Intel Corporation.
# All rights reserved.
#

SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk

SO_VER := 1
SO_MINOR := 0

C_SRCS = vfu_virtio.c vfu_virtio_blk.c vfu_virtio_rpc.c
LIBNAME = vfu_device

SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map

include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
module/vfu_device/vfu_virtio.c (new file, 1777 lines)
File diff suppressed because it is too large.
module/vfu_device/vfu_virtio_blk.c (new file, 615 lines)
@@ -0,0 +1,615 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

/*
 * virtio-blk over vfio-user transport
 */
#include <linux/virtio_blk.h>

#include "spdk/env.h"
#include "spdk/bdev.h"
#include "spdk/bdev_module.h"
#include "spdk/stdinc.h"
#include "spdk/assert.h"
#include "spdk/barrier.h"
#include "spdk/thread.h"
#include "spdk/memory.h"
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/likely.h"
#include "spdk/pci_ids.h"

#include "vfu_virtio_internal.h"

#define VIRTIO_BLK_SUPPORTED_FEATURES	((1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
					 (1ULL << VIRTIO_BLK_F_SEG_MAX) | \
					 (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
					 (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
					 (1ULL << VIRTIO_BLK_F_MQ))

struct virtio_blk_endpoint {
	struct vfu_virtio_endpoint virtio;

	/* virtio_blk specific configurations */
	struct spdk_thread *init_thread;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *bdev_desc;
	struct spdk_io_channel *io_channel;
	struct virtio_blk_config blk_cfg;

	/* virtio_blk ring process poller */
	struct spdk_poller *ring_poller;
};

struct virtio_blk_req {
	volatile uint8_t *status;
	struct virtio_blk_endpoint *endpoint;
	/* KEEP req at last */
	struct vfu_virtio_req req;
};

static inline struct virtio_blk_endpoint *
to_blk_endpoint(struct vfu_virtio_endpoint *virtio_endpoint)
{
	return SPDK_CONTAINEROF(virtio_endpoint, struct virtio_blk_endpoint, virtio);
}

static inline struct virtio_blk_req *
to_blk_request(struct vfu_virtio_req *request)
{
	return SPDK_CONTAINEROF(request, struct virtio_blk_req, req);
}

static int
vfu_virtio_blk_vring_poll(void *ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = ctx;
	struct vfu_virtio_dev *dev = blk_endpoint->virtio.dev;
	struct vfu_virtio_vq *vq;
	uint32_t i, count = 0;

	if (spdk_unlikely(!virtio_dev_is_started(dev))) {
		return SPDK_POLLER_IDLE;
	}

	if (spdk_unlikely(blk_endpoint->virtio.quiesce_in_progress)) {
		return SPDK_POLLER_IDLE;
	}

	for (i = 0; i < dev->num_queues; i++) {
		vq = &dev->vqs[i];
		if (!vq->enabled || vq->q_state != VFU_VQ_ACTIVE) {
			continue;
		}

		vfu_virtio_vq_flush_irq(dev, vq);

		if (vq->packed.packed_ring) {
			/* packed vring */
			count += vfu_virito_dev_process_packed_ring(dev, vq);
		} else {
			/* split vring */
			count += vfu_virito_dev_process_split_ring(dev, vq);
		}
	}

	return count ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
}

static int
virtio_blk_start(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->ring_poller) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_blk, "starting %s\n", virtio_endpoint->dev->name);
	blk_endpoint->io_channel = spdk_bdev_get_io_channel(blk_endpoint->bdev_desc);
	blk_endpoint->ring_poller = SPDK_POLLER_REGISTER(vfu_virtio_blk_vring_poll, blk_endpoint, 0);

	return 0;
}

static void
_virtio_blk_stop_msg(void *ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = ctx;

	spdk_poller_unregister(&blk_endpoint->ring_poller);
	spdk_put_io_channel(blk_endpoint->io_channel);
	blk_endpoint->io_channel = NULL;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s is stopped\n",
		      spdk_vfu_get_endpoint_id(blk_endpoint->virtio.endpoint));
}

static int
virtio_blk_stop(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (!blk_endpoint->io_channel) {
		return 0;
	}

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s stopping\n", spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint));
	spdk_thread_send_msg(virtio_endpoint->thread, _virtio_blk_stop_msg, blk_endpoint);
	return 0;
}

static void
virtio_blk_req_finish(struct virtio_blk_req *blk_req, uint8_t status)
{
	struct vfu_virtio_req *req = &blk_req->req;

	if (spdk_likely(blk_req->status)) {
		*blk_req->status = status;
		blk_req->status = NULL;
	}

	vfu_virtio_finish_req(req);
}

static void
blk_request_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct virtio_blk_req *blk_req = cb_arg;

	SPDK_DEBUGLOG(vfu_virtio_blk, "IO done status %u\n", success);

	spdk_bdev_free_io(bdev_io);
	virtio_blk_req_finish(blk_req, success ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
}

static int
virtio_blk_process_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		       struct vfu_virtio_req *req)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	struct virtio_blk_req *blk_req = to_blk_request(req);
	const struct virtio_blk_outhdr *hdr;
	struct virtio_blk_discard_write_zeroes *desc;
	struct iovec *iov;
	uint16_t iovcnt;
	uint64_t flush_bytes;
	uint32_t type;
	uint32_t payload_len;
	int ret;

	blk_req->endpoint = blk_endpoint;

	iov = &req->iovs[0];
	if (spdk_unlikely(iov->iov_len != sizeof(*hdr))) {
		SPDK_ERRLOG("Invalid virtio_blk header length");
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -EINVAL;
	}
	hdr = iov->iov_base;

	iov = &req->iovs[req->iovcnt - 1];
	if (spdk_unlikely(iov->iov_len != 1)) {
		SPDK_ERRLOG("Invalid virtio_blk response length");
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -EINVAL;
	}
	blk_req->status = iov->iov_base;

	payload_len = req->payload_size;
	payload_len -= sizeof(*hdr) + 1;
	iovcnt = req->iovcnt - 2;

	type = hdr->type;
	/* Legacy type isn't supported */
	type &= ~VIRTIO_BLK_T_BARRIER;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s: type %u, iovcnt %u, payload_len %u\n",
		      spdk_vfu_get_endpoint_id(virtio_endpoint->endpoint),
		      type, iovcnt, payload_len);

	if (spdk_unlikely(blk_endpoint->bdev_desc == NULL)) {
		SPDK_ERRLOG("Bdev has been removed\n");
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
		return 0;
	}

	switch (type) {
	case VIRTIO_BLK_T_IN:
	case VIRTIO_BLK_T_OUT:
		if (spdk_unlikely(payload_len == 0 || (payload_len & (512 - 1)) != 0)) {
			SPDK_ERRLOG("Invalid payload length %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}
		if (type == VIRTIO_BLK_T_IN) {
			req->used_len = payload_len + 1;
			ret = spdk_bdev_readv(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					      &req->iovs[1], iovcnt, hdr->sector * 512,
					      payload_len, blk_request_complete_cb, blk_req);
		} else {
			req->used_len = 1;
			ret = spdk_bdev_writev(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					       &req->iovs[1], iovcnt, hdr->sector * 512,
					       payload_len, blk_request_complete_cb, blk_req);
		}
		if (ret) {
			SPDK_ERRLOG("R/W error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_DISCARD:
		desc = req->iovs[1].iov_base;
		if (payload_len != sizeof(*desc)) {
			SPDK_NOTICELOG("Invalid discard payload size: %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -EINVAL;
		}

		if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
			SPDK_ERRLOG("UNMAP flag is only used for WRITE ZEROES command\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}

		ret = spdk_bdev_unmap(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
				      desc->sector * 512, desc->num_sectors * 512,
				      blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("UNMAP error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_WRITE_ZEROES:
		desc = req->iovs[1].iov_base;
		if (payload_len != sizeof(*desc)) {
			SPDK_NOTICELOG("Invalid write zeroes payload size: %u\n", payload_len);
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -1;
		}

		/* Unmap this range, SPDK doesn't support it, kernel will enable this flag by default
		 * without checking unmap feature is negotiated or not, the flag isn't mandatory, so
		 * just print a warning.
		 */
		if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
			SPDK_WARNLOG("Ignore the unmap flag for WRITE ZEROES from %"PRIx64", len %"PRIx64"\n",
				     (uint64_t)desc->sector * 512, (uint64_t)desc->num_sectors * 512);
		}

		ret = spdk_bdev_write_zeroes(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
					     desc->sector * 512, desc->num_sectors * 512,
					     blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("WRITE ZEROES error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_FLUSH:
		flush_bytes = spdk_bdev_get_num_blocks(blk_endpoint->bdev) * spdk_bdev_get_block_size(
				      blk_endpoint->bdev);
		if (hdr->sector != 0) {
			SPDK_NOTICELOG("sector must be zero for flush command\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return -EINVAL;
		}
		ret = spdk_bdev_flush(blk_endpoint->bdev_desc, blk_endpoint->io_channel,
				      0, flush_bytes,
				      blk_request_complete_cb, blk_req);
		if (ret) {
			SPDK_ERRLOG("FLUSH error\n");
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_IOERR);
			return ret;
		}
		break;
	case VIRTIO_BLK_T_GET_ID:
		if (!iovcnt || !payload_len) {
			virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
			return -EINVAL;
		}
		req->used_len = spdk_min((size_t)VIRTIO_BLK_ID_BYTES, req->iovs[1].iov_len);
		spdk_strcpy_pad(req->iovs[1].iov_base, spdk_bdev_get_name(blk_endpoint->bdev),
				req->used_len, ' ');
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_OK);
		break;
	default:
		virtio_blk_req_finish(blk_req, VIRTIO_BLK_S_UNSUPP);
		return -ENOTSUP;
	}

	return 0;
}

static void
virtio_blk_update_config(struct virtio_blk_config *blk_cfg, struct spdk_bdev *bdev,
			 uint16_t num_queues)
{
	memset(blk_cfg, 0, sizeof(*blk_cfg));

	if (!bdev) {
		return;
	}

	blk_cfg->blk_size = spdk_bdev_get_block_size(bdev);
	blk_cfg->capacity = (blk_cfg->blk_size * spdk_bdev_get_num_blocks(bdev)) / 512;
	/* minimum I/O size in blocks */
	blk_cfg->min_io_size = 1;
	blk_cfg->num_queues = num_queues;

	if (spdk_bdev_get_buf_align(bdev) > 1) {
		blk_cfg->size_max = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
		blk_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, BDEV_IO_NUM_CHILD_IOV - 2 - 1);
	} else {
		blk_cfg->size_max = 131072;
		/* -2 for REQ and RESP and -1 for region boundary splitting */
		blk_cfg->seg_max = VIRTIO_DEV_MAX_IOVS - 2 - 1;
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
		/* 16MiB, expressed in 512 Bytes */
		blk_cfg->max_discard_sectors = 32768;
		blk_cfg->max_discard_seg = 1;
		blk_cfg->discard_sector_alignment = blk_cfg->blk_size / 512;
	}
	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
		blk_cfg->max_write_zeroes_sectors = 32768;
		blk_cfg->max_write_zeroes_seg = 1;
	}
}

static void
_vfu_virtio_blk_bdev_close(void *arg1)
{
	struct spdk_bdev_desc *bdev_desc = arg1;

	spdk_bdev_close(bdev_desc);
}

static void
bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
	      void *event_ctx)
{
	struct virtio_blk_endpoint *blk_endpoint = event_ctx;

	SPDK_DEBUGLOG(vfu_virtio_blk, "Bdev event: type %d, name %s\n", type, bdev->name);

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_REMOVE)\n", bdev->name);
		virtio_blk_update_config(&blk_endpoint->blk_cfg, NULL, 0);

		if (blk_endpoint->io_channel) {
			spdk_thread_send_msg(blk_endpoint->virtio.thread, _virtio_blk_stop_msg, blk_endpoint);
		}

		if (blk_endpoint->bdev_desc) {
			spdk_thread_send_msg(blk_endpoint->init_thread, _vfu_virtio_blk_bdev_close,
					     blk_endpoint->bdev_desc);
			blk_endpoint->bdev_desc = NULL;
		}
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_RESIZE)\n", bdev->name);
		virtio_blk_update_config(&blk_endpoint->blk_cfg, blk_endpoint->bdev,
					 blk_endpoint->virtio.num_queues);
		vfu_virtio_notify_config(&blk_endpoint->virtio);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

static uint64_t
virtio_blk_get_supported_features(struct vfu_virtio_endpoint *virtio_endpoint)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	uint64_t features;
	struct spdk_bdev *bdev;

	features = VIRTIO_BLK_SUPPORTED_FEATURES | VIRTIO_HOST_SUPPORTED_FEATURES;

	if (!virtio_endpoint->packed_ring) {
		features &= ~(1ULL << VIRTIO_F_RING_PACKED);
	}
	bdev = blk_endpoint->bdev;

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
		features |= (1ULL << VIRTIO_BLK_F_DISCARD);
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) {
		features |= (1ULL << VIRTIO_BLK_F_WRITE_ZEROES);
	}

	if (spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		features |= (1ULL << VIRTIO_BLK_F_FLUSH);
	}

	return features;
}

static int
virtio_blk_get_device_specific_config(struct vfu_virtio_endpoint *virtio_endpoint, char *buf,
				      uint64_t offset, uint64_t count)
{
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);
	uint8_t *blk_cfg;
	uint64_t len;

	if (offset >= sizeof(struct virtio_blk_config)) {
		return -EINVAL;
	}
	len = spdk_min(sizeof(struct virtio_blk_config) - offset, count);

	blk_cfg = (uint8_t *)&blk_endpoint->blk_cfg;
	memcpy(buf, blk_cfg + offset, len);

	return 0;
}

static struct vfu_virtio_req *
virtio_blk_alloc_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq)
{
	struct virtio_blk_req *blk_req;

	blk_req = calloc(1, sizeof(*blk_req) + dma_sg_size() * (VIRTIO_DEV_MAX_IOVS + 1));
	if (!blk_req) {
		return NULL;
	}

	return &blk_req->req;
}

static void
virtio_blk_free_req(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
		    struct vfu_virtio_req *req)
{
	struct virtio_blk_req *blk_req = to_blk_request(req);

	free(blk_req);
}

struct vfu_virtio_ops virtio_blk_ops = {
	.get_device_features = virtio_blk_get_supported_features,
	.alloc_req = virtio_blk_alloc_req,
	.free_req = virtio_blk_free_req,
	.exec_request = virtio_blk_process_req,
	.get_config = virtio_blk_get_device_specific_config,
	.start_device = virtio_blk_start,
	.stop_device = virtio_blk_stop,
};

int
vfu_virtio_blk_add_bdev(const char *name, const char *bdev_name,
			uint16_t num_queues, uint16_t qsize, bool packed_ring)
{
	struct spdk_vfu_endpoint *endpoint;
	struct vfu_virtio_endpoint *virtio_endpoint;
	struct virtio_blk_endpoint *blk_endpoint;
	int ret;

	endpoint = spdk_vfu_get_endpoint_by_name(name);
	if (!endpoint) {
		SPDK_ERRLOG("Endpoint %s doesn't exist\n", name);
		return -ENOENT;
	}

	virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->bdev_desc) {
		SPDK_ERRLOG("%s: block device already exists\n", spdk_vfu_get_endpoint_id(endpoint));
		return -EEXIST;
	}

	if (num_queues && (num_queues <= VIRTIO_DEV_MAX_VQS)) {
		blk_endpoint->virtio.num_queues = num_queues;
	}
	if (qsize && (qsize <= VIRTIO_VQ_MAX_SIZE)) {
		blk_endpoint->virtio.qsize = qsize;
	}
	blk_endpoint->virtio.packed_ring = packed_ring;

	SPDK_DEBUGLOG(vfu_virtio_blk, "%s: add block device %s, num_queues %u, qsize %u, packed ring %s\n",
		      spdk_vfu_get_endpoint_id(endpoint),
		      bdev_name, blk_endpoint->virtio.num_queues, blk_endpoint->virtio.qsize,
		      packed_ring ? "enabled" : "disabled");

	ret = spdk_bdev_open_ext(bdev_name, true, bdev_event_cb, blk_endpoint,
				 &blk_endpoint->bdev_desc);
	if (ret != 0) {
		SPDK_ERRLOG("%s could not open bdev '%s', error=%d\n",
			    name, bdev_name, ret);
		return ret;
	}
	blk_endpoint->bdev = spdk_bdev_desc_get_bdev(blk_endpoint->bdev_desc);
	virtio_blk_update_config(&blk_endpoint->blk_cfg, blk_endpoint->bdev,
				 blk_endpoint->virtio.num_queues);
	blk_endpoint->init_thread = spdk_get_thread();

	return 0;
}

static int
vfu_virtio_blk_endpoint_destruct(struct spdk_vfu_endpoint *endpoint)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	if (blk_endpoint->bdev_desc) {
		spdk_thread_send_msg(blk_endpoint->init_thread, _vfu_virtio_blk_bdev_close,
				     blk_endpoint->bdev_desc);
		blk_endpoint->bdev_desc = NULL;
	}

	vfu_virtio_endpoint_destruct(&blk_endpoint->virtio);
	free(blk_endpoint);

	return 0;
}

static void *
vfu_virtio_blk_endpoint_init(struct spdk_vfu_endpoint *endpoint,
			     char *basename, const char *endpoint_name)
{
	struct virtio_blk_endpoint *blk_endpoint;
	int ret;

	blk_endpoint = calloc(1, sizeof(*blk_endpoint));
	if (!blk_endpoint) {
		return NULL;
	}

	ret = vfu_virtio_endpoint_setup(&blk_endpoint->virtio, endpoint, basename, endpoint_name,
					&virtio_blk_ops);
	if (ret) {
		SPDK_ERRLOG("Error to setup endpoint %s\n", endpoint_name);
		free(blk_endpoint);
		return NULL;
	}

	return (void *)&blk_endpoint->virtio;
}

static int
vfu_virtio_blk_get_device_info(struct spdk_vfu_endpoint *endpoint,
			       struct spdk_vfu_pci_device *device_info)
{
	struct vfu_virtio_endpoint *virtio_endpoint = spdk_vfu_get_endpoint_private(endpoint);
	struct virtio_blk_endpoint *blk_endpoint = to_blk_endpoint(virtio_endpoint);

	vfu_virtio_get_device_info(&blk_endpoint->virtio, device_info);
	/* Fill Device ID */
	device_info->id.did = PCI_DEVICE_ID_VIRTIO_BLK_MODERN;

	return 0;
}

struct spdk_vfu_endpoint_ops vfu_virtio_blk_ops = {
	.name = "virtio_blk",
	.init = vfu_virtio_blk_endpoint_init,
	.get_device_info = vfu_virtio_blk_get_device_info,
	.get_vendor_capability = vfu_virtio_get_vendor_capability,
	.post_memory_add = vfu_virtio_post_memory_add,
	.pre_memory_remove = vfu_virtio_pre_memory_remove,
	.reset_device = vfu_virtio_pci_reset_cb,
	.quiesce_device = vfu_virtio_quiesce_cb,
	.destruct = vfu_virtio_blk_endpoint_destruct,
	.attach_device = vfu_virtio_attach_device,
	.detach_device = vfu_virtio_detach_device,
};

static void
__attribute__((constructor)) _vfu_virtio_blk_pci_model_register(void)
{
	spdk_vfu_register_endpoint_ops(&vfu_virtio_blk_ops);
}

SPDK_LOG_REGISTER_COMPONENT(vfu_virtio_blk)
module/vfu_device/vfu_virtio_internal.h (new file, 403 lines)
@@ -0,0 +1,403 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#ifndef _VFU_VIRTIO_INTERNAL_H
#define _VFU_VIRTIO_INTERNAL_H

#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

#include "spdk/vfu_target.h"

#define VIRTIO_HOST_SUPPORTED_FEATURES	((1ULL << VIRTIO_F_VERSION_1) | \
					 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
					 (1ULL << VIRTIO_F_RING_PACKED))

/* virtio device layout:
 *
 * region 1: MSI-X Table
 * region 2: MSI-X PBA
 * region 4: virtio modern memory 64bits BAR
 *           Common configuration          0x0    - 0x1000
 *           ISR access                    0x1000 - 0x2000
 *           Device specific configuration 0x2000 - 0x3000
 *           Notifications                 0x3000 - 0x4000
 */
#define VIRTIO_PCI_COMMON_CFG_OFFSET	(0x0)
#define VIRTIO_PCI_COMMON_CFG_LENGTH	(0x1000)
#define VIRTIO_PCI_ISR_ACCESS_OFFSET	(VIRTIO_PCI_COMMON_CFG_OFFSET + VIRTIO_PCI_COMMON_CFG_LENGTH)
#define VIRTIO_PCI_ISR_ACCESS_LENGTH	(0x1000)
#define VIRTIO_PCI_SPECIFIC_CFG_OFFSET	(VIRTIO_PCI_ISR_ACCESS_OFFSET + VIRTIO_PCI_ISR_ACCESS_LENGTH)
#define VIRTIO_PCI_SPECIFIC_CFG_LENGTH	(0x1000)
#define VIRTIO_PCI_NOTIFICATIONS_OFFSET	(VIRTIO_PCI_SPECIFIC_CFG_OFFSET + VIRTIO_PCI_SPECIFIC_CFG_LENGTH)
#define VIRTIO_PCI_NOTIFICATIONS_LENGTH	(0x1000)

#define VIRTIO_PCI_BAR4_LENGTH	(VIRTIO_PCI_NOTIFICATIONS_OFFSET + VIRTIO_PCI_NOTIFICATIONS_LENGTH)

#define VIRTIO_DEV_MAX_IOVS	(129)
/* Maximum number of requests which can be processed one time */
#define VIRTIO_DEV_VRING_MAX_REQS	(32)
/* Maximum number of queues can be supported by virtio device */
#define VIRTIO_DEV_MAX_VQS	(64)
/* Default queue size */
#define VIRTIO_VQ_DEFAULT_SIZE	(128)
/* Maximum queue size */
#define VIRTIO_VQ_MAX_SIZE	(1024)

struct vfu_virtio_endpoint;
struct vfu_virtio_req;

struct virtio_pci_cfg {
	/* Common PCI configuration */
	uint32_t guest_feat_lo;
	uint32_t guest_feat_hi;

	/* Negotiated feature bits */
	uint64_t guest_features;

	uint32_t host_feature_select;
	uint32_t guest_feature_select;

	uint16_t msix_config;
	uint8_t device_status;
	uint8_t config_generation;
	uint16_t queue_select;

	/* ISR access */
	uint8_t isr;
};

enum vfu_vq_state {
	VFU_VQ_CREATED = 0,
	VFU_VQ_ACTIVE,
	VFU_VQ_INACTIVE,
};

struct q_mapping {
	/* iov of local process mapping. */
	struct iovec iov;
	/* Stored sg, needed for unmap. */
	dma_sg_t *sg;
	/* physical address */
	uint64_t phys_addr;
	/* virtual address */
	union {
		void *addr;

		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;

		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;

		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	/* size in bytes */
	uint64_t len;
};

struct vfu_virtio_vq {
	/* Read Only */
	uint16_t id;
	uint16_t qsize;

	bool enabled;
	uint16_t vector;

	enum vfu_vq_state q_state;
	STAILQ_HEAD(, vfu_virtio_req) free_reqs;

	uint32_t desc_lo;
	uint32_t desc_hi;
	uint32_t avail_lo;
	uint32_t avail_hi;
	uint32_t used_lo;
	uint32_t used_hi;

	struct q_mapping avail;
	struct q_mapping used;
	struct q_mapping desc;

	uint16_t last_avail_idx;
	uint16_t last_used_idx;

	struct {
		/* To mark a descriptor as available in packed ring
		 * Equal to avail_wrap_counter in spec.
		 */
		uint8_t avail_phase	: 1;
		/* To mark a descriptor as used in packed ring
		 * Equal to used_wrap_counter in spec.
		 */
		uint8_t used_phase	: 1;
		uint8_t padding		: 5;
		bool packed_ring	: 1;
	} packed;

	/* Request count from last event */
	uint16_t used_req_cnt;
	/* Next time when we need to send event */
	uint64_t next_event_time;
};

struct vfu_virtio_dev {
	char name[SPDK_VFU_MAX_NAME_LEN];
	/* RO for Guest Driver */
	uint16_t num_queues;
	/* Supported feature bits by host driver, RO for Guest Driver */
	uint64_t host_features;

	struct virtio_pci_cfg cfg;
	struct vfu_virtio_vq vqs[VIRTIO_DEV_MAX_VQS];

	struct vfu_virtio_endpoint *virtio_endpoint;

	/* VIRTIO_DEV_MAX_VQS * 3 worth of dma_sg_size() */
	uint8_t sg[];
};

struct vfu_virtio_ops {
	uint64_t (*get_device_features)(struct vfu_virtio_endpoint *virtio_endpoint);
	struct vfu_virtio_req *(*alloc_req)(struct vfu_virtio_endpoint *virtio_endpoint,
					    struct vfu_virtio_vq *vq);
	void (*free_req)(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
			 struct vfu_virtio_req *req);
	int (*exec_request)(struct vfu_virtio_endpoint *virtio_endpoint, struct vfu_virtio_vq *vq,
			    struct vfu_virtio_req *req);
	int (*get_config)(struct vfu_virtio_endpoint *virtio_endpoint, char *buf, uint64_t offset,
			  uint64_t count);
	int (*set_config)(struct vfu_virtio_endpoint *virtio_endpoint, char *buf, uint64_t offset,
			  uint64_t count);
	int (*start_device)(struct vfu_virtio_endpoint *virtio_endpoint);
	int (*stop_device)(struct vfu_virtio_endpoint *virtio_endpoint);
};

struct vfu_virtio_endpoint {
	struct vfu_virtio_dev *dev;
	int devmem_fd;
	volatile uint32_t *doorbells;

	uint16_t num_queues;
	uint16_t qsize;
	bool packed_ring;

	uint32_t coalescing_delay_us;

	struct spdk_vfu_endpoint *endpoint;
	struct spdk_thread *thread;

	struct vfu_virtio_ops virtio_ops;

	/* quiesce poller */
	uint32_t io_outstanding;
	bool quiesce_in_progress;
	struct spdk_poller *quiesce_poller;
};

struct vfu_virtio_req {
	struct vfu_virtio_dev *dev;
	struct vfu_virtio_vq *vq;

	STAILQ_ENTRY(vfu_virtio_req) link;

	uint32_t payload_size;
	uint32_t used_len;

	/* split vring */
	uint16_t req_idx;
	/* packed vring */
	uint16_t buffer_id;
	uint16_t num_descs;

	uint16_t iovcnt;
	struct iovec iovs[VIRTIO_DEV_MAX_IOVS + 1];
	uint8_t desc_writeable[VIRTIO_DEV_MAX_IOVS + 1];

	struct iovec *indirect_iov;
	dma_sg_t *indirect_sg;

	/* VIRIO_DEV_MAX_IOVS + 1 worth of dma_sg_size() */
	uint8_t sg[];
};

static inline bool
virtio_guest_has_feature(struct vfu_virtio_dev *dev, uint32_t feature_bit)
{
	assert(feature_bit <= 64);

	return !!(dev->cfg.guest_features & (1ULL << feature_bit));
}

static inline uint64_t
virtio_queue_desc_size(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	return sizeof(struct vring_desc) * vq->qsize;
}

static inline uint64_t
virtio_queue_avail_size(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	uint16_t event_size;

	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		return sizeof(struct vring_packed_desc_event);
	}

	event_size = virtio_guest_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return (sizeof(struct vring_avail) + sizeof(uint16_t) * vq->qsize
		+ event_size);
}

static inline uint64_t
virtio_queue_used_size(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	uint16_t event_size;

	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		return sizeof(struct vring_packed_desc_event);
	}

	event_size = virtio_guest_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return (sizeof(struct vring_used) + sizeof(struct vring_used_elem) * vq->qsize
		+ event_size);
}

static inline bool
virtio_queue_event_is_suppressed(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq)
{
	bool is_suppressed = false;

	if (virtio_guest_has_feature(dev, VIRTIO_F_RING_PACKED)) {
		is_suppressed = vq->avail.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE;
	} else {
		is_suppressed = vq->avail.avail->flags & VRING_AVAIL_F_NO_INTERRUPT;

	}

	return is_suppressed;
}

static inline bool
virtio_dev_is_started(struct vfu_virtio_dev *dev)
{
	return !!(dev->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK);
}

static inline bool
virtio_vring_split_desc_is_indirect(struct vring_desc *desc)
{
	return !!(desc->flags & VRING_DESC_F_INDIRECT);
}

static inline bool
virtio_vring_packed_desc_is_indirect(struct vring_packed_desc *desc)
{
	return !!(desc->flags & VRING_DESC_F_INDIRECT);
}

static inline bool
virtio_vring_split_desc_is_wr(struct vring_desc *desc)
{
	return !!(desc->flags & VRING_DESC_F_WRITE);
}

static inline bool
virtio_vring_packed_desc_is_wr(struct vring_packed_desc *desc)
{
	return !!(desc->flags & VRING_DESC_F_WRITE);
}

static inline bool
virtio_vring_packed_is_avail(struct vring_packed_desc *desc, bool avail_phase)
{
	bool avail_flag, used_flag;
	uint16_t flags = desc->flags;

	avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
	 * to match the internal avail wrap counter. It also sets the F_USED bit to
	 * match the inverse value but it's not mandatory.
	 */
	return (avail_flag != used_flag) && (avail_flag == avail_phase);
}

static inline bool
virtio_vring_packed_is_used(struct vring_packed_desc *desc, bool used_phase)
{
	bool avail_flag, used_flag;
	uint16_t flags = desc->flags;

	avail_flag = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used_flag = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	/* When the descriptor is used, two flags in descriptor
	 * avail flag and used flag are set to equal
	 * and used flag value == used_wrap_counter.
	 */
	return (used_flag == avail_flag) && (used_flag == used_phase);
}

static inline bool
virtio_req_iov_is_wr(struct vfu_virtio_req *req, uint32_t iov_num)
{
	assert(iov_num <= VIRTIO_DEV_MAX_IOVS);
	return req->desc_writeable[iov_num];
}

static inline struct vfu_virtio_req *
vfu_virtio_vq_alloc_req(struct vfu_virtio_endpoint *endpoint, struct vfu_virtio_vq *vq)
{
	assert(endpoint->virtio_ops.alloc_req != NULL);
	return endpoint->virtio_ops.alloc_req(endpoint, vq);
}

static inline void
vfu_virtio_vq_free_req(struct vfu_virtio_endpoint *endpoint, struct vfu_virtio_vq *vq,
		       struct vfu_virtio_req *req)
{
	assert(endpoint->virtio_ops.free_req);
	endpoint->virtio_ops.free_req(endpoint, vq, req);
}

void virtio_vq_used_ring_split_enqueue(struct vfu_virtio_vq *vq, uint16_t req_idx,
				       uint32_t used_len);
void virtio_vq_used_ring_packed_enqueue(struct vfu_virtio_vq *vq, uint16_t buffer_id,
					uint32_t num_descs, uint32_t used_len);
struct vfu_virtio_req *virito_dev_packed_ring_get_next_avail_req(struct vfu_virtio_dev *dev,
		struct vfu_virtio_vq *vq);
struct vfu_virtio_req *virito_dev_split_ring_get_next_avail_req(struct vfu_virtio_dev *dev,
		struct vfu_virtio_vq *vq);

int vfu_virtio_quiesce_cb(struct spdk_vfu_endpoint *endpoint);

void vfu_virtio_dev_put_req(struct vfu_virtio_req *req);
void vfu_virtio_finish_req(struct vfu_virtio_req *req);
void vfu_virtio_vq_flush_irq(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq);
int vfu_virito_dev_process_packed_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq);
int vfu_virito_dev_process_split_ring(struct vfu_virtio_dev *dev, struct vfu_virtio_vq *vq);
void vfu_virtio_notify_config(struct vfu_virtio_endpoint *virtio_endpoint);
int vfu_virtio_endpoint_setup(struct vfu_virtio_endpoint *virtio_endpoint,
			      struct spdk_vfu_endpoint *endpoint,
			      char *basename, const char *endpoint_name,
			      struct vfu_virtio_ops *ops);
int vfu_virtio_endpoint_destruct(struct vfu_virtio_endpoint *virtio_endpoint);
void vfu_virtio_get_device_info(struct vfu_virtio_endpoint *virtio_endpoint,
				struct spdk_vfu_pci_device *device_info);
int vfu_virtio_attach_device(struct spdk_vfu_endpoint *endpoint);
int vfu_virtio_detach_device(struct spdk_vfu_endpoint *endpoint);
uint16_t vfu_virtio_get_vendor_capability(struct spdk_vfu_endpoint *endpoint, char *buf,
		uint16_t buf_len, uint16_t idx);
int vfu_virtio_post_memory_add(struct spdk_vfu_endpoint *endpoint, void *map_start, void *map_end);
int vfu_virtio_pre_memory_remove(struct spdk_vfu_endpoint *endpoint, void *map_start,
				 void *map_end);
int vfu_virtio_pci_reset_cb(struct spdk_vfu_endpoint *endpoint);
int vfu_virtio_blk_add_bdev(const char *name, const char *bdev_name,
			    uint16_t num_queues, uint16_t qsize, bool packed_ring);

#endif
module/vfu_device/vfu_virtio_rpc.c (new file, 126 lines)
@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

#include "spdk/bdev.h"
#include "spdk/log.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/string.h"
#include "spdk/util.h"
#include "spdk/thread.h"

#include "vfu_virtio_internal.h"

struct rpc_delete_vfu_endpoint {
	char *name;
};

static const struct spdk_json_object_decoder rpc_delete_vfu_endpoint_decode[] = {
	{"name", offsetof(struct rpc_delete_vfu_endpoint, name), spdk_json_decode_string }
};

static void
free_rpc_delete_vfu_endpoint(struct rpc_delete_vfu_endpoint *req)
{
	free(req->name);
}

static void
rpc_vfu_virtio_delete_endpoint(struct spdk_jsonrpc_request *request,
			       const struct spdk_json_val *params)
{
	struct rpc_delete_vfu_endpoint req = {0};
	int rc;

	if (spdk_json_decode_object(params, rpc_delete_vfu_endpoint_decode,
				    SPDK_COUNTOF(rpc_delete_vfu_endpoint_decode),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		rc = -EINVAL;
		goto invalid;
	}

	rc = spdk_vfu_delete_endpoint(req.name);
	if (rc < 0) {
		goto invalid;
	}
	free_rpc_delete_vfu_endpoint(&req);

	spdk_jsonrpc_send_bool_response(request, true);
	return;

invalid:
	free_rpc_delete_vfu_endpoint(&req);
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
					 spdk_strerror(-rc));
}
SPDK_RPC_REGISTER("vfu_virtio_delete_endpoint", rpc_vfu_virtio_delete_endpoint,
		  SPDK_RPC_RUNTIME)

struct rpc_vfu_virtio_create_blk {
	char *name;
	char *bdev_name;
	char *cpumask;
	uint16_t num_queues;
	uint16_t qsize;
	bool packed_ring;
};

static const struct spdk_json_object_decoder rpc_construct_vfu_virtio_create_blk[] = {
	{"name", offsetof(struct rpc_vfu_virtio_create_blk, name), spdk_json_decode_string },
	{"bdev_name", offsetof(struct rpc_vfu_virtio_create_blk, bdev_name), spdk_json_decode_string },
	{"cpumask", offsetof(struct rpc_vfu_virtio_create_blk, cpumask), spdk_json_decode_string, true},
	{"num_queues", offsetof(struct rpc_vfu_virtio_create_blk, num_queues), spdk_json_decode_uint16, true },
	{"qsize", offsetof(struct rpc_vfu_virtio_create_blk, qsize), spdk_json_decode_uint16, true },
	{"packed_ring", offsetof(struct rpc_vfu_virtio_create_blk, packed_ring), spdk_json_decode_bool, true},
};

static void
free_rpc_vfu_virtio_create_blk(struct rpc_vfu_virtio_create_blk *req)
{
	free(req->name);
	free(req->bdev_name);
	free(req->cpumask);
}

static void
rpc_vfu_virtio_create_blk_endpoint(struct spdk_jsonrpc_request *request,
				   const struct spdk_json_val *params)
{
	struct rpc_vfu_virtio_create_blk req = {0};
	int rc;

	if (spdk_json_decode_object(params, rpc_construct_vfu_virtio_create_blk,
				    SPDK_COUNTOF(rpc_construct_vfu_virtio_create_blk),
				    &req)) {
		SPDK_ERRLOG("spdk_json_decode_object failed\n");
		rc = -EINVAL;
		goto invalid;
	}

	rc = spdk_vfu_create_endpoint(req.name, req.cpumask, "virtio_blk");
	if (rc) {
		SPDK_ERRLOG("Failed to create virtio_blk endpoint\n");
		goto invalid;
	}

	rc = vfu_virtio_blk_add_bdev(req.name, req.bdev_name, req.num_queues, req.qsize,
				     req.packed_ring);
	if (rc < 0) {
		spdk_vfu_delete_endpoint(req.name);
		goto invalid;
	}
	free_rpc_vfu_virtio_create_blk(&req);

	spdk_jsonrpc_send_bool_response(request, true);
	return;

invalid:
	free_rpc_vfu_virtio_create_blk(&req);
	spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
					 spdk_strerror(-rc));
}
SPDK_RPC_REGISTER("vfu_virtio_create_blk_endpoint", rpc_vfu_virtio_create_blk_endpoint,
		  SPDK_RPC_RUNTIME)
@@ -9,3 +9,43 @@ def vfu_tgt_set_base_path(client, path):
    }

    return client.call('vfu_tgt_set_base_path', params)


def vfu_virtio_delete_endpoint(client, name):
    """Delete specified endpoint name.

    Args:
        name: endpoint name
    """
    params = {
        'name': name
    }

    return client.call('vfu_virtio_delete_endpoint', params)


def vfu_virtio_create_blk_endpoint(client, name, bdev_name, cpumask, num_queues, qsize, packed_ring):
    """Create virtio-blk endpoint.

    Args:
        name: endpoint name
        bdev_name: name of block device
        cpumask: CPU core mask
        num_queues: number of vrings
        qsize: number of element of each vring
        packed_ring: enable packed ring
    """
    params = {
        'name': name,
        'bdev_name': bdev_name
    }
    if cpumask:
        params['cpumask'] = cpumask
    if num_queues:
        params['num_queues'] = num_queues
    if qsize:
        params['qsize'] = qsize
    if packed_ring:
        params['packed_ring'] = packed_ring

    return client.call('vfu_virtio_create_blk_endpoint', params)
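As a usage note (not part of this patch), the helpers above can be driven from a plain Python script roughly as follows; the module path and socket location are assumptions based on SPDK's usual layout, and optional arguments left falsy are simply omitted from the JSON-RPC request parameters.

~~~python
# Hypothetical driver script; assumes SPDK's Python package is importable and
# spdk_tgt is listening on the default RPC socket.
from spdk.rpc.client import JSONRPCClient
from spdk.rpc.vfio_user import (vfu_virtio_create_blk_endpoint,
                                vfu_virtio_delete_endpoint)

client = JSONRPCClient("/var/tmp/spdk.sock")

# cpumask/num_queues/qsize/packed_ring are optional; falsy values are skipped.
vfu_virtio_create_blk_endpoint(client, name="vfu.0", bdev_name="Malloc0",
                               cpumask="0x2", num_queues=2, qsize=256,
                               packed_ring=False)

# ... use the endpoint, then remove it again:
vfu_virtio_delete_endpoint(client, name="vfu.0")
~~~
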
@@ -2624,6 +2624,31 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
    p.add_argument('path', help='socket base path')
    p.set_defaults(func=vfu_tgt_set_base_path)

    def vfu_virtio_delete_endpoint(args):
        rpc.vfio_user.vfu_virtio_delete_endpoint(args.client, name=args.name)

    p = subparsers.add_parser('vfu_virtio_delete_endpoint', help='Delete the PCI device via endpoint name.')
    p.add_argument('name', help='Endpoint name')
    p.set_defaults(func=vfu_virtio_delete_endpoint)

    def vfu_virtio_create_blk_endpoint(args):
        rpc.vfio_user.vfu_virtio_create_blk_endpoint(args.client,
                                                     name=args.name,
                                                     bdev_name=args.bdev_name,
                                                     cpumask=args.cpumask,
                                                     num_queues=args.num_queues,
                                                     qsize=args.qsize,
                                                     packed_ring=args.packed_ring)

    p = subparsers.add_parser('vfu_virtio_create_blk_endpoint', help='Create virtio-blk endpoint.')
    p.add_argument('name', help='Name of the endpoint')
    p.add_argument('--bdev-name', help='block device name', type=str, required=True)
    p.add_argument('--cpumask', help='CPU masks')
    p.add_argument('--num-queues', help='number of vrings', type=int, default=0)
    p.add_argument('--qsize', help='number of element for each vring', type=int, default=0)
    p.add_argument("--packed-ring", action='store_true', help='Enable packed ring')
    p.set_defaults(func=vfu_virtio_create_blk_endpoint)

    # accel_fw
    def accel_get_opc_assignments(args):
        print_dict(rpc.accel.accel_get_opc_assignments(args.client))