From 2bebd09bd724016656fe69cdcee36fd0da532233 Mon Sep 17 00:00:00 2001 From: Changpeng Liu Date: Wed, 1 Aug 2018 21:55:43 -0400 Subject: [PATCH] vhost/nvme: remove VHOST_USER_NVME_IO_CMD socket message VHOST_USER_NVME_IO_CMD was designed to deliver the NVMe I/O command header to the slave target via the socket; it could be used by a BIOS that does not enable the Shadow Doorbell Buffer feature. Since we enabled the shadow BAR feature to support old guest kernels without the Shadow Doorbell Buffer feature, this message is no longer required, so remove it. Change-Id: I72e55f11176af2405c8cc09da404a9f4e5e71526 Signed-off-by: Changpeng Liu Reviewed-on: https://review.gerrithub.io/420821 Chandler-Test-Pool: SPDK Automated Test System Tested-by: SPDK CI Jenkins Reviewed-by: Jim Harris Reviewed-by: Darek Stojaczyk --- lib/vhost/rte_vhost/vhost_user.c | 17 ----------------- lib/vhost/rte_vhost/vhost_user.h | 12 ------------ 2 files changed, 29 deletions(-) diff --git a/lib/vhost/rte_vhost/vhost_user.c b/lib/vhost/rte_vhost/vhost_user.c index 0c6431b9b..d530b1386 100644 --- a/lib/vhost/rte_vhost/vhost_user.c +++ b/lib/vhost/rte_vhost/vhost_user.c @@ -84,7 +84,6 @@ static const char *vhost_message_str[VHOST_USER_MAX] = { [VHOST_USER_NVME_SET_CQ_CALL] = "VHOST_USER_NVME_SET_CQ_CALL", [VHOST_USER_NVME_GET_CAP] = "VHOST_USER_NVME_GET_CAP", [VHOST_USER_NVME_START_STOP] = "VHOST_USER_NVME_START_STOP", - [VHOST_USER_NVME_IO_CMD] = "VHOST_USER_NVME_IO_CMD", [VHOST_USER_NVME_SET_BAR_MR] = "VHOST_USER_NVME_SET_BAR_MR" }; @@ -1086,14 +1085,6 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg) return alloc_vring_queue(dev, vring_idx); } -static int -vhost_user_nvme_io_request_passthrough(struct virtio_net *dev, - uint16_t qid, uint16_t tail_head, - bool is_submission_queue) -{ - return -1; -} - static int vhost_user_nvme_admin_passthrough(struct virtio_net *dev, void *cmd, void *cqe, void *buf) @@ -1221,8 +1212,6 @@ vhost_user_msg_handler(int vid, int fd) uint8_t cqe[16]; 
uint8_t cmd[64]; uint8_t buf[4096]; - uint16_t qid, tail_head; - bool is_submission_queue; dev = get_device(vid); if (dev == NULL) @@ -1327,12 +1316,6 @@ vhost_user_msg_handler(int vid, int fd) } } break; - case VHOST_USER_NVME_IO_CMD: - qid = msg.payload.nvme_io.qid; - tail_head = msg.payload.nvme_io.tail_head; - is_submission_queue = (msg.payload.nvme_io.queue_type == VHOST_USER_NVME_SUBMISSION_QUEUE) ? true : false; - vhost_user_nvme_io_request_passthrough(dev, qid, tail_head, is_submission_queue); - break; case VHOST_USER_NVME_SET_BAR_MR: ret = vhost_user_nvme_set_bar_mr(dev, &msg); break; diff --git a/lib/vhost/rte_vhost/vhost_user.h b/lib/vhost/rte_vhost/vhost_user.h index 8d9d33de0..d20574b64 100644 --- a/lib/vhost/rte_vhost/vhost_user.h +++ b/lib/vhost/rte_vhost/vhost_user.h @@ -127,17 +127,6 @@ typedef struct VhostUserConfig { uint8_t region[VHOST_USER_MAX_CONFIG_SIZE]; } VhostUserConfig; -enum VhostUserNvmeQueueTypes { - VHOST_USER_NVME_SUBMISSION_QUEUE = 1, - VHOST_USER_NVME_COMPLETION_QUEUE = 2, -}; - -typedef struct VhostUserNvmeIO { - enum VhostUserNvmeQueueTypes queue_type; - uint32_t qid; - uint32_t tail_head; -} VhostUserNvmeIO; - typedef struct VhostUserMsg { VhostUserRequest request; @@ -162,7 +151,6 @@ typedef struct VhostUserMsg { } cmd; uint8_t buf[4096]; } nvme; - struct VhostUserNvmeIO nvme_io; } payload; int fds[VHOST_MEMORY_MAX_NREGIONS]; } __attribute((packed)) VhostUserMsg;