vhost/nvme: remove VHOST_USER_NVME_IO_CMD socket message

VHOST_USER_NVME_IO_CMD was designed to deliver the NVMe I/O command
header to the slave target via the socket.  This path was only needed
for BIOS, which does not enable the Shadow Doorbell Buffer feature.
Since we already enable the shadow BAR feature to support older guest
kernels without Shadow Doorbell Buffer support, the message is no
longer required, so remove it.

Change-Id: I72e55f11176af2405c8cc09da404a9f4e5e71526
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/420821
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Changpeng Liu authored 2018-08-01 21:55:43 -04:00, committed by Darek Stojaczyk
parent fbc53ae3fb, commit 2bebd09bd7
2 changed files with 0 additions and 29 deletions
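
For context, with the shadow BAR in place the slave target learns about new
submission-queue entries by reading doorbell values the guest writes into
shared memory, rather than by receiving a per-I/O socket message.  The sketch
below illustrates that polling idea only; the structure and names (nvme_sq,
shadow_tail) are assumptions for illustration, not the actual SPDK/DPDK code
touched by this commit.

/*
 * Illustrative sketch: a target-side poller that notices new submission-queue
 * entries by reading a doorbell value the guest writes into guest-mapped
 * memory, instead of waiting for a VHOST_USER_NVME_IO_CMD socket message.
 * The struct layout and names are assumptions, not the real vhost-nvme
 * structures.
 */
#include <stdbool.h>
#include <stdint.h>

struct nvme_sq {
	volatile uint32_t *shadow_tail; /* doorbell value written by the guest */
	uint32_t old_tail;              /* last tail the target has consumed */
	uint16_t qid;                   /* submission queue id */
};

/* Returns true when the guest has rung the doorbell since the last poll. */
static bool
sq_has_new_entries(struct nvme_sq *sq)
{
	uint32_t tail = *sq->shadow_tail;

	if (tail != sq->old_tail) {
		sq->old_tail = tail;
		return true;
	}
	return false;
}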

@@ -84,7 +84,6 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_NVME_SET_CQ_CALL] = "VHOST_USER_NVME_SET_CQ_CALL",
 	[VHOST_USER_NVME_GET_CAP] = "VHOST_USER_NVME_GET_CAP",
 	[VHOST_USER_NVME_START_STOP] = "VHOST_USER_NVME_START_STOP",
-	[VHOST_USER_NVME_IO_CMD] = "VHOST_USER_NVME_IO_CMD",
 	[VHOST_USER_NVME_SET_BAR_MR] = "VHOST_USER_NVME_SET_BAR_MR"
 };
@@ -1086,14 +1085,6 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
 	return alloc_vring_queue(dev, vring_idx);
 }
 
-static int
-vhost_user_nvme_io_request_passthrough(struct virtio_net *dev,
-			uint16_t qid, uint16_t tail_head,
-			bool is_submission_queue)
-{
-	return -1;
-}
-
 static int
 vhost_user_nvme_admin_passthrough(struct virtio_net *dev,
 			void *cmd, void *cqe, void *buf)
@@ -1221,8 +1212,6 @@ vhost_user_msg_handler(int vid, int fd)
 	uint8_t cqe[16];
 	uint8_t cmd[64];
 	uint8_t buf[4096];
-	uint16_t qid, tail_head;
-	bool is_submission_queue;
 
 	dev = get_device(vid);
 	if (dev == NULL)
@@ -1327,12 +1316,6 @@ vhost_user_msg_handler(int vid, int fd)
 			}
 		}
 		break;
-	case VHOST_USER_NVME_IO_CMD:
-		qid = msg.payload.nvme_io.qid;
-		tail_head = msg.payload.nvme_io.tail_head;
-		is_submission_queue = (msg.payload.nvme_io.queue_type == VHOST_USER_NVME_SUBMISSION_QUEUE) ? true : false;
-		vhost_user_nvme_io_request_passthrough(dev, qid, tail_head, is_submission_queue);
-		break;
 	case VHOST_USER_NVME_SET_BAR_MR:
 		ret = vhost_user_nvme_set_bar_mr(dev, &msg);
 		break;
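
With the case above removed, an incoming VHOST_USER_NVME_IO_CMD request is no
longer dispatched at all; in a switch-based handler like vhost_user_msg_handler
it falls through to whatever handling exists for unrecognized message types.
A minimal sketch of that dispatch pattern follows; the enum values, names and
return codes are made up for illustration, not the real protocol constants.

/*
 * Sketch of switch-based vhost-user message dispatch: each request ID gets
 * its own case, so deleting a case (as done above for the old IO_CMD
 * message) makes that ID land in the default path like any other unknown
 * request.  All names and values here are illustrative assumptions.
 */
#include <stdint.h>

enum sketch_request {
	SKETCH_NVME_ADMIN      = 1,
	SKETCH_NVME_SET_BAR_MR = 2,
};

static int
sketch_dispatch(uint32_t request)
{
	switch (request) {
	case SKETCH_NVME_ADMIN:
		return 0;  /* would call the admin-command passthrough handler */
	case SKETCH_NVME_SET_BAR_MR:
		return 0;  /* would call the BAR memory-region handler */
	default:
		return -1; /* unknown or removed message types end up here */
	}
}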

@@ -127,17 +127,6 @@ typedef struct VhostUserConfig {
 	uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
 } VhostUserConfig;
 
-enum VhostUserNvmeQueueTypes {
-	VHOST_USER_NVME_SUBMISSION_QUEUE = 1,
-	VHOST_USER_NVME_COMPLETION_QUEUE = 2,
-};
-
-typedef struct VhostUserNvmeIO {
-	enum VhostUserNvmeQueueTypes queue_type;
-	uint32_t qid;
-	uint32_t tail_head;
-} VhostUserNvmeIO;
-
 typedef struct VhostUserMsg {
 	VhostUserRequest request;
@@ -162,7 +151,6 @@ typedef struct VhostUserMsg {
 		} cmd;
 		uint8_t buf[4096];
 	} nvme;
-	struct VhostUserNvmeIO nvme_io;
 } payload;
 int fds[VHOST_MEMORY_MAX_NREGIONS];
} __attribute((packed)) VhostUserMsg;