diff --git a/app/nvmf_tgt/conf.c b/app/nvmf_tgt/conf.c
index 91766919b..e2eec62d9 100644
--- a/app/nvmf_tgt/conf.c
+++ b/app/nvmf_tgt/conf.c
@@ -44,6 +44,7 @@
 #include "nvmf/transport.h"
 #include "spdk/conf.h"
 #include "spdk/log.h"
+#include "spdk/bdev.h"
 
 #define MAX_LISTEN_ADDRESSES 255
 #define MAX_HOSTS 255
@@ -314,6 +315,20 @@ attach_cb(void *cb_ctx, struct spdk_pci_device *dev, struct spdk_nvme_ctrlr *ctr
 	}
 }
 
+static int
+spdk_nvmf_validate_sn(const char *sn)
+{
+	size_t len;
+
+	len = strlen(sn);
+	if (len > MAX_SN_LEN) {
+		SPDK_ERRLOG("Invalid sn \"%s\": length %zu > max %d\n", sn, len, MAX_SN_LEN);
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 spdk_nvmf_allocate_lcore(uint64_t mask, uint32_t lcore)
 {
@@ -377,9 +392,7 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
 	if (strcasecmp(mode, "Direct") == 0) {
 		subsystem->mode = NVMF_SUBSYSTEM_MODE_DIRECT;
 	} else if (strcasecmp(mode, "Virtual") == 0) {
-		nvmf_delete_subsystem(subsystem);
-		SPDK_ERRLOG("Virtual Subsystems are not yet supported.\n");
-		return -1;
+		subsystem->mode = NVMF_SUBSYSTEM_MODE_VIRTUAL;
 	} else {
 		nvmf_delete_subsystem(subsystem);
 		SPDK_ERRLOG("Invalid Subsystem mode: %s\n", mode);
@@ -457,6 +470,54 @@ spdk_nvmf_parse_subsystem(struct spdk_conf_section *sp)
 		if (spdk_nvme_probe(&ctx, probe_cb, attach_cb, NULL)) {
 			SPDK_ERRLOG("One or more controllers failed in spdk_nvme_probe()\n");
 		}
+	} else {
+		struct spdk_bdev *bdev;
+		const char *namespace, *sn, *val;
+
+		sn = spdk_conf_section_get_val(sp, "SN");
+		if (sn == NULL) {
+			SPDK_ERRLOG("Subsystem %d: missing serial number\n", sp->num);
+			nvmf_delete_subsystem(subsystem);
+			return -1;
+		}
+		if (spdk_nvmf_validate_sn(sn) != 0) {
+			nvmf_delete_subsystem(subsystem);
+			return -1;
+		}
+
+		namespace = spdk_conf_section_get_val(sp, "Namespace");
+		if (namespace == NULL) {
+			SPDK_ERRLOG("Subsystem %d: missing Namespace directive\n", sp->num);
+			nvmf_delete_subsystem(subsystem);
+			return -1;
+		}
+
+		subsystem->ctrlr.dev.virtual.ns_count = 0;
+		snprintf(subsystem->ctrlr.dev.virtual.sn, sizeof(subsystem->ctrlr.dev.virtual.sn), "%s", sn);
+		subsystem->ctrlr.ops = &spdk_nvmf_virtual_ctrlr_ops;
+
+		for (i = 0; i < MAX_VIRTUAL_NAMESPACE; i++) {
+			val = spdk_conf_section_get_nval(sp, "Namespace", i);
+			if (val == NULL) {
+				break;
+			}
+			namespace = spdk_conf_section_get_nmval(sp, "Namespace", i, 0);
+			if (!namespace) {
+				SPDK_ERRLOG("Namespace %d: missing block device\n", i);
+				nvmf_delete_subsystem(subsystem);
+				return -1;
+			}
+			bdev = spdk_bdev_get_by_name(namespace);
+			if (bdev == NULL) {
+				SPDK_ERRLOG("Namespace %d: block device \"%s\" not found\n", i, namespace);
+				nvmf_delete_subsystem(subsystem);
+				return -1;
+			}
+			if (spdk_nvmf_subsystem_add_ns(subsystem, bdev)) {
+				nvmf_delete_subsystem(subsystem);
+				return -1;
+			}
+		}
 	}
 	return 0;
 }
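
A note on the serial-number handling above: the destination field is declared as char sn[MAX_SN_LEN + 1], and snprintf() reserves one byte of its size argument for the NUL terminator, so the copy must be sized to the full field or a maximum-length serial number would be silently truncated. A minimal standalone sketch of the validate-then-copy pattern (only the MAX_SN_LEN value is taken from subsystem.h; everything else here is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MAX_SN_LEN 20               /* from lib/nvmf/subsystem.h */

    /* Mirrors spdk_nvmf_validate_sn(): accept serial numbers up to 20 bytes. */
    static int
    validate_sn(const char *sn)
    {
        size_t len = strlen(sn);

        if (len > MAX_SN_LEN) {
            fprintf(stderr, "Invalid sn \"%s\": length %zu > max %d\n",
                    sn, len, MAX_SN_LEN);
            return -1;
        }
        return 0;
    }

    int
    main(void)
    {
        char sn[MAX_SN_LEN + 1];        /* +1 for the NUL terminator */
        const char *input = "SPDK00000000000001";

        if (validate_sn(input) == 0) {
            /* sizeof(sn) == 21, so even a full 20-character SN fits intact. */
            snprintf(sn, sizeof(sn), "%s", input);
            printf("sn = \"%s\"\n", sn);
        }
        return 0;
    }
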
diff --git a/etc/spdk/nvmf.conf.in b/etc/spdk/nvmf.conf.in
index ab013e3aa..d5c684b3c 100644
--- a/etc/spdk/nvmf.conf.in
+++ b/etc/spdk/nvmf.conf.in
@@ -23,6 +23,16 @@
   # syslog facility
   LogFacility "local7"
 
+# Users may change this section to create a different number or size of
+# malloc LUNs.
+# This will generate 8 LUNs with a malloc-allocated backend.
+# Each LUN will be 64MB in size and they will be named
+# Malloc0 through Malloc7. Not all LUNs defined here are necessarily
+# used below.
+[Malloc]
+  NumberOfLuns 8
+  LunSizeInMB 64
+
 # Define NVMf protocol global options
 [Nvmf]
   # Set the maximum number of submission and completion queues per session.
@@ -63,6 +73,8 @@
 # - Exactly 1 NVMe directive specifying an NVMe device by PCI BDF. The
 #   PCI domain:bus:device.function can be replaced by "*" to indicate
 #   any PCI device.
+
+# Direct controller
 [Subsystem1]
   NQN nqn.2016-06.io.spdk:cnode1
   Core 0
@@ -72,11 +84,13 @@
   NVMe 0000:00:00.0
 
 # Multiple subsystems are allowed.
+# Virtual controller
 [Subsystem2]
   NQN nqn.2016-06.io.spdk:cnode2
   Core 0
-  Mode Direct
+  Mode Virtual
   Listen RDMA 192.168.2.21:4420
   Host nqn.2016-06.io.spdk:init
-  NVMe 0000:01:00.0
-
+  SN SPDK00000000000001
+  Namespace Malloc0
+  Namespace Malloc1
diff --git a/lib/nvmf/Makefile b/lib/nvmf/Makefile
index 08ae82cbe..84d05c2e0 100644
--- a/lib/nvmf/Makefile
+++ b/lib/nvmf/Makefile
@@ -36,9 +36,10 @@ include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
 CFLAGS += $(DPDK_INC)
 
 LIBNAME = nvmf
+
 C_SRCS = subsystem.c nvmf.c \
 	request.c session.c transport.c \
-	direct.c
+	direct.c virtual.c
 
 C_SRCS-$(CONFIG_RDMA) += rdma.c
diff --git a/lib/nvmf/direct.c b/lib/nvmf/direct.c
index 46e5f67eb..0a58858b0 100644
--- a/lib/nvmf/direct.c
+++ b/lib/nvmf/direct.c
@@ -247,9 +247,18 @@ nvmf_direct_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
 }
 
+static void
+nvmf_direct_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
+{
+	if (subsystem->ctrlr.dev.direct.ctrlr) {
+		spdk_nvme_detach(subsystem->ctrlr.dev.direct.ctrlr);
+	}
+}
+
 const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops = {
 	.ctrlr_get_data = nvmf_direct_ctrlr_get_data,
 	.process_admin_cmd = nvmf_direct_ctrlr_process_admin_cmd,
 	.process_io_cmd = nvmf_direct_ctrlr_process_io_cmd,
 	.poll_for_completions = nvmf_direct_ctrlr_poll_for_completions,
+	.detach = nvmf_direct_ctrlr_detach,
 };
diff --git a/lib/nvmf/session.c b/lib/nvmf/session.c
index a14436117..15b9bd17e 100644
--- a/lib/nvmf/session.c
+++ b/lib/nvmf/session.c
@@ -258,6 +258,7 @@ spdk_nvmf_session_connect(struct spdk_nvmf_conn *conn,
 	}
 
 	TAILQ_INIT(&session->connections);
+	session->kato = cmd->kato;
 	session->num_connections = 0;
 	session->subsys = subsystem;
 	session->max_connections_allowed = g_nvmf_tgt.max_queues_per_session;
diff --git a/lib/nvmf/session.h b/lib/nvmf/session.h
index db48ccbb4..7ba2bf496 100644
--- a/lib/nvmf/session.h
+++ b/lib/nvmf/session.h
@@ -80,7 +80,7 @@ struct nvmf_session {
 	TAILQ_HEAD(connection_q, spdk_nvmf_conn) connections;
 	int num_connections;
 	int max_connections_allowed;
-
+	uint32_t kato;
 	const struct spdk_nvmf_transport *transport;
 
 	/* This is filled in by calling the transport's
diff --git a/lib/nvmf/subsystem.c b/lib/nvmf/subsystem.c
index a6d574847..334886119 100644
--- a/lib/nvmf/subsystem.c
+++ b/lib/nvmf/subsystem.c
@@ -32,6 +32,7 @@
  */
 
 #include <string.h>
+#include <assert.h>
 
 #include "nvmf_internal.h"
 #include "session.h"
@@ -187,9 +188,8 @@ nvmf_delete_subsystem_poller_unreg(struct spdk_event *event)
 	if (subsystem->session) {
 		spdk_nvmf_session_destruct(subsystem->session);
 	}
-
-	if (subsystem->ctrlr.dev.direct.ctrlr) {
-		spdk_nvme_detach(subsystem->ctrlr.dev.direct.ctrlr);
+	if (subsystem->ctrlr.ops) {
+		subsystem->ctrlr.ops->detach(subsystem);
 	}
 
 	TAILQ_REMOVE(&g_subsystems, subsystem, entries);
@@ -342,3 +342,21 @@
 
 	return 0;
 }
+
+int
+spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev)
+{
+	int i = 0;
+
+	assert(subsystem->mode == NVMF_SUBSYSTEM_MODE_VIRTUAL);
+	while (i < MAX_VIRTUAL_NAMESPACE && subsystem->ctrlr.dev.virtual.ns_list[i]) {
+		i++;
+	}
+	if (i == MAX_VIRTUAL_NAMESPACE) {
+		SPDK_ERRLOG("spdk_nvmf_subsystem_add_ns() failed\n");
+		return -1;
+	}
+	subsystem->ctrlr.dev.virtual.ns_list[i] = bdev;
+	subsystem->ctrlr.dev.virtual.ns_count++;
+	return 0;
+}
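
spdk_nvmf_subsystem_add_ns() above fills the first free slot of a fixed-size array, and NVMe namespace IDs are 1-based, so slot i ends up serving NSID i + 1. A self-contained sketch of the same slot-allocation policy, using a stand-in bdev type rather than the real struct spdk_bdev:

    #include <stdio.h>

    #define MAX_VIRTUAL_NAMESPACE 16    /* from lib/nvmf/subsystem.h */

    struct fake_bdev { const char *name; };                  /* stand-in for struct spdk_bdev */

    static struct fake_bdev *ns_list[MAX_VIRTUAL_NAMESPACE]; /* NULL == free slot */
    static int ns_count;

    /* Same policy as spdk_nvmf_subsystem_add_ns(): first free slot, bounded. */
    static int
    add_ns(struct fake_bdev *bdev)
    {
        int i = 0;

        while (i < MAX_VIRTUAL_NAMESPACE && ns_list[i] != NULL) {
            i++;
        }
        if (i == MAX_VIRTUAL_NAMESPACE) {
            return -1;                  /* subsystem is full */
        }
        ns_list[i] = bdev;
        ns_count++;
        return i + 1;                   /* NSIDs are 1-based */
    }

    int
    main(void)
    {
        struct fake_bdev m0 = { "Malloc0" }, m1 = { "Malloc1" };

        printf("%s -> nsid %d\n", m0.name, add_ns(&m0));
        printf("%s -> nsid %d\n", m1.name, add_ns(&m1));
        return 0;
    }
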
diff --git a/lib/nvmf/subsystem.h b/lib/nvmf/subsystem.h
index e710c58ab..7593cec2d 100644
--- a/lib/nvmf/subsystem.h
+++ b/lib/nvmf/subsystem.h
@@ -46,6 +46,7 @@ struct spdk_nvmf_request;
 struct nvmf_session;
 
 #define MAX_VIRTUAL_NAMESPACE 16
+#define MAX_SN_LEN 20
 
 enum spdk_nvmf_subsystem_mode {
 	NVMF_SUBSYSTEM_MODE_DIRECT = 0,
@@ -64,11 +65,6 @@ struct spdk_nvmf_host {
 	TAILQ_ENTRY(spdk_nvmf_host) link;
 };
 
-struct spdk_nvmf_ns {
-	uint32_t nsid;
-	struct spdk_bdev *bdev;
-};
-
 struct spdk_nvmf_ctrlr_ops {
 	/**
 	 * Get NVMe identify controller data.
@@ -89,6 +85,11 @@ struct spdk_nvmf_ctrlr_ops {
 	 * Poll for completions.
 	 */
 	void (*poll_for_completions)(struct nvmf_session *session);
+
+	/**
+	 * Detach the controller.
+	 */
+	void (*detach)(struct spdk_nvmf_subsystem *subsystem);
 };
 
 struct spdk_nvmf_controller {
@@ -101,7 +102,8 @@ struct spdk_nvmf_controller {
 
 		struct {
 			struct nvmf_session *session;
-			struct spdk_nvmf_ns *ns_list[MAX_VIRTUAL_NAMESPACE];
+			char sn[MAX_SN_LEN + 1];
+			struct spdk_bdev *ns_list[MAX_VIRTUAL_NAMESPACE];
 			uint16_t ns_count;
 		} virtual;
 	} dev;
@@ -167,5 +169,8 @@ spdk_format_discovery_log(struct spdk_nvmf_discovery_log_page *disc_log, uint32_
 void
 spdk_nvmf_subsystem_poll(struct spdk_nvmf_subsystem *subsystem);
 
+int
+spdk_nvmf_subsystem_add_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_bdev *bdev);
 extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_direct_ctrlr_ops;
+extern const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops;
 #endif /* SPDK_NVMF_SUBSYSTEM_H */
diff --git a/lib/nvmf/virtual.c b/lib/nvmf/virtual.c
new file mode 100644
index 000000000..d6e065505
--- /dev/null
+++ b/lib/nvmf/virtual.c
@@ -0,0 +1,514 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) Intel Corporation.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <endian.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "subsystem.h"
+#include "session.h"
+#include "request.h"
+#include "spdk/log.h"
+#include "spdk/nvme.h"
+#include "spdk/nvmf_spec.h"
+#include "spdk/trace.h"
+#include "spdk/scsi_spec.h"
+#include "spdk/string.h"
+
+#define MIN_KEEP_ALIVE_TIMEOUT 10000
+#define MODEL_NUMBER "SPDK Virtual Controller"
+#define FW_VERSION "FFFFFFFF"
+
+/* read command dword 12 */
+struct __attribute__((packed)) nvme_read_cdw12 {
+	uint16_t nlb;		/* number of logical blocks */
+	uint16_t rsvd : 10;
+	uint8_t prinfo : 4;	/* protection information field */
+	uint8_t fua : 1;	/* force unit access */
+	uint8_t lr : 1;		/* limited retry */
+};
+
+static void nvmf_virtual_set_dsm(struct spdk_nvmf_subsystem *subsys)
+{
+	int i;
+
+	for (i = 0; i < subsys->ctrlr.dev.virtual.ns_count; i++) {
+		if (!strncasecmp(subsys->ctrlr.dev.virtual.ns_list[i]->name, "Nvme", 4)) {
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	if (i == subsys->ctrlr.dev.virtual.ns_count) {
+		subsys->session->vcdata.oncs.dsm = 1;
+	}
+}
+
+static void
+nvmf_virtual_ctrlr_get_data(struct nvmf_session *session)
+{
+	struct spdk_nvmf_subsystem *subsys = session->subsys;
+
+	memset(&session->vcdata, 0, sizeof(struct spdk_nvme_ctrlr_data));
+	spdk_strcpy_pad(session->vcdata.fr, FW_VERSION, sizeof(session->vcdata.fr), ' ');
+	spdk_strcpy_pad(session->vcdata.mn, MODEL_NUMBER, sizeof(session->vcdata.mn), ' ');
+	session->vcdata.vid = 0x8086;
+	session->vcdata.ssvid = 0x8086;
+	spdk_strcpy_pad(session->vcdata.sn, subsys->ctrlr.dev.virtual.sn, sizeof(session->vcdata.sn), ' ');
+	session->vcdata.rab = 6;
+	session->vcdata.ver.bits.mjr = 1;
+	session->vcdata.ver.bits.mnr = 2;
+	session->vcdata.ver.bits.ter = 1;
+	session->vcdata.ctratt.host_id_exhid_supported = 1;
+	session->vcdata.aerl = 0;
+	session->vcdata.frmw.slot1_ro = 1;
+	session->vcdata.frmw.num_slots = 1;
+	session->vcdata.lpa.edlp = 1;
+	session->vcdata.elpe = 127;
+	session->vcdata.sqes.min = 0x06;
+	session->vcdata.sqes.max = 0x06;
+	session->vcdata.cqes.min = 0x04;
+	session->vcdata.cqes.max = 0x04;
+	session->vcdata.maxcmd = 1024;
+	session->vcdata.nn = subsys->ctrlr.dev.virtual.ns_count;
+	session->vcdata.vwc.present = 1;
+	session->vcdata.sgls.supported = 1;
+	strncpy(session->vcdata.subnqn, session->subsys->subnqn, sizeof(session->vcdata.subnqn));
+	nvmf_virtual_set_dsm(subsys);
+}
+
+static void
+nvmf_virtual_ctrlr_poll_for_completions(struct nvmf_session *session)
+{
+	return;
+}
+
+static void
+nvmf_virtual_ctrlr_complete_cmd(spdk_event_t event)
+{
+	struct spdk_bdev_io *bdev_io = spdk_event_get_arg2(event);
+	struct spdk_nvmf_request *req = spdk_event_get_arg1(event);
+	enum spdk_bdev_io_status status = bdev_io->status;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+	if (cmd->opc == SPDK_NVME_OPC_DATASET_MANAGEMENT) {
+		free(bdev_io->u.unmap.unmap_bdesc);
+	}
+
+	if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
+		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+	} else {
+		response->status.sc = SPDK_NVME_SC_SUCCESS;
+	}
+	spdk_nvmf_request_complete(req);
+	spdk_bdev_free_io(bdev_io);
+}
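
nvmf_virtual_ctrlr_complete_cmd() collapses every bdev-level failure into one generic NVMe status. A small sketch of that mapping, with local stand-in enums in place of the SPDK bdev status and NVMe status code definitions:

    #include <stdio.h>

    /* Stand-ins for the SPDK enums used by the completion path. */
    enum bdev_io_status { BDEV_IO_SUCCESS, BDEV_IO_FAILED };
    enum nvme_status_code { NVME_SC_SUCCESS = 0x00, NVME_SC_INTERNAL_DEVICE_ERROR = 0x06 };

    /* Same policy as nvmf_virtual_ctrlr_complete_cmd(): success passes
     * through, any bdev-level failure becomes Internal Device Error. */
    static enum nvme_status_code
    map_bdev_status(enum bdev_io_status status)
    {
        return (status == BDEV_IO_SUCCESS) ? NVME_SC_SUCCESS
                                           : NVME_SC_INTERNAL_DEVICE_ERROR;
    }

    int
    main(void)
    {
        printf("ok  -> sc 0x%02x\n", map_bdev_status(BDEV_IO_SUCCESS));
        printf("err -> sc 0x%02x\n", map_bdev_status(BDEV_IO_FAILED));
        return 0;
    }
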
+
+static int
+nvmf_virtual_ctrlr_admin_identify_nslist(struct spdk_nvmf_controller *ctrlr,
+					 struct spdk_nvmf_request *req)
+{
+	struct spdk_nvme_ns_list *ns_list;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	uint32_t req_ns_id = cmd->nsid;
+	uint32_t i, num_ns, count = 0;
+
+	if (req_ns_id >= 0xfffffffeUL) {
+		return -1;
+	}
+	memset(req->data, 0, req->length);
+	num_ns = ctrlr->dev.virtual.ns_count;
+
+	ns_list = (struct spdk_nvme_ns_list *)req->data;
+	for (i = 1; i <= num_ns; i++) {
+		if (i <= req_ns_id) {
+			continue;
+		}
+		ns_list->ns_list[count++] = i;
+		if (count == sizeof(*ns_list) / sizeof(uint32_t)) {
+			break;
+		}
+	}
+	return 0;
+}
+
+static int
+nvmf_virtual_ctrlr_get_log_page(struct spdk_nvmf_request *req)
+{
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	if (req->data == NULL) {
+		SPDK_ERRLOG("get log command with no buffer\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	switch (cmd->cdw10 & 0xFF) {
+	case SPDK_NVME_LOG_ERROR:
+	case SPDK_NVME_LOG_HEALTH_INFORMATION:
+	case SPDK_NVME_LOG_FIRMWARE_SLOT:
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	default:
+		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+}
+
+static int
+nvmf_virtual_ctrlr_identify(struct spdk_nvmf_request *req)
+{
+	uint32_t nsid;
+	uint32_t blen;
+	uint32_t shift = 0;
+	int rc = 0;
+	struct spdk_nvme_ns_data *nsdata;
+
+	struct nvmf_session *session = req->conn->sess;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+	struct spdk_nvmf_subsystem *subsystem = session->subsys;
+
+	if (req->data == NULL || req->length < 4096) {
+		SPDK_ERRLOG("identify command with invalid buffer\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_NS) {
+		nsid = cmd->nsid;
+		if (nsid > subsystem->ctrlr.dev.virtual.ns_count || nsid == 0) {
+			SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
+			response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		}
+
+		nsdata = (struct spdk_nvme_ns_data *)req->data;
+		memset(nsdata, 0, sizeof(*nsdata));
+		nsdata->nsze = subsystem->ctrlr.dev.virtual.ns_list[nsid - 1]->blockcnt;
+		nsdata->ncap = subsystem->ctrlr.dev.virtual.ns_list[nsid - 1]->blockcnt;
+		nsdata->nuse = subsystem->ctrlr.dev.virtual.ns_list[nsid - 1]->blockcnt;
+		nsdata->nlbaf = 0;
+		nsdata->flbas.format = 0;
+		nsdata->flbas.extended = 0;
+		nsdata->nmic.can_share = 1;
+		blen = subsystem->ctrlr.dev.virtual.ns_list[nsid - 1]->blocklen;
+		shift = nvmf_u32log2(blen);
+		nsdata->lbaf[0].lbads = shift;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	} else if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_CTRLR) {
+		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Identify Controller\n");
+		/* pull from virtual controller context */
+		memcpy(req->data, &session->vcdata, sizeof(struct spdk_nvme_ctrlr_data));
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	} else if ((cmd->cdw10 & 0xFF) == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+		rc = nvmf_virtual_ctrlr_admin_identify_nslist(&subsystem->ctrlr, req);
+		if (rc < 0) {
+			SPDK_ERRLOG("Invalid Namespace or Format\n");
+			response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+		}
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	} else {
+		SPDK_ERRLOG("identify command with invalid code\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+}
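
Identify Namespace reports the block size as a power-of-two exponent (lbaf[0].lbads), which is why the code stores nvmf_u32log2(blocklen) rather than the raw block length. A sketch of the computation; nvmf_u32log2() is SPDK-internal, so the helper below is a reimplementation for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Integer log2 of a power-of-two block length, as used for lbads. */
    static uint32_t
    u32log2(uint32_t x)
    {
        uint32_t shift = 0;

        while (x > 1) {
            x >>= 1;
            shift++;
        }
        return shift;
    }

    int
    main(void)
    {
        /* 512-byte blocks -> lbads 9; 4096-byte blocks -> lbads 12. */
        printf("512  -> lbads %u\n", u32log2(512));
        printf("4096 -> lbads %u\n", u32log2(4096));
        return 0;
    }
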
+
+static int
+nvmf_virtual_ctrlr_get_features(struct spdk_nvmf_request *req)
+{
+	uint8_t feature;
+	uint32_t nr_io_queues;
+	struct nvmf_session *session = req->conn->sess;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	feature = cmd->cdw10 & 0xff; /* mask out the FID value */
+	switch (feature) {
+	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
+		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Get Features - Number of Queues\n");
+		nr_io_queues = session->max_connections_allowed - 1;
+		/* Number of IO queues has a zero based value */
+		response->cdw0 = ((nr_io_queues - 1) << 16) |
+				 (nr_io_queues - 1);
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
+		response->cdw0 = 1;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
+		response->cdw0 = session->kato;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	default:
+		SPDK_ERRLOG("get features command with invalid code\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+}
+
+static int
+nvmf_virtual_ctrlr_set_features(struct spdk_nvmf_request *req)
+{
+	uint8_t feature;
+	uint32_t nr_io_queues = 0;
+	struct nvmf_session *session = req->conn->sess;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	feature = cmd->cdw10 & 0xff; /* mask out the FID value */
+	switch (feature) {
+	case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
+		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Set Features - Number of Queues, cdw11 0x%x\n", cmd->cdw11);
+		nr_io_queues = session->max_connections_allowed - 1;
+		/* verify that the controller is ready to process commands */
+		if (session->num_connections > 1) {
+			SPDK_TRACELOG(SPDK_TRACE_NVMF, "Queue pairs already active!\n");
+			response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+		} else {
+			/* Number of IO queues has a zero based value */
+			response->cdw0 = ((nr_io_queues - 1) << 16) |
+					 (nr_io_queues - 1);
+		}
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
+		if (cmd->cdw11 == 0) {
+			response->status.sc = SPDK_NVME_SC_KEEP_ALIVE_INVALID;
+		} else if (cmd->cdw11 < MIN_KEEP_ALIVE_TIMEOUT) {
+			session->kato = MIN_KEEP_ALIVE_TIMEOUT;
+		} else {
+			session->kato = cmd->cdw11;
+		}
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	default:
+		SPDK_ERRLOG("set features command with invalid code\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+}
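
Both the Get Features and Set Features handlers for Number of Queues answer with zero-based counts packed into cdw0: completion queues in bits 31:16, submission queues in bits 15:0. A sketch of that encoding, assuming MaxQueuesPerSession 4 (one admin queue plus three I/O queues) as in the test configuration:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a one-based I/O queue count into cdw0 the way the controller
     * does: zero-based values, NCQA in bits 31:16, NSQA in bits 15:0. */
    static uint32_t
    pack_num_queues(uint32_t nr_io_queues)
    {
        return ((nr_io_queues - 1) << 16) | (nr_io_queues - 1);
    }

    int
    main(void)
    {
        /* MaxQueuesPerSession 4 => 1 admin queue + 3 I/O queues. */
        uint32_t nr_io_queues = 4 - 1;
        uint32_t cdw0 = pack_num_queues(nr_io_queues);

        printf("cdw0 = 0x%08x (nsqa %u, ncqa %u)\n",
               cdw0, (cdw0 & 0xffff) + 1, (cdw0 >> 16) + 1);
        return 0;
    }
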
+
+static int
+nvmf_virtual_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
+{
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	/* pre-set response details for this command */
+	response->status.sc = SPDK_NVME_SC_SUCCESS;
+
+	switch (cmd->opc) {
+	case SPDK_NVME_OPC_GET_LOG_PAGE:
+		return nvmf_virtual_ctrlr_get_log_page(req);
+	case SPDK_NVME_OPC_IDENTIFY:
+		return nvmf_virtual_ctrlr_identify(req);
+	case SPDK_NVME_OPC_GET_FEATURES:
+		return nvmf_virtual_ctrlr_get_features(req);
+	case SPDK_NVME_OPC_SET_FEATURES:
+		return nvmf_virtual_ctrlr_set_features(req);
+	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
+		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Async Event Request\n");
+		/* TODO: Just release the request as consumed. AER events will never
+		 * be triggered. */
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE;
+	case SPDK_NVME_OPC_KEEP_ALIVE:
+		SPDK_TRACELOG(SPDK_TRACE_NVMF, "Keep Alive\n");
+		/*
+		 * To handle keep alive just clear or reset the
+		 * session based keep alive duration counter.
+		 * When added, a separate timer based process
+		 * will monitor if the time since last recorded
+		 * keep alive has exceeded the max duration and
+		 * take appropriate action.
+		 */
+		//session->keep_alive_timestamp = ;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+
+	case SPDK_NVME_OPC_CREATE_IO_SQ:
+	case SPDK_NVME_OPC_CREATE_IO_CQ:
+	case SPDK_NVME_OPC_DELETE_IO_SQ:
+	case SPDK_NVME_OPC_DELETE_IO_CQ:
+		SPDK_ERRLOG("Admin opc 0x%02X not allowed in NVMf\n", cmd->opc);
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	default:
+		SPDK_ERRLOG("Unsupported admin command\n");
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+}
+
+static int
+nvmf_virtual_ctrlr_rw_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+{
+	uint64_t lba_address;
+	uint64_t blockcnt;
+	off_t offset;
+	uint64_t llen;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+	struct nvme_read_cdw12 *cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;
+
+	blockcnt = bdev->blockcnt;
+	lba_address = cmd->cdw11;
+	lba_address = (lba_address << 32) + cmd->cdw10;
+	offset = lba_address * bdev->blocklen;
+	llen = cdw12->nlb + 1;
+
+	if (lba_address >= blockcnt || llen > blockcnt || lba_address > (blockcnt - llen)) {
+		SPDK_ERRLOG("end of media\n");
+		response->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	if (cmd->opc == SPDK_NVME_OPC_READ) {
+		spdk_trace_record(TRACE_NVMF_LIB_READ_START, 0, 0, (uint64_t)req, 0);
+		if (spdk_bdev_read(bdev, req->data, req->length, offset, nvmf_virtual_ctrlr_complete_cmd,
+				   req) == NULL) {
+			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		}
+	} else {
+		spdk_trace_record(TRACE_NVMF_LIB_WRITE_START, 0, 0, (uint64_t)req, 0);
+		if (spdk_bdev_write(bdev, req->data, req->length, offset, nvmf_virtual_ctrlr_complete_cmd,
+				    req) == NULL) {
+			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		}
+	}
+	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
+
+}
+
+static int
+nvmf_virtual_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+{
+
+	uint64_t nbytes;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	nbytes = bdev->blockcnt * bdev->blocklen;
+	if (spdk_bdev_flush(bdev, 0, nbytes, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
+		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
+}
+
+static int
+nvmf_virtual_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_nvmf_request *req)
+{
+	int i;
+	uint32_t attribute;
+	uint16_t nr;
+	struct spdk_scsi_unmap_bdesc *unmap;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	nr = ((cmd->cdw10 & 0x000000ff) + 1);
+	attribute = cmd->cdw11 & 0x00000007;
+	if (attribute == SPDK_NVME_DSM_ATTR_DEALLOCATE) {
+		struct spdk_nvme_dsm_range *dsm_range = (struct spdk_nvme_dsm_range *)req->data;
+		unmap = calloc(nr, sizeof(*unmap));
+		if (unmap == NULL) {
+			SPDK_ERRLOG("memory allocation failure\n");
+			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		}
+
+		for (i = 0; i < nr; i++) {
+			unmap[i].lba = htobe64(dsm_range[i].starting_lba);
+			unmap[i].block_count = htobe32(dsm_range[i].length);
+		}
+		if (spdk_bdev_unmap(bdev, unmap, nr, nvmf_virtual_ctrlr_complete_cmd, req) == NULL) {
+			free(unmap);
+			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+		}
+	} else {
+		SPDK_ERRLOG("dsm attribute 0x%x is not supported yet\n", attribute);
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
+}
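
The Dataset Management path converts each little-endian NVMe DSM range into a big-endian SCSI unmap block descriptor, hence the htobe64()/htobe32() calls. A standalone sketch of one such conversion, with local struct definitions standing in for spdk_nvme_dsm_range and spdk_scsi_unmap_bdesc:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct spdk_nvme_dsm_range (little-endian, per NVMe). */
    struct dsm_range {
        uint32_t attributes;
        uint32_t length;        /* in logical blocks */
        uint64_t starting_lba;
    };

    /* Stand-in for struct spdk_scsi_unmap_bdesc (big-endian, per SCSI). */
    struct unmap_bdesc {
        uint64_t lba;
        uint32_t block_count;
        uint32_t reserved;
    };

    int
    main(void)
    {
        struct dsm_range range = { .attributes = 0, .length = 8, .starting_lba = 0x1000 };
        struct unmap_bdesc desc;

        /* Same conversion as nvmf_virtual_ctrlr_dsm_cmd() performs per range. */
        desc.lba = htobe64(range.starting_lba);
        desc.block_count = htobe32(range.length);
        desc.reserved = 0;

        printf("lba     (raw be64) = 0x%016llx\n", (unsigned long long)desc.lba);
        printf("blocks  (raw be32) = 0x%08x\n", desc.block_count);
        return 0;
    }
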
+
+static int
+nvmf_virtual_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
+{
+	uint32_t nsid;
+	struct spdk_bdev *bdev;
+	struct spdk_nvmf_subsystem *subsystem = req->conn->sess->subsys;
+	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
+
+	/* pre-set response details for this command */
+	response->status.sc = SPDK_NVME_SC_SUCCESS;
+	nsid = cmd->nsid;
+
+	if (nsid > subsystem->ctrlr.dev.virtual.ns_count || nsid == 0) {
+		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
+		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+
+	bdev = subsystem->ctrlr.dev.virtual.ns_list[nsid - 1];
+	switch (cmd->opc) {
+	case SPDK_NVME_OPC_READ:
+	case SPDK_NVME_OPC_WRITE:
+		return nvmf_virtual_ctrlr_rw_cmd(bdev, req);
+	case SPDK_NVME_OPC_FLUSH:
+		return nvmf_virtual_ctrlr_flush_cmd(bdev, req);
+	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
+		return nvmf_virtual_ctrlr_dsm_cmd(bdev, req);
+	default:
+		SPDK_ERRLOG("Unsupported IO command opc: %x\n", cmd->opc);
+		response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+	}
+}
+
+static void
+nvmf_virtual_ctrlr_detach(struct spdk_nvmf_subsystem *subsystem)
+{
+	return;
+}
+
+const struct spdk_nvmf_ctrlr_ops spdk_nvmf_virtual_ctrlr_ops = {
+	.ctrlr_get_data = nvmf_virtual_ctrlr_get_data,
+	.process_admin_cmd = nvmf_virtual_ctrlr_process_admin_cmd,
+	.process_io_cmd = nvmf_virtual_ctrlr_process_io_cmd,
+	.poll_for_completions = nvmf_virtual_ctrlr_poll_for_completions,
+	.detach = nvmf_virtual_ctrlr_detach,
+};
diff --git a/test/nvmf/filesystem/filesystem.sh b/test/nvmf/filesystem/filesystem.sh
index d2ab514cf..38cee74f1 100755
--- a/test/nvmf/filesystem/filesystem.sh
+++ b/test/nvmf/filesystem/filesystem.sh
@@ -29,6 +29,7 @@ if [ -e "/dev/nvme-fabrics" ]; then
 fi
 
 nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
 
 mkdir -p /mnt/device
 
@@ -64,6 +65,7 @@ done
 sync
 
 nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode2"
 
 trap - SIGINT SIGTERM EXIT
diff --git a/test/nvmf/fio/fio.sh b/test/nvmf/fio/fio.sh
index 4ce99de82..3722ae867 100755
--- a/test/nvmf/fio/fio.sh
+++ b/test/nvmf/fio/fio.sh
@@ -29,6 +29,7 @@ if [ -e "/dev/nvme-fabrics" ]; then
 fi
 
 nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
+nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode2" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
 
 $testdir/nvmf_fio.py 4096 1 write 1 verify
 $testdir/nvmf_fio.py 4096 1 randwrite 1 verify
@@ -37,8 +38,11 @@ $testdir/nvmf_fio.py 4096 128 randwrite 1 verify
 sync
 
 nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
+nvme disconnect -n "nqn.2016-06.io.spdk:cnode2"
 
 rm -f ./local-job0-0-verify.state
+rm -f ./local-job1-1-verify.state
+rm -f ./local-job2-2-verify.state
 
 trap - SIGINT SIGTERM EXIT
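
The I/O path these tests exercise routes every command through the same namespace check in nvmf_virtual_ctrlr_process_io_cmd(): NSID 0 and anything beyond ns_count are rejected, and NSID n maps to ns_list[n - 1]. A minimal sketch of that lookup, with stand-in bdevs named after the test configuration:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VIRTUAL_NAMESPACE 16

    struct fake_bdev { const char *name; };   /* stand-in for struct spdk_bdev */

    static struct fake_bdev malloc0 = { "Malloc0" }, malloc1 = { "Malloc1" };
    static struct fake_bdev *ns_list[MAX_VIRTUAL_NAMESPACE] = { &malloc0, &malloc1 };
    static uint16_t ns_count = 2;

    /* Same bounds check as nvmf_virtual_ctrlr_process_io_cmd(): NSIDs are
     * 1-based, so 0 and anything past ns_count are invalid. */
    static struct fake_bdev *
    lookup_ns(uint32_t nsid)
    {
        if (nsid == 0 || nsid > ns_count) {
            return NULL;
        }
        return ns_list[nsid - 1];
    }

    int
    main(void)
    {
        for (uint32_t nsid = 0; nsid <= 3; nsid++) {
            struct fake_bdev *bdev = lookup_ns(nsid);

            printf("nsid %u -> %s\n", nsid, bdev ? bdev->name : "invalid");
        }
        return 0;
    }
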
diff --git a/test/nvmf/nvmf.conf b/test/nvmf/nvmf.conf
index 2f4f6d445..55090acc7 100644
--- a/test/nvmf/nvmf.conf
+++ b/test/nvmf/nvmf.conf
@@ -5,6 +5,10 @@
 [Rpc]
   Enable Yes
 
+[Malloc]
+  NumberOfLuns 2
+  LunSizeInMB 64
+
 [Nvmf]
   MaxQueuesPerSession 4
 
@@ -13,3 +17,11 @@
   Mode Direct
   Listen RDMA 192.168.100.8:4420
   NVMe *
+
+[Subsystem2]
+  NQN "nqn.2016-06.io.spdk:cnode2"
+  Mode Virtual
+  Listen RDMA 192.168.100.8:4420
+  SN SPDK00000000000001
+  Namespace Malloc0
+  Namespace Malloc1
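
One behavioral note on the keep-alive feature used with these configurations: Set Features / Keep Alive Timer rejects a zero timeout and silently raises any value below MIN_KEEP_ALIVE_TIMEOUT (10000 ms). A sketch of that clamping rule:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_KEEP_ALIVE_TIMEOUT 10000   /* ms, from lib/nvmf/virtual.c */

    /* Mirrors the Set Features / Keep Alive Timer handling: 0 is invalid,
     * small values are raised to the minimum, larger values pass through.
     * Returns -1 for the invalid case, otherwise the effective kato. */
    static int64_t
    clamp_kato(uint32_t requested_ms)
    {
        if (requested_ms == 0) {
            return -1;                     /* SPDK_NVME_SC_KEEP_ALIVE_INVALID */
        }
        if (requested_ms < MIN_KEEP_ALIVE_TIMEOUT) {
            return MIN_KEEP_ALIVE_TIMEOUT;
        }
        return requested_ms;
    }

    int
    main(void)
    {
        printf("kato 0     -> %lld\n", (long long)clamp_kato(0));
        printf("kato 5000  -> %lld\n", (long long)clamp_kato(5000));
        printf("kato 60000 -> %lld\n", (long long)clamp_kato(60000));
        return 0;
    }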