nvme: Update nvme_spec.h to 1.2.1

This moves some definitions from nvmf_spec.h to
nvme_spec.h based on the latest publication.

Change-Id: I51b0abd16f7d034696239894aea5089f8ac70c40
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Ben Walker 2016-06-23 14:27:40 -07:00 committed by Benjamin Walker
parent 9a2f8eb234
commit 6b10df3576
8 changed files with 139 additions and 154 deletions

View File

@ -477,7 +477,7 @@ print_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_dev)
printf("Recommended Arb Burst: %d\n", cdata->rab);
printf("IEEE OUI Identifier: %02x %02x %02x\n",
cdata->ieee[0], cdata->ieee[1], cdata->ieee[2]);
printf("Multi-Interface Cap: %02x\n", cdata->mic);
printf("Multi-path I/O: %02x\n", *(int *)&cdata->cmic);
/* TODO: Use CAP.MPSMIN to determine true memory page size. */
printf("Max Data Transfer Size: ");
if (cdata->mdts == 0)
@ -546,12 +546,18 @@ print_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_dev)
printf("Scatter-Gather List\n");
printf(" SGL Command Set: %s\n",
cdata->sgls.supported ? "Supported" : "Not Supported");
printf(" SGL Keyed: %s\n",
cdata->sgls.keyed_sgl ? "Supported" : "Not Supported");
printf(" SGL Bit Bucket Descriptor: %s\n",
cdata->sgls.bit_bucket_descriptor_supported ? "Supported" : "Not Supported");
cdata->sgls.bit_bucket_descriptor ? "Supported" : "Not Supported");
printf(" SGL Metadata Pointer: %s\n",
cdata->sgls.metadata_pointer_supported ? "Supported" : "Not Supported");
cdata->sgls.metadata_pointer ? "Supported" : "Not Supported");
printf(" Oversized SGL: %s\n",
cdata->sgls.oversized_sgl_supported ? "Supported" : "Not Supported");
cdata->sgls.oversized_sgl ? "Supported" : "Not Supported");
printf(" SGL Metadata Address: %s\n",
cdata->sgls.metadata_address ? "Supported" : "Not Supported");
printf(" SGL Offset: %s\n",
cdata->sgls.sgl_offset ? "Supported" : "Not Supported");
printf("\n");
printf("Error Log\n");

View File

@ -292,21 +292,39 @@ enum spdk_nvme_sgl_descriptor_type {
SPDK_NVME_SGL_TYPE_BIT_BUCKET = 0x1,
SPDK_NVME_SGL_TYPE_SEGMENT = 0x2,
SPDK_NVME_SGL_TYPE_LAST_SEGMENT = 0x3,
/* 0x4 - 0xe reserved */
SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC = 0xf
SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK = 0x4,
/* 0x5 - 0xE reserved */
SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC = 0xF
};
/**
 * SGL descriptor subtype — qualifies how the descriptor's address
 * field is interpreted (NVMe 1.2.1).
 */
enum spdk_nvme_sgl_descriptor_subtype {
	/** Address field holds a memory address. */
	SPDK_NVME_SGL_SUBTYPE_ADDRESS	= 0x0,
	/** Address field holds an offset (used for in-capsule data). */
	SPDK_NVME_SGL_SUBTYPE_OFFSET	= 0x1,
};
/**
 * NVMe SGL descriptor (16 bytes, NVMe 1.2.1).
 *
 * Defect fixed: as rendered, the unkeyed view contained both the old
 * `type_specific : 4` field and its replacement `subtype : 4`, which makes
 * the packed struct 17 bytes and breaks the 16-byte size assertion that
 * follows this definition. The unkeyed view must carry exactly
 * length(32) + reserved(24) + subtype(4) + type(4) after the address.
 *
 * The union provides three views of the final 8 bytes:
 *  - generic: only the type/subtype nibbles are meaningful
 *  - unkeyed: data block / segment / last segment descriptors
 *  - keyed:   keyed data block descriptor (24-bit length + 32-bit key)
 */
struct __attribute__((packed)) spdk_nvme_sgl_descriptor {
	uint64_t address;
	union {
		struct {
			uint8_t reserved[7];
			/** SGL descriptor subtype */
			uint8_t subtype : 4;
			/** SGL descriptor type */
			uint8_t type : 4;
		} generic;

		struct {
			uint32_t length;
			uint8_t reserved[3];
			/** SGL descriptor subtype */
			uint8_t subtype : 4;
			/** SGL descriptor type */
			uint8_t type : 4;
		} unkeyed;

		struct {
			uint64_t length : 24;
			uint64_t key : 32;
			/** SGL descriptor subtype */
			uint64_t subtype : 4;
			/** SGL descriptor type */
			uint64_t type : 4;
		} keyed;
	};
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_sgl_descriptor) == 16, "Incorrect size");
@ -457,6 +475,19 @@ enum spdk_nvme_generic_command_status_code {
SPDK_NVME_SC_ABORTED_MISSING_FUSED = 0x0a,
SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b,
SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c,
SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR = 0x0d,
SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS = 0x0e,
SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID = 0x0f,
SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID = 0x10,
SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID = 0x11,
SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF = 0x12,
SPDK_NVME_SC_INVALID_PRP_OFFSET = 0x13,
SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED = 0x14,
SPDK_NVME_SC_INVALID_SGL_OFFSET = 0x16,
SPDK_NVME_SC_INVALID_SGL_SUBTYPE = 0x17,
SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT = 0x18,
SPDK_NVME_SC_KEEP_ALIVE_EXPIRED = 0x19,
SPDK_NVME_SC_KEEP_ALIVE_INVALID = 0x1A,
SPDK_NVME_SC_LBA_OUT_OF_RANGE = 0x80,
SPDK_NVME_SC_CAPACITY_EXCEEDED = 0x81,
@ -522,6 +553,8 @@ enum spdk_nvme_admin_opcode {
SPDK_NVME_OPC_NS_ATTACHMENT = 0x15,
SPDK_NVME_OPC_KEEP_ALIVE = 0x18,
SPDK_NVME_OPC_FORMAT_NVM = 0x80,
SPDK_NVME_OPC_SECURITY_SEND = 0x81,
SPDK_NVME_OPC_SECURITY_RECEIVE = 0x82,
@ -658,8 +691,13 @@ struct __attribute__((packed)) spdk_nvme_ctrlr_data {
/** ieee oui identifier */
uint8_t ieee[3];
/** multi-interface capabilities */
uint8_t mic;
/** controller multi-path I/O and namespace sharing capabilities */
struct {
uint8_t multi_port : 1;
uint8_t multi_host : 1;
uint8_t sr_iov : 1;
uint8_t reserved : 5;
} cmic;
/** maximum data transfer size */
uint8_t mdts;
@ -786,7 +824,11 @@ struct __attribute__((packed)) spdk_nvme_ctrlr_data {
uint8_t access_size;
} rpmbs;
uint8_t reserved2[196];
uint8_t reserved2[4];
uint16_t kas;
uint8_t reserved3[190];
/* bytes 512-703: nvm command set attributes */
@ -802,7 +844,7 @@ struct __attribute__((packed)) spdk_nvme_ctrlr_data {
uint8_t max : 4;
} cqes;
uint8_t reserved3[2];
uint16_t maxcmd;
/** number of namespaces */
uint32_t nn;
@ -854,16 +896,24 @@ struct __attribute__((packed)) spdk_nvme_ctrlr_data {
/** SGL support */
struct {
uint32_t supported : 1;
uint32_t reserved : 15;
uint32_t bit_bucket_descriptor_supported : 1;
uint32_t metadata_pointer_supported : 1;
uint32_t oversized_sgl_supported : 1;
uint32_t reserved0 : 1;
uint32_t keyed_sgl : 1;
uint32_t reserved1 : 13;
uint32_t bit_bucket_descriptor : 1;
uint32_t metadata_pointer : 1;
uint32_t oversized_sgl : 1;
uint32_t metadata_address : 1;
uint32_t sgl_offset : 1;
uint32_t reserved2: 11;
} sgls;
uint8_t reserved4[164];
uint8_t reserved4[228];
/* bytes 704-2047: i/o command set attributes */
uint8_t reserved5[1344];
uint8_t subnqn[256];
uint8_t reserved5[768];
uint8_t nvmf_specific[256];
/* bytes 2048-3071: power state descriptors */
struct spdk_nvme_power_state psd[32];

View File

@ -66,7 +66,7 @@ struct spdk_nvmf_capsule_rsp {
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_capsule_rsp) == 16, "Incorrect size");
/* Fabric Command Set */
#define SPDK_NVMF_FABRIC_OPCODE 0x7f
#define SPDK_NVME_OPC_FABRIC 0x7f
enum spdk_nvmf_fabric_cmd_types {
SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET = 0x00,
@ -149,7 +149,7 @@ enum spdk_nvmf_subsystem_types {
/**
* Connections shall be made over a fabric secure channel
*/
enum spdk_nvmf_tansport_requirements {
enum spdk_nvmf_transport_requirements {
SPDK_NVMF_TREQ_NOT_SPECIFIED = 0x0,
SPDK_NVMF_TREQ_REQUIRED = 0x1,
SPDK_NVMF_TREQ_NOT_REQUIRED = 0x2,
@ -455,8 +455,6 @@ struct spdk_nvmf_fabric_prop_set_rsp {
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_fabric_prop_set_rsp) == 16, "Incorrect size");
/* Overlays on the existing identify controller structure */
#define SPDK_NVMF_EXTENDED_CTRLR_DATA_OFFSET 1792
struct spdk_nvmf_extended_identify_ctrlr_data {
uint32_t ioccsz;
uint32_t iorcsz;
@ -467,29 +465,6 @@ struct spdk_nvmf_extended_identify_ctrlr_data {
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_extended_identify_ctrlr_data) == 256, "Incorrect size");
#define SPDK_NVMF_CTRLR_MAXCMD_OFFSET 514
struct spdk_nvmf_ctrlr_maxcmd {
uint16_t maxcmd;
};
#define SPDK_NVMF_CTRLR_KAS_OFFSET 320
struct spdk_nvmf_ctrlr_kas {
uint16_t kas;
};
struct spdk_nvmf_sgl_support {
uint32_t supported : 1;
uint32_t reserved1 : 1;
uint32_t keyed_sgls : 1;
uint32_t reserved2 : 13;
uint32_t bit_bucket_descriptor_supported : 1;
uint32_t metadata_pointer_supported : 1;
uint32_t oversized_sgl_supported : 1;
uint32_t single_aligned_sgl_supported : 1;
uint32_t address_as_offset_sgl_supported : 1;
uint32_t reserved3 : 11;
};
#define SPDK_NVMF_DISCOVERY_NQN "nqn.2014-08.org.nvmexpress.discovery"
struct spdk_nvmf_discovery_identify_data {
@ -548,38 +523,10 @@ struct spdk_nvmf_discovery_log_page {
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_discovery_log_page) == 1024, "Incorrect size");
/* Add an additional type of SGL */
#define SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK 0x4
/* Further, add SGL subtypes */
#define SPDK_NVME_SGL_SUBTYPE_ADDRESS 0x0
#define SPDK_NVME_SGL_SUBTYPE_OFFSET 0x1
struct spdk_nvmf_keyed_sgl_descriptor {
uint64_t address;
uint64_t length : 24;
uint64_t key : 32;
uint64_t subtype : 4;
uint64_t type : 4; /* SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK */
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_keyed_sgl_descriptor) == 16, "Incorrect size");
/* Add a new admin command */
#define SPDK_NVME_OPC_KEEP_ALIVE 0x18
/* Add new status codes */
#define SPDK_NVME_SC_SGL_OFFSET_INVALID 0x16
#define SPDK_NVME_SC_SGL_SUBTYPE_INVALID 0x17
#define SPDK_NVME_SC_HOSTID_INCONSISTENT 0x18
#define SPDK_NVME_SC_KEEP_ALIVE_EXPIRED 0x19
#define SPDK_NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID 0x1A
/* RDMA Fabric specific definitions below */
#define SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY 0xF
struct spdk_nvmf_rdma_request_private_data {
uint16_t recfmt; /* record format */
uint16_t qid; /* queue id */

View File

@ -704,7 +704,7 @@ _nvme_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_requ
sgl = tr->u.sgl;
req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_SGL;
req->cmd.dptr.sgl1.type_specific = 0;
req->cmd.dptr.sgl1.unkeyed.subtype = 0;
remaining_transfer_len = req->payload_size;
@ -723,10 +723,10 @@ _nvme_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_requ
length = nvme_min(remaining_transfer_len, length);
remaining_transfer_len -= length;
sgl->type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
sgl->length = length;
sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
sgl->unkeyed.length = length;
sgl->address = phys_addr;
sgl->type_specific = 0;
sgl->unkeyed.subtype = 0;
sgl++;
nseg++;
@ -739,14 +739,14 @@ _nvme_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_requ
* This means the SGL in the tracker is not used at all, so copy the first (and only)
* SGL element into SGL1.
*/
req->cmd.dptr.sgl1.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
req->cmd.dptr.sgl1.length = tr->u.sgl[0].length;
req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
} else {
/* For now we can only support 1 SGL segment in NVMe controller */
req->cmd.dptr.sgl1.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
req->cmd.dptr.sgl1.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
}
return 0;

View File

@ -484,9 +484,9 @@ static void nvmf_trace_command(struct spdk_nvmf_capsule_cmd *cap_hdr, enum conn_
SPDK_TRACELOG(SPDK_TRACE_NVMF, "NVMf %s%s Command:\n",
conn_type == CONN_TYPE_AQ ? "Admin" : "I/O",
cmd->opc == SPDK_NVMF_FABRIC_OPCODE ? " Fabrics" : "");
cmd->opc == SPDK_NVME_OPC_FABRIC ? " Fabrics" : "");
if (cmd->opc == SPDK_NVMF_FABRIC_OPCODE) {
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
opc = cap_hdr->fctype;
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: fctype 0x%02x\n", cap_hdr->fctype);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: cid 0x%x\n", cap_hdr->cid);
@ -506,21 +506,21 @@ static void nvmf_trace_command(struct spdk_nvmf_capsule_cmd *cap_hdr, enum conn_
}
if (spdk_nvme_opc_get_data_transfer(opc) != SPDK_NVME_DATA_NONE) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL type 0x%x\n", sgl->type);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL subtype 0x%x\n", sgl->type_specific);
if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL type 0x%x\n", sgl->generic.type);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL subtype 0x%x\n", sgl->generic.subtype);
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL address 0x%lx\n",
((struct spdk_nvmf_keyed_sgl_descriptor *)sgl)->address);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL key 0x%x\n",
((struct spdk_nvmf_keyed_sgl_descriptor *)sgl)->key);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL length 0x%x\n",
((struct spdk_nvmf_keyed_sgl_descriptor *)sgl)->length);
} else if (sgl->type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL %s 0x%" PRIx64 "\n",
sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_OFFSET ? "offset" : "address",
sgl->address);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL length 0x%x\n", sgl->length);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL key 0x%x\n",
sgl->keyed.key);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL length 0x%x\n",
sgl->keyed.length);
} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL %s 0x%" PRIx64 "\n",
sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET ? "offset" : "address",
sgl->address);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " SQE: SGL length 0x%x\n", sgl->unkeyed.length);
}
}
}
@ -714,7 +714,7 @@ spdk_nvmf_request_prep_data(struct nvmf_request *req)
enum spdk_nvme_data_transfer xfer;
int ret;
if (cmd->opc == SPDK_NVMF_FABRIC_OPCODE) {
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
xfer = spdk_nvme_opc_get_data_transfer(req->cmd->nvmf_cmd.fctype);
} else {
xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
@ -722,31 +722,30 @@ spdk_nvmf_request_prep_data(struct nvmf_request *req)
if (xfer != SPDK_NVME_DATA_NONE) {
struct spdk_nvme_sgl_descriptor *sgl = (struct spdk_nvme_sgl_descriptor *)&cmd->dptr.sgl1;
struct spdk_nvmf_keyed_sgl_descriptor *keyed_sgl = (struct spdk_nvmf_keyed_sgl_descriptor *)sgl;
if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
(sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
(sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Keyed data block: raddr 0x%" PRIx64 ", rkey 0x%x, length 0x%x\n",
keyed_sgl->address, keyed_sgl->key, keyed_sgl->length);
sgl->address, sgl->keyed.key, sgl->keyed.length);
if (keyed_sgl->length > rx_desc->bb_sgl.length) {
if (sgl->keyed.length > rx_desc->bb_sgl.length) {
SPDK_ERRLOG("SGL length 0x%x exceeds BB length 0x%x\n",
(uint32_t)keyed_sgl->length, rx_desc->bb_sgl.length);
sgl->keyed.length, rx_desc->bb_sgl.length);
return -1;
}
req->data = rx_desc->bb;
req->remote_addr = keyed_sgl->address;
req->rkey = keyed_sgl->key;
req->length = keyed_sgl->length;
} else if (sgl->type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
sgl->type_specific == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
req->remote_addr = sgl->address;
req->rkey = sgl->keyed.key;
req->length = sgl->keyed.length;
} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
uint64_t offset = sgl->address;
uint32_t max_len = rx_desc->bb_sgl.length;
SPDK_TRACELOG(SPDK_TRACE_RDMA, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
offset, sgl->length);
offset, sgl->unkeyed.length);
if (conn->type == CONN_TYPE_AQ) {
SPDK_ERRLOG("In-capsule data not allowed for admin queue\n");
@ -760,17 +759,17 @@ spdk_nvmf_request_prep_data(struct nvmf_request *req)
}
max_len -= (uint32_t)offset;
if (sgl->length > max_len) {
if (sgl->unkeyed.length > max_len) {
SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
sgl->length, max_len);
sgl->unkeyed.length, max_len);
return -1;
}
req->data = rx_desc->bb + offset;
req->length = sgl->length;
req->length = sgl->unkeyed.length;
} else {
SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
sgl->type, sgl->type_specific);
sgl->generic.type, sgl->generic.subtype);
return -1;
}
@ -787,7 +786,7 @@ spdk_nvmf_request_prep_data(struct nvmf_request *req)
* the backend NVMe device
*/
if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
if (sgl->type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK) {
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Issuing RDMA Read to get host data\n");
/* temporarily adjust SGE to only copy what the host is prepared to send. */
@ -825,7 +824,7 @@ spdk_nvmf_request_exec(struct spdk_nvmf_conn *conn, struct nvmf_request *req)
{
struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
if (cmd->opc == SPDK_NVMF_FABRIC_OPCODE) {
if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
return nvmf_process_fabrics_command(conn, req);
} else if (conn->type == CONN_TYPE_AQ) {
return nvmf_process_admin_command(conn, req);

View File

@ -78,12 +78,6 @@
#define TRACE_NVMF_LIB_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_NVMF, 0x7)
#define TRACE_NVMF_IO_COMPLETE SPDK_TPOINT_ID(TRACE_GROUP_NVMF, 0x8)
union sgl_shift {
struct spdk_nvmf_keyed_sgl_descriptor nvmf_sgl;
struct spdk_nvme_sgl_descriptor nvme_sgl;
};
SPDK_STATIC_ASSERT(sizeof(union sgl_shift) == 16, "Incorrect size");
union nvmf_h2c_msg {
struct spdk_nvmf_capsule_cmd nvmf_cmd;
struct spdk_nvme_cmd nvme_cmd;

View File

@ -88,12 +88,7 @@ nvmf_init_session_properties(struct nvmf_session *session, int aq_depth)
/* for now base virtual controller properties on first namespace controller */
struct spdk_nvme_ctrlr *ctrlr = session->subsys->ns_list_map[0].ctrlr;
const struct spdk_nvme_ctrlr_data *cdata;
struct spdk_nvmf_ctrlr_maxcmd *maxcmd;
struct spdk_nvmf_ctrlr_kas *kas;
struct spdk_nvmf_extended_identify_ctrlr_data *nvmfdata;
struct spdk_nvmf_sgl_support *nvmfsgl;
uint8_t *vc_data;
uint32_t io_depth;
/*
Here we are going to initialize the features, properties, and
@ -117,25 +112,19 @@ nvmf_init_session_properties(struct nvmf_session *session, int aq_depth)
session->vcdata.cntlid = session->cntlid;
/* initialize the nvmf new and extension details in controller data */
vc_data = (uint8_t *)&session->vcdata;
kas = (struct spdk_nvmf_ctrlr_kas *)&vc_data[SPDK_NVMF_CTRLR_KAS_OFFSET];
kas->kas = 10; /* for keep alive granularity in seconds (10 * 100ms) */
maxcmd = (struct spdk_nvmf_ctrlr_maxcmd *)&vc_data[SPDK_NVMF_CTRLR_MAXCMD_OFFSET];
io_depth = SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH;
maxcmd->maxcmd = io_depth;
nvmfdata = (struct spdk_nvmf_extended_identify_ctrlr_data *)
&vc_data[SPDK_NVMF_EXTENDED_CTRLR_DATA_OFFSET];
session->vcdata.kas = 10;
session->vcdata.maxcmd = SPDK_NVMF_DEFAULT_MAX_QUEUE_DEPTH;
nvmfdata = (struct spdk_nvmf_extended_identify_ctrlr_data *)session->vcdata.nvmf_specific;
nvmfdata->ioccsz = (NVMF_H2C_MAX_MSG / 16);
nvmfdata->iorcsz = (NVMF_C2H_MAX_MSG / 16);
nvmfdata->icdoff = 0; /* offset starts directly after SQE */
nvmfdata->ctrattr = 0; /* dynamic controller model */
nvmfdata->msdbd = 1; /* target supports single SGL in capsule */
nvmfsgl = (struct spdk_nvmf_sgl_support *)&session->vcdata.sgls;
nvmfsgl->keyed_sgls = 1;
nvmfsgl->address_as_offset_sgl_supported = 1;
session->vcdata.sgls.keyed_sgl = 1;
session->vcdata.sgls.sgl_offset = 1;
SPDK_TRACELOG(SPDK_TRACE_NVMF, " nvmf_init_session_properties: ctrlr data: maxcmd %x\n",
maxcmd->maxcmd);
session->vcdata.maxcmd);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " nvmf_init_session_properties: ext ctrlr data: ioccsz %x\n",
nvmfdata->ioccsz);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " nvmf_init_session_properties: ext ctrlr data: iorcsz %x\n",
@ -147,7 +136,7 @@ nvmf_init_session_properties(struct nvmf_session *session, int aq_depth)
SPDK_TRACELOG(SPDK_TRACE_NVMF, " nvmf_init_session_properties: ext ctrlr data: msdbd %x\n",
nvmfdata->msdbd);
SPDK_TRACELOG(SPDK_TRACE_NVMF, " nvmf_init_session_properties: sgls data: 0x%x\n",
*(uint32_t *)nvmfsgl);
*(uint32_t *)&session->vcdata.sgls);
/* feature: Number Of Queues. */
/* Init to zero. Host shall set before enabling the controller */
@ -156,7 +145,7 @@ nvmf_init_session_properties(struct nvmf_session *session, int aq_depth)
session->vcprop.cap_lo.raw = 0;
session->vcprop.cap_lo.bits.cqr = 0; /* queues not contiguous */
session->vcprop.cap_lo.bits.mqes = (io_depth - 1); /* max queue depth */
session->vcprop.cap_lo.bits.mqes = (session->vcdata.maxcmd - 1); /* max queue depth */
session->vcprop.cap_lo.bits.ams = 0; /* optional arb mechanisms */
session->vcprop.cap_lo.bits.to = 1; /* ready timeout - 500 msec units */

View File

@ -479,11 +479,11 @@ test_hw_sgl_req(void)
sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
CU_ASSERT(sgl_tr->u.sgl[0].type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[0].type_specific == 0);
CU_ASSERT(sgl_tr->u.sgl[0].length == 4096);
CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
CU_ASSERT(req->cmd.dptr.sgl1.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
LIST_REMOVE(sgl_tr, list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);
@ -502,12 +502,12 @@ test_hw_sgl_req(void)
sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
CU_ASSERT(sgl_tr != NULL);
for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
CU_ASSERT(sgl_tr->u.sgl[i].type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[i].type_specific == 0);
CU_ASSERT(sgl_tr->u.sgl[i].length == 4096);
CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
}
CU_ASSERT(req->cmd.dptr.sgl1.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
LIST_REMOVE(sgl_tr, list);
cleanup_submit_request_test(&qpair);
nvme_free_request(req);