nvme: Store NVMEoF ioccsz and icdoff in ctrlr structure

This avoids recalculating the ioccsz byte count on each request
and removes accesses to "cold" ctrlr structure fields in the data path.
Add a unit test to check the validity of the calculation.
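
For reference, IOCCSZ is reported by the controller in 16-byte units and a
capsule begins with the 64-byte command (sizeof(struct spdk_nvme_cmd)), so the
cached value is derived once at init time. A standalone sketch of the
arithmetic, not part of this commit; the numbers mirror the unit test below:

    /* 260 units * 16 = 4160 bytes per capsule; subtracting the 64-byte
     * command leaves 4096 bytes of in-capsule data. The spec minimum of
     * 4 units (64 bytes) holds the command alone with no room for data,
     * hence the < 4 sanity check in the new helper. */
    uint32_t ioccsz_bytes = 260 * 16 - 64;	/* == 4096 */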

Change-Id: I55ceff99eb924156155e69a20f587a4f92b83f0b
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/519
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Author: Alexey Marchuk
Date: 2020-01-31 17:30:03 +03:00
Committed by: Tomasz Zawadzki
Parent: 450d19d117
Commit: c3ba9127d0
5 changed files with 104 additions and 18 deletions

lib/nvme/nvme_ctrlr.c

@@ -1526,6 +1526,23 @@ nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
 			     ctrlr->opts.admin_timeout_ms);
 }
 
+static void
+nvme_ctrlr_update_nvmf_ioccsz(struct spdk_nvme_ctrlr *ctrlr)
+{
+	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_RDMA ||
+	    ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_TCP ||
+	    ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_FC) {
+		if (ctrlr->cdata.nvmf_specific.ioccsz < 4) {
+			SPDK_ERRLOG("Incorrect IOCCSZ %u, the minimum value should be 4\n",
+				    ctrlr->cdata.nvmf_specific.ioccsz);
+			ctrlr->cdata.nvmf_specific.ioccsz = 4;
+			assert(0);
+		}
+		ctrlr->ioccsz_bytes = ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd);
+		ctrlr->icdoff = ctrlr->cdata.nvmf_specific.icdoff;
+	}
+}
+
 static int
 nvme_ctrlr_set_num_queues(struct spdk_nvme_ctrlr *ctrlr)
 {
@@ -2410,6 +2427,7 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
 		break;
 
 	case NVME_CTRLR_STATE_SET_NUM_QUEUES:
+		nvme_ctrlr_update_nvmf_ioccsz(ctrlr);
 		rc = nvme_ctrlr_set_num_queues(ctrlr);
 		break;
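
Hook placement matters here: ctrlr->cdata is populated by the IDENTIFY step,
so deriving the cached values on entry to NVME_CTRLR_STATE_SET_NUM_QUEUES
guarantees the source fields are already valid. A condensed, hypothetical view
of the ordering (the real switch lives in nvme_ctrlr_process_init):

    case NVME_CTRLR_STATE_IDENTIFY:
        /* IDENTIFY CONTROLLER completion fills ctrlr->cdata,
         * including cdata.nvmf_specific.ioccsz and .icdoff. */
        rc = nvme_ctrlr_identify(ctrlr);
        break;
    case NVME_CTRLR_STATE_SET_NUM_QUEUES:
        nvme_ctrlr_update_nvmf_ioccsz(ctrlr);	/* cdata is valid here */
        rc = nvme_ctrlr_set_num_queues(ctrlr);
        break;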

lib/nvme/nvme_internal.h

@@ -663,6 +663,12 @@ struct spdk_nvme_ctrlr {
 	/** Controller support flags */
 	uint64_t flags;
 
+	/** NVMEoF in-capsule data size in bytes */
+	uint32_t ioccsz_bytes;
+
+	/** NVMEoF in-capsule data offset in 16 byte units */
+	uint16_t icdoff;
+
 	/* Cold data (not accessed in normal I/O path) is after this point. */
 	union spdk_nvme_cap_register cap;

lib/nvme/nvme_rdma.c

@@ -1509,12 +1509,6 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
 	return 0;
 }
 
-static inline unsigned int
-nvme_rdma_icdsz_bytes(struct spdk_nvme_ctrlr *ctrlr)
-{
-	return (ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd));
-}
-
 static int
 nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
 		   struct spdk_nvme_rdma_req *rdma_req)
@@ -1536,8 +1530,7 @@ nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
 		 */
 		if (spdk_nvme_opc_get_data_transfer(req->cmd.opc) ==
 		    SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
-		    req->payload_size <= nvme_rdma_icdsz_bytes(ctrlr) &&
-		    (ctrlr->cdata.nvmf_specific.icdoff == 0)) {
+		    req->payload_size <= ctrlr->ioccsz_bytes && ctrlr->icdoff == 0) {
 			rc = nvme_rdma_build_contig_inline_request(rqpair, rdma_req);
 		} else {
 			rc = nvme_rdma_build_contig_request(rqpair, rdma_req);
@@ -1545,8 +1538,7 @@ nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
 		if (spdk_nvme_opc_get_data_transfer(req->cmd.opc) ==
 		    SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
-		    req->payload_size <= nvme_rdma_icdsz_bytes(ctrlr) &&
-		    ctrlr->cdata.nvmf_specific.icdoff == 0) {
+		    req->payload_size <= ctrlr->ioccsz_bytes && ctrlr->icdoff == 0) {
 			rc = nvme_rdma_build_sgl_inline_request(rqpair, rdma_req);
 		} else {
 			rc = nvme_rdma_build_sgl_request(rqpair, rdma_req);
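
Both transports now evaluate the same condition against the cached fields. A
hypothetical helper capturing the shared predicate (sketch only; this commit
keeps the checks open-coded in each transport):

    /* In-capsule data can be used only for host-to-controller transfers
     * whose payload fits in the capsule, and only when the controller
     * requires no in-capsule data offset (icdoff == 0). */
    static inline bool
    nvme_request_fits_in_capsule(const struct spdk_nvme_ctrlr *ctrlr,
                                 const struct nvme_request *req)
    {
        return spdk_nvme_opc_get_data_transfer(req->cmd.opc) ==
               SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
               req->payload_size <= ctrlr->ioccsz_bytes &&
               ctrlr->icdoff == 0;
    }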

lib/nvme/nvme_tcp.c

@@ -418,12 +418,6 @@ nvme_tcp_build_sgl_request(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *t
 	return 0;
 }
 
-static inline uint32_t
-nvme_tcp_icdsz_bytes(struct spdk_nvme_ctrlr *ctrlr)
-{
-	return (ctrlr->cdata.nvmf_specific.ioccsz * 16 - sizeof(struct spdk_nvme_cmd));
-}
-
 static int
 nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
 		  struct nvme_tcp_req *tcp_req)
@@ -460,7 +454,7 @@ nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
 		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
 	}
 	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
-		max_incapsule_data_size = nvme_tcp_icdsz_bytes(ctrlr);
+		max_incapsule_data_size = ctrlr->ioccsz_bytes;
 		if ((req->cmd.opc == SPDK_NVME_OPC_FABRIC) || nvme_qpair_is_admin_queue(&tqpair->qpair)) {
 			max_incapsule_data_size = spdk_min(max_incapsule_data_size, NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE);
 		}
@@ -498,7 +492,6 @@ nvme_tcp_qpair_capsule_cmd_send(struct nvme_tcp_qpair *tqpair,
 	plen = capsule_cmd->common.hlen = sizeof(*capsule_cmd);
 	capsule_cmd->ccsqe = tcp_req->req->cmd;
 	SPDK_DEBUGLOG(SPDK_LOG_NVME, "capsule_cmd cid=%u on tqpair(%p)\n", tcp_req->req->cmd.cid, tqpair);
-
 	if (tqpair->host_hdgst_enable) {

test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c

@@ -1978,6 +1978,81 @@ test_spdk_nvme_ctrlr_set_trid(void)
 	CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
 }
 
+static void
+test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	/* Equivalent of 4096 bytes */
+	ctrlr.cdata.nvmf_specific.ioccsz = 260;
+	ctrlr.cdata.nvmf_specific.icdoff = 1;
+
+	/* Check PCI trtype. */
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_GET_NUM_QUEUES);
+	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+	CU_ASSERT(ctrlr.icdoff == 0);
+
+	/* Check RDMA trtype. */
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_GET_NUM_QUEUES);
+	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+	CU_ASSERT(ctrlr.icdoff == 1);
+	ctrlr.ioccsz_bytes = 0;
+	ctrlr.icdoff = 0;
+
+	/* Check TCP trtype. */
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_GET_NUM_QUEUES);
+	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+	CU_ASSERT(ctrlr.icdoff == 1);
+	ctrlr.ioccsz_bytes = 0;
+	ctrlr.icdoff = 0;
+
+	/* Check FC trtype. */
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
+	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_GET_NUM_QUEUES);
+	CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+	CU_ASSERT(ctrlr.icdoff == 1);
+	ctrlr.ioccsz_bytes = 0;
+	ctrlr.icdoff = 0;
+
+	/* Check CUSTOM trtype. */
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
+	ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_GET_NUM_QUEUES);
+	CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+	CU_ASSERT(ctrlr.icdoff == 0);
+}
+
 int main(int argc, char **argv)
 {
 	CU_pSuite suite = NULL;
@@ -2035,6 +2110,8 @@ int main(int argc, char **argv)
 		|| CU_add_test(suite, "test_spdk_nvme_ctrlr_reconnect_io_qpair",
 			       test_spdk_nvme_ctrlr_reconnect_io_qpair) == NULL
 		|| CU_add_test(suite, "test_spdk_nvme_ctrlr_set_trid", test_spdk_nvme_ctrlr_set_trid) == NULL
+		|| CU_add_test(suite, "test_nvme_ctrlr_init_set_nvmf_ioccsz",
+			       test_nvme_ctrlr_init_set_nvmf_ioccsz) == NULL
 	) {
 		CU_cleanup_registry();
 		return CU_get_error();