nvme: move cntlid to struct spdk_nvme_ctrlr

All controllers have a controller ID (cntlid), and this will be needed
in other NVMe-oF transports, so move it to the generic controller
structure.
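As a quick orientation (a condensed sketch, not the full definitions), the
field moves from the RDMA-specific extension struct into the generic one:

	/* lib/nvme/nvme_internal.h: generic controller, after this change */
	struct spdk_nvme_ctrlr {
		/* ... */
		uint16_t	cntlid;		/* controller ID, any transport */
		/* ... */
	};

	/* lib/nvme/nvme_rdma.c: RDMA extension, which previously carried its
	 * own copy; transports now read ctrlr->cntlid directly. */
	struct nvme_rdma_ctrlr {
		struct spdk_nvme_ctrlr	ctrlr;
	};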

Change-Id: Iaba5b93e1267e7bef3a6eb7c677c549a3d83985c
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/416577
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
3 changed files with 23 additions and 11 deletions

--- a/lib/nvme/nvme_ctrlr.c
+++ b/lib/nvme/nvme_ctrlr.c

@@ -784,6 +784,23 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
 		SPDK_DEBUGLOG(SPDK_LOG_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
 	}
 
+	SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cdata.cntlid);
+	if (ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
+		ctrlr->cntlid = ctrlr->cdata.cntlid;
+	} else {
+		/*
+		 * Fabrics controllers should already have CNTLID from the Connect command.
+		 *
+		 * If CNTLID from Connect doesn't match CNTLID in the Identify Controller data,
+		 * trust the one from Connect.
+		 */
+		if (ctrlr->cntlid != ctrlr->cdata.cntlid) {
+			SPDK_DEBUGLOG(SPDK_LOG_NVME,
+				      "Identify CNTLID 0x%04" PRIx16 " != Connect CNTLID 0x%04" PRIx16 "\n",
+				      ctrlr->cdata.cntlid, ctrlr->cntlid);
+		}
+	}
+
 	return 0;
 }
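The PCIe branch takes CNTLID from the Identify Controller data because PCIe
controllers have no Connect command. As a hedged sketch (hypothetical code,
not part of this commit), any other fabrics transport can now populate the
generic field from its own Connect response and get the cross-check above
for free:

	/* Hypothetical completion handler for some future NVMe-oF transport;
	 * the response field access mirrors the RDMA code later in this diff. */
	static void
	example_on_fabric_connect_rsp(struct spdk_nvme_ctrlr *ctrlr,
				      const struct spdk_nvmf_fabric_connect_rsp *rsp)
	{
		ctrlr->cntlid = rsp->status_code_specific.success.cntlid;
	}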

--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h

@@ -464,6 +464,8 @@ struct spdk_nvme_ctrlr {
 	uint16_t			max_sges;
 
+	uint16_t			cntlid;
+
 	/** Controller support flags */
 	uint64_t			flags;

--- a/lib/nvme/nvme_rdma.c
+++ b/lib/nvme/nvme_rdma.c

@@ -74,8 +74,6 @@ struct spdk_nvme_rdma_mr_map {
 /* NVMe RDMA transport extensions for spdk_nvme_ctrlr */
 struct nvme_rdma_ctrlr {
 	struct spdk_nvme_ctrlr			ctrlr;
-
-	uint16_t				cntlid;
 };
 
 /* NVMe RDMA qpair extensions for spdk_nvme_qpair */
@@ -516,7 +514,6 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
 	int ret;
 	struct rdma_cm_event *event;
 	struct spdk_nvme_ctrlr *ctrlr;
-	struct nvme_rdma_ctrlr *rctrlr;
 
 	ret = ibv_query_device(rqpair->cm_id->verbs, &attr);
 	if (ret != 0) {
@@ -531,12 +528,10 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
 		return -1;
 	}
 
-	rctrlr = nvme_rdma_ctrlr(ctrlr);
-
 	request_data.qid = rqpair->qpair.id;
 	request_data.hrqsize = rqpair->num_entries;
 	request_data.hsqsize = rqpair->num_entries - 1;
-	request_data.cntlid = rctrlr->cntlid;
+	request_data.cntlid = ctrlr->cntlid;
 
 	param.private_data = &request_data;
 	param.private_data_len = sizeof(request_data);
@@ -608,7 +603,6 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 	struct spdk_nvmf_fabric_connect_cmd cmd;
 	struct spdk_nvmf_fabric_connect_data *nvmf_data;
 	struct spdk_nvme_ctrlr *ctrlr;
-	struct nvme_rdma_ctrlr *rctrlr;
 	int rc = 0;
 
 	ctrlr = rqpair->qpair.ctrlr;
@@ -616,8 +610,6 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 		return -1;
 	}
 
-	rctrlr = nvme_rdma_ctrlr(ctrlr);
-
 	nvmf_data = spdk_dma_zmalloc(sizeof(*nvmf_data), 0, NULL);
 	if (!nvmf_data) {
 		SPDK_ERRLOG("nvmf_data allocation error\n");
@@ -636,7 +628,7 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 	if (nvme_qpair_is_admin_queue(&rqpair->qpair)) {
 		nvmf_data->cntlid = 0xFFFF;
 	} else {
-		nvmf_data->cntlid = rctrlr->cntlid;
+		nvmf_data->cntlid = ctrlr->cntlid;
 	}
 
 	SPDK_STATIC_ASSERT(sizeof(nvmf_data->hostid) == sizeof(ctrlr->opts.extended_host_id),
@@ -662,7 +654,8 @@ nvme_rdma_qpair_fabric_connect(struct nvme_rdma_qpair *rqpair)
 	if (nvme_qpair_is_admin_queue(&rqpair->qpair)) {
 		rsp = (struct spdk_nvmf_fabric_connect_rsp *)&status.cpl;
-		rctrlr->cntlid = rsp->status_code_specific.success.cntlid;
+		ctrlr->cntlid = rsp->status_code_specific.success.cntlid;
+		SPDK_DEBUGLOG(SPDK_LOG_NVME, "CNTLID 0x%04" PRIx16 "\n", ctrlr->cntlid);
 	}
 
 ret:
 	spdk_dma_free(nvmf_data);
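Usage note: in the NVMe-oF Connect flow above, the admin queue Connect passes
CNTLID 0xFFFF (the spec's "assign me a controller" value under the dynamic
controller model) and records the CNTLID returned in the response; I/O queue
Connects must then echo that stored value. Condensed from the hunks above
(a sketch of the flow, not verbatim code):

	/* Admin queue Connect: let the target assign a controller. */
	nvmf_data->cntlid = 0xFFFF;
	/* ... on completion, remember the target's choice ... */
	ctrlr->cntlid = rsp->status_code_specific.success.cntlid;
	/* I/O queue Connect: must name the controller chosen above. */
	nvmf_data->cntlid = ctrlr->cntlid;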