external_code/nvme: basic controller initialization flow

Added a simple controller enablement state machine based on the CC.EN
and CSTS.RDY bits.  The admin queue registers are also filled during
this process, so it's now possible to send admin requests.

To simplify the code, there are no timeouts on individual state
transitions or on the overall initialization process.  This means that
if a controller gets stuck in one of the states, the code will hang
indefinitely as well.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I93f5a5931d7b24780da242e601dcdf2bec5f6552
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6673
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
This commit is contained in:
Konrad Sztyber 2021-02-19 17:43:53 +01:00 committed by Tomasz Zawadzki
parent a2d3ea8cb3
commit 5555513653

View File

@ -178,50 +178,37 @@ nvme_ctrlr_get_cap(struct nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
get_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap), &cap->raw); get_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap), &cap->raw);
} }
void static void
nvme_ctrlr_get_cc(struct nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc);
void
nvme_ctrlr_get_cc(struct nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc) nvme_ctrlr_get_cc(struct nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{ {
get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc), &cc->raw); get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc), &cc->raw);
} }
void nvme_ctrlr_get_csts(struct nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts); static void
void
nvme_ctrlr_get_csts(struct nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts) nvme_ctrlr_get_csts(struct nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{ {
get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts), &csts->raw); get_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts), &csts->raw);
} }
void nvme_ctrlr_set_cc(struct nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc); static void
void
nvme_ctrlr_set_cc(struct nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc) nvme_ctrlr_set_cc(struct nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{ {
set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw), cc->raw); set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw), cc->raw);
} }
void nvme_ctrlr_set_asq(struct nvme_ctrlr *ctrlr, uint64_t value); static void
void
nvme_ctrlr_set_asq(struct nvme_ctrlr *ctrlr, uint64_t value) nvme_ctrlr_set_asq(struct nvme_ctrlr *ctrlr, uint64_t value)
{ {
set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, asq), value); set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, asq), value);
} }
void nvme_ctrlr_set_acq(struct nvme_ctrlr *ctrlr, uint64_t value); static void
void
nvme_ctrlr_set_acq(struct nvme_ctrlr *ctrlr, uint64_t value) nvme_ctrlr_set_acq(struct nvme_ctrlr *ctrlr, uint64_t value)
{ {
set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, acq), value); set_pcie_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, acq), value);
} }
void nvme_ctrlr_set_aqa(struct nvme_ctrlr *ctrlr, const union spdk_nvme_aqa_register *aqa); static void
void
nvme_ctrlr_set_aqa(struct nvme_ctrlr *ctrlr, const union spdk_nvme_aqa_register *aqa) nvme_ctrlr_set_aqa(struct nvme_ctrlr *ctrlr, const union spdk_nvme_aqa_register *aqa)
{ {
set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, aqa.raw), aqa->raw); set_pcie_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, aqa.raw), aqa->raw);
@ -362,8 +349,64 @@ pcie_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
static int static int
process_ctrlr_init(struct nvme_ctrlr *ctrlr) process_ctrlr_init(struct nvme_ctrlr *ctrlr)
{ {
/* Immediately mark the controller as ready for now */ union spdk_nvme_cc_register cc;
ctrlr->state = NVME_CTRLR_STATE_READY; union spdk_nvme_csts_register csts;
union spdk_nvme_aqa_register aqa;
if (ctrlr->state == NVME_CTRLR_STATE_READY) {
return 0;
}
nvme_ctrlr_get_cc(ctrlr, &cc);
nvme_ctrlr_get_csts(ctrlr, &csts);
switch (ctrlr->state) {
case NVME_CTRLR_STATE_INIT:
if (cc.bits.en) {
if (csts.bits.rdy == 0) {
ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1;
break;
}
cc.bits.en = 0;
nvme_ctrlr_set_cc(ctrlr, &cc);
}
ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
break;
case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1:
if (csts.bits.rdy) {
cc.bits.en = 0;
nvme_ctrlr_set_cc(ctrlr, &cc);
ctrlr->state = NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0;
}
break;
case NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0:
if (csts.bits.rdy == 0) {
ctrlr->state = NVME_CTRLR_STATE_ENABLE;
}
break;
case NVME_CTRLR_STATE_ENABLE:
nvme_ctrlr_set_asq(ctrlr, ctrlr->admin_qpair->sq_paddr);
nvme_ctrlr_set_acq(ctrlr, ctrlr->admin_qpair->cq_paddr);
aqa.raw = 0;
aqa.bits.asqs = ctrlr->admin_qpair->num_entries - 1;
aqa.bits.acqs = ctrlr->admin_qpair->num_entries - 1;
nvme_ctrlr_set_aqa(ctrlr, &aqa);
cc.bits.en = 1;
nvme_ctrlr_set_cc(ctrlr, &cc);
ctrlr->state = NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1;
break;
case NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1:
if (csts.bits.rdy) {
ctrlr->state = NVME_CTRLR_STATE_READY;
}
break;
default:
assert(0 && "should never get here");
return -1;
}
return 0; return 0;
} }