From 06fbf4b34bace9831549413ed15540fc4a3ff449 Mon Sep 17 00:00:00 2001
From: Ed Rodriguez
Date: Fri, 23 Feb 2018 09:38:24 -0500
Subject: [PATCH] nvme: Use active namespace list for enumerating namespaces

- Add support for multi page CNS 0x2
- Use CNS value 0x02 (SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) to query active namespaces
- Add an API to iterate the active list

Change-Id: Iea524881fa6e3610a7d85ab02a2005a92fd633df
Signed-off-by: John Meneghini
Reviewed-on: https://review.gerrithub.io/401957
Reviewed-by: Daniel Verkamp
Tested-by: SPDK Automated Test System
Reviewed-by: Jim Harris
---
 CHANGELOG.md                                  |   3 +
 examples/nvme/identify/identify.c             |   6 +-
 examples/nvme/perf/perf.c                     |   6 +-
 include/spdk/nvme.h                           |  27 ++++
 lib/bdev/nvme/bdev_nvme.c                     |  13 +-
 lib/nvme/nvme_ctrlr.c                         | 145 +++++++++++++++++-
 lib/nvme/nvme_internal.h                      |   6 +
 .../lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c     | 100 ++++++++++++
 8 files changed, 291 insertions(+), 15 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 258d867fa..7b2ddfb66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,9 @@ experimental pending a functional allocator to free and reallocate CMB buffers.
 spdk_nvme_ns_get_uuid() has been added to allow retrieval of per-namespace UUIDs
 when available.
 
+Added spdk_nvme_ctrlr_get_first_active_ns()/spdk_nvme_ctrlr_get_next_active_ns() to iterate
+active namespaces and spdk_nvme_ctrlr_is_active_ns() to check if a ns id is active.
+
 ### NVMe-oF Target
 
 Namespaces may now be assigned unique identifiers via new optional "eui64" and "nguid" parameters
diff --git a/examples/nvme/identify/identify.c b/examples/nvme/identify/identify.c
index 297faf313..e1796294b 100644
--- a/examples/nvme/identify/identify.c
+++ b/examples/nvme/identify/identify.c
@@ -602,6 +602,7 @@ print_controller(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_transport
     struct spdk_pci_addr pci_addr;
     struct spdk_pci_device *pci_dev;
     struct spdk_pci_id pci_id;
+    uint32_t nsid;
 
     cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
     vs = spdk_nvme_ctrlr_get_regs_vs(ctrlr);
@@ -1173,8 +1174,9 @@ print_controller(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_transport
         printf("\n");
     }
 
-    for (i = 1; i <= spdk_nvme_ctrlr_get_num_ns(ctrlr); i++) {
-        print_namespace(spdk_nvme_ctrlr_get_ns(ctrlr, i));
+    for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
+         nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
+        print_namespace(spdk_nvme_ctrlr_get_ns(ctrlr, nsid));
     }
 
     if (g_discovery_page) {
diff --git a/examples/nvme/perf/perf.c b/examples/nvme/perf/perf.c
index fded86e01..dd9a9014e 100644
--- a/examples/nvme/perf/perf.c
+++ b/examples/nvme/perf/perf.c
@@ -321,10 +321,10 @@ set_latency_tracking_feature(struct spdk_nvme_ctrlr *ctrlr, bool enable)
 static void
 register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
 {
-    int nsid, num_ns;
     struct spdk_nvme_ns *ns;
     struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
    const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);
+    uint32_t nsid;
 
     if (entry == NULL) {
         perror("ctrlr_entry malloc");
@@ -349,8 +349,8 @@ register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
         set_latency_tracking_feature(ctrlr, true);
     }
 
-    num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
-    for (nsid = 1; nsid <= num_ns; nsid++) {
+    for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
+         nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
         ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
         if (ns == NULL) {
             continue;
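
Both example programs above now follow the same enumeration idiom. For reference, a
minimal, self-contained sketch of that pattern is shown below; it assumes an
already-attached controller, and the function name list_active_namespaces() (as well as
the use of spdk_nvme_ns_get_size() for output) is illustrative rather than part of this
patch.

#include <inttypes.h>
#include <stdio.h>

#include "spdk/nvme.h"

/* Walk only the namespaces the controller reports as active. */
static void
list_active_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
    uint32_t nsid;

    for (nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
         nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(ctrlr, nsid)) {
        struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);

        if (ns == NULL) {
            continue;
        }

        printf("Namespace %" PRIu32 ": %" PRIu64 " bytes\n",
               nsid, spdk_nvme_ns_get_size(ns));
    }
}
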
diff --git a/include/spdk/nvme.h b/include/spdk/nvme.h
index 6b8101b77..500f96f8a 100644
--- a/include/spdk/nvme.h
+++ b/include/spdk/nvme.h
@@ -468,6 +468,33 @@ uint32_t spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr);
  */
 struct spdk_pci_device *spdk_nvme_ctrlr_get_pci_device(struct spdk_nvme_ctrlr *ctrlr);
 
+/**
+ * \brief Return true if nsid is an active ns for the given NVMe controller.
+ *
+ * This function is thread safe and can be called at any point while the controller is attached to
+ * the SPDK NVMe driver.
+ *
+ */
+bool spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid);
+
+/**
+ * \brief Return the nsid of the first active namespace, 0 if there are no active namespaces.
+ *
+ * This function is thread safe and can be called at any point while the controller is attached to
+ * the SPDK NVMe driver.
+ *
+ */
+uint32_t spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr);
+
+/**
+ * \brief Return the next active namespace given the previous nsid, or 0 when there are no more active namespaces.
+ *
+ * This function is thread safe and can be called at any point while the controller is attached to
+ * the SPDK NVMe driver.
+ *
+ */
+uint32_t spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t prev_nsid);
+
 /**
  * \brief Determine if a particular log page is supported by the given NVMe controller.
  *
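
The membership test pairs naturally with spdk_nvme_ctrlr_get_ns(). A hypothetical helper
(not part of this patch; the name get_active_ns_or_null() is illustrative) that validates
a caller-supplied NSID before handing back the namespace handle might look like:

#include <stddef.h>

#include "spdk/nvme.h"

static struct spdk_nvme_ns *
get_active_ns_or_null(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
    if (!spdk_nvme_ctrlr_is_active_ns(ctrlr, nsid)) {
        /* NSID is zero, out of range, or currently inactive. */
        return NULL;
    }

    return spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
}
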
namespaces + */ + for (i = 0; i < num_pages; i++) { + status.done = false; + rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid, + &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list), + nvme_completion_poll_cb, &status); + if (rc != 0) { + goto fail; + } + while (status.done == false) { + spdk_nvme_qpair_process_completions(ctrlr->adminq, 0); + } + if (spdk_nvme_cpl_is_error(&status.cpl)) { + SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n"); + rc = -ENXIO; + goto fail; + } + next_nsid = new_ns_list[1024 * i + 1023]; + if (next_nsid == 0) { + /* + * No more active namespaces found, no need to fetch additional chunks + */ + break; + } + } + + } else { + /* + * Controller doesn't support active ns list CNS 0x02 so dummy up + * an active ns list + */ + for (i = 0; i < ctrlr->num_ns; i++) { + new_ns_list[i] = i + 1; + } + } + + /* + * Now that that the list is properly setup, we can swap it in to the ctrlr and + * free up the previous one. + */ + spdk_dma_free(ctrlr->active_ns_list); + ctrlr->active_ns_list = new_ns_list; + + return 0; +fail: + spdk_dma_free(new_ns_list); + return rc; +} + static int nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr) { @@ -1010,6 +1087,9 @@ nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr) spdk_dma_free(ctrlr->nsdata); ctrlr->nsdata = NULL; } + + spdk_dma_free(ctrlr->active_ns_list); + ctrlr->active_ns_list = NULL; } static int @@ -1044,6 +1124,10 @@ nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr) ctrlr->num_ns = nn; } + if (nvme_ctrlr_identify_active_ns(ctrlr)) { + goto fail; + } + for (i = 0; i < nn; i++) { struct spdk_nvme_ns *ns = &ctrlr->ns[i]; uint32_t nsid = i + 1; @@ -1786,14 +1870,67 @@ spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) return ctrlr->num_ns; } -struct spdk_nvme_ns * -spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id) +static int32_t +spdk_nvme_ctrlr_active_ns_idx(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) { - if (ns_id < 1 || ns_id > ctrlr->num_ns) { + int32_t result = -1; + + if (ctrlr->active_ns_list == NULL || nsid == 0 || nsid > ctrlr->num_ns) { + return result; + } + + int32_t lower = 0; + int32_t upper = ctrlr->num_ns - 1; + int32_t mid; + + while (lower <= upper) { + mid = lower + (upper - lower) / 2; + if (ctrlr->active_ns_list[mid] == nsid) { + result = mid; + break; + } else { + if (ctrlr->active_ns_list[mid] != 0 && ctrlr->active_ns_list[mid] < nsid) { + lower = mid + 1; + } else { + upper = mid - 1; + } + + } + } + + return result; +} + +bool +spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) +{ + return spdk_nvme_ctrlr_active_ns_idx(ctrlr, nsid) != -1; +} + +uint32_t +spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) +{ + return ctrlr->active_ns_list ? 
diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h
index adc47bc2d..28b1a4569 100644
--- a/lib/nvme/nvme_internal.h
+++ b/lib/nvme/nvme_internal.h
@@ -441,6 +441,11 @@ struct spdk_nvme_ctrlr {
      */
     struct spdk_nvme_ctrlr_data cdata;
 
+    /**
+     * Keep track of active namespaces
+     */
+    uint32_t *active_ns_list;
+
     /**
      * Array of Identify Namespace data.
      *
@@ -590,6 +595,7 @@ void nvme_qpair_disable(struct spdk_nvme_qpair *qpair);
 int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
 
+int nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr);
 int nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
                       struct spdk_nvme_ctrlr *ctrlr);
 void nvme_ns_destruct(struct spdk_nvme_ns *ns);
 
diff --git a/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
index 215dec3cd..614c12c50 100644
--- a/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
+++ b/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -276,6 +276,23 @@ nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
                         void *payload, size_t payload_size,
                         spdk_nvme_cmd_cb cb_fn, void *cb_arg)
 {
+    if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+        uint32_t count = 0;
+        uint32_t i = 0;
+        struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;
+
+        for (i = 1; i <= ctrlr->num_ns; i++) {
+            if (i <= nsid) {
+                continue;
+            }
+
+            ns_list->ns_list[count++] = i;
+            if (count == SPDK_COUNTOF(ns_list->ns_list)) {
+                break;
+            }
+        }
+
+    }
     fake_cpl_success(cb_fn, cb_arg);
     return 0;
 }
@@ -457,6 +474,7 @@ test_nvme_ctrlr_init_en_1_rdy_0(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
     CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);
@@ -511,6 +529,7 @@ test_nvme_ctrlr_init_en_1_rdy_1(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
     CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
@@ -563,6 +582,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     /*
      * Case 1: default round robin arbitration mechanism selected
      */
@@ -596,6 +616,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -624,6 +645,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -652,6 +674,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -680,6 +703,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -726,6 +750,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     /*
      * Case 1: default round robin arbitration mechanism selected
      */
@@ -759,6 +784,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -789,6 +815,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -817,6 +844,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -845,6 +873,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -890,6 +919,7 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     /*
      * Case 1: default round robin arbitration mechanism selected
      */
@@ -923,6 +953,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -951,6 +982,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -981,6 +1013,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -1009,6 +1042,7 @@
      */
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
 
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
@@ -1049,6 +1083,7 @@ test_nvme_ctrlr_init_en_0_rdy_0(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
     CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
@@ -1086,6 +1121,7 @@ test_nvme_ctrlr_init_en_0_rdy_1(void)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
     ctrlr.cdata.nn = 1;
+    ctrlr.page_size = 0x1000;
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
     CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
     CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
@@ -1124,6 +1160,7 @@ setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
 
     SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
 
+    ctrlr->page_size = 0x1000;
     ctrlr->opts.num_io_queues = num_io_queues;
     ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
     SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
@@ -1608,6 +1645,67 @@ test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
     nvme_ctrlr_free_doorbell_buffer(&ctrlr);
 }
 
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+    uint32_t nsid, minor;
+    size_t ns_id_count;
+    struct spdk_nvme_ctrlr ctrlr = {};
+
+    ctrlr.page_size = 0x1000;
+
+    for (minor = 0; minor <= 2; minor++) {
+        ctrlr.cdata.ver.bits.mjr = 1;
+        ctrlr.cdata.ver.bits.mnr = minor;
+        ctrlr.cdata.ver.bits.ter = 0;
+        ctrlr.num_ns = 1531;
+        nvme_ctrlr_identify_active_ns(&ctrlr);
+
+        for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+            CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+        }
+        ctrlr.num_ns = 1559;
+        for (; nsid <= ctrlr.num_ns; nsid++) {
+            CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+        }
+        ctrlr.num_ns = 1531;
+        for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+            ctrlr.active_ns_list[nsid] = 0;
+        }
+        CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+        ctrlr.active_ns_list[0] = 1;
+        CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+        CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+        nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+        CU_ASSERT(nsid == 1);
+
+        ctrlr.active_ns_list[1] = 3;
+        CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+        CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+        CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+        nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+        CU_ASSERT(nsid == 3);
+        nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+        CU_ASSERT(nsid == 0);
+
+        memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
+        for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+            ctrlr.active_ns_list[nsid] = nsid + 1;
+        }
+
+        ns_id_count = 0;
+        for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+             nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+            CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+            ns_id_count++;
+        }
+        CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+        nvme_ctrlr_destruct(&ctrlr);
+    }
+}
+
 int main(int argc, char **argv)
 {
     CU_pSuite suite = NULL;
@@ -1656,6 +1754,8 @@
         || CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
                        test_nvme_ctrlr_alloc_cmb) == NULL
 #endif
+        || CU_add_test(suite, "test nvme ctrlr function test_nvme_ctrlr_test_active_ns",
+                       test_nvme_ctrlr_test_active_ns) == NULL
     ) {
         CU_cleanup_registry();
         return CU_get_error();