diff --git a/examples/nvme/identify/identify.c b/examples/nvme/identify/identify.c index 15b33e84b..32b2260fa 100644 --- a/examples/nvme/identify/identify.c +++ b/examples/nvme/identify/identify.c @@ -55,7 +55,7 @@ struct feature { static struct feature features[256]; -static struct nvme_health_information_page *health_page; +static struct spdk_nvme_health_information_page *health_page; static struct spdk_nvme_intel_smart_information_page *intel_smart_page; @@ -110,11 +110,12 @@ hex_dump(const void *data, size_t size) } static void -get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) +get_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct feature *feature = cb_arg; int fid = feature - features; - if (nvme_completion_is_error(cpl)) { + + if (spdk_nvme_cpl_is_error(cpl)) { printf("get_feature(0x%02X) failed\n", fid); } else { feature->result = cpl->cdw0; @@ -124,9 +125,9 @@ get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) } static void -get_log_page_completion(void *cb_arg, const struct nvme_completion *cpl) +get_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("get log page failed\n"); } outstanding_commands--; @@ -135,9 +136,9 @@ get_log_page_completion(void *cb_arg, const struct nvme_completion *cpl) static int get_feature(struct nvme_controller *ctrlr, uint8_t fid) { - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; - cmd.opc = NVME_OPC_GET_FEATURES; + cmd.opc = SPDK_NVME_OPC_GET_FEATURES; cmd.cdw10 = fid; return nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, get_feature_completion, &features[fid]); @@ -149,10 +150,10 @@ get_features(struct nvme_controller *ctrlr) size_t i; uint8_t features_to_get[] = { - NVME_FEAT_ARBITRATION, - NVME_FEAT_POWER_MANAGEMENT, - NVME_FEAT_TEMPERATURE_THRESHOLD, - NVME_FEAT_ERROR_RECOVERY, + SPDK_NVME_FEAT_ARBITRATION, + SPDK_NVME_FEAT_POWER_MANAGEMENT, + SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD, + SPDK_NVME_FEAT_ERROR_RECOVERY, }; /* Submit several GET FEATURES commands and wait for them to complete */ @@ -181,7 +182,7 @@ get_health_log_page(struct nvme_controller *ctrlr) exit(1); } - if (nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION, NVME_GLOBAL_NAMESPACE_TAG, + if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, SPDK_NVME_GLOBAL_NS_TAG, health_page, sizeof(*health_page), get_log_page_completion, NULL)) { printf("nvme_ctrlr_cmd_get_log_page() failed\n"); exit(1); @@ -201,7 +202,7 @@ get_intel_smart_log_page(struct nvme_controller *ctrlr) exit(1); } - if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_SMART, NVME_GLOBAL_NAMESPACE_TAG, + if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_SMART, SPDK_NVME_GLOBAL_NS_TAG, intel_smart_page, sizeof(*intel_smart_page), get_log_page_completion, NULL)) { printf("nvme_ctrlr_cmd_get_log_page() failed\n"); exit(1); @@ -222,7 +223,7 @@ get_intel_temperature_log_page(struct nvme_controller *ctrlr) exit(1); } - if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, NVME_GLOBAL_NAMESPACE_TAG, + if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG, intel_temperature_page, sizeof(*intel_temperature_page), get_log_page_completion, NULL)) { printf("nvme_ctrlr_cmd_get_log_page() failed\n"); exit(1); @@ -233,7 +234,7 @@ get_intel_temperature_log_page(struct nvme_controller *ctrlr) static void get_log_pages(struct nvme_controller *ctrlr) { 
- const struct nvme_controller_data *ctrlr_data; + const struct spdk_nvme_ctrlr_data *ctrlr_data; outstanding_commands = 0; if (get_health_log_page(ctrlr) == 0) { @@ -322,7 +323,7 @@ print_uint_var_dec(uint8_t *array, unsigned int len) static void print_namespace(struct nvme_namespace *ns) { - const struct nvme_namespace_data *nsdata; + const struct spdk_nvme_ns_data *nsdata; uint32_t i; uint32_t flags; @@ -365,7 +366,7 @@ print_namespace(struct nvme_namespace *ns) static void print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) { - const struct nvme_controller_data *cdata; + const struct spdk_nvme_ctrlr_data *cdata; uint8_t str[128]; uint32_t i; @@ -405,8 +406,8 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) printf("Unlimited\n"); else printf("%d\n", 4096 * (1 << cdata->mdts)); - if (features[NVME_FEAT_ERROR_RECOVERY].valid) { - unsigned tler = features[NVME_FEAT_ERROR_RECOVERY].result & 0xFFFF; + if (features[SPDK_NVME_FEAT_ERROR_RECOVERY].valid) { + unsigned tler = features[SPDK_NVME_FEAT_ERROR_RECOVERY].result & 0xFFFF; printf("Error Recovery Timeout: "); if (tler == 0) { printf("Unlimited\n"); @@ -475,8 +476,8 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) cdata->sgls.oversized_sgl_supported ? "Supported" : "Not Supported"); printf("\n"); - if (features[NVME_FEAT_ARBITRATION].valid) { - uint32_t arb = features[NVME_FEAT_ARBITRATION].result; + if (features[SPDK_NVME_FEAT_ARBITRATION].valid) { + uint32_t arb = features[SPDK_NVME_FEAT_ARBITRATION].result; unsigned ab, lpw, mpw, hpw; ab = arb & 0x3; @@ -498,14 +499,14 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) printf("\n"); } - if (features[NVME_FEAT_POWER_MANAGEMENT].valid) { - unsigned ps = features[NVME_FEAT_POWER_MANAGEMENT].result & 0x1F; + if (features[SPDK_NVME_FEAT_POWER_MANAGEMENT].valid) { + unsigned ps = features[SPDK_NVME_FEAT_POWER_MANAGEMENT].result & 0x1F; printf("Power Management\n"); printf("================\n"); printf("Number of Power States: %u\n", cdata->npss + 1); printf("Current Power State: Power State #%u\n", ps); for (i = 0; i <= cdata->npss; i++) { - const struct nvme_power_state *psd = &cdata->psd[i]; + const struct spdk_nvme_power_state *psd = &cdata->psd[i]; printf("Power State #%u: ", i); if (psd->mps) { /* MP scale is 0.0001 W */ @@ -523,7 +524,7 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) printf("\n"); } - if (features[NVME_FEAT_TEMPERATURE_THRESHOLD].valid && health_page) { + if (features[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD].valid && health_page) { printf("Health Information\n"); printf("==================\n"); @@ -547,8 +548,8 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) health_page->temperature, health_page->temperature - 273); printf("Temperature Threshold: %u Kelvin (%u Celsius)\n", - features[NVME_FEAT_TEMPERATURE_THRESHOLD].result, - features[NVME_FEAT_TEMPERATURE_THRESHOLD].result - 273); + features[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD].result, + features[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD].result - 273); printf("Available Spare: %u%%\n", health_page->available_spare); printf("Life Percentage Used: %u%%\n", health_page->percentage_used); printf("Data Units Read: "); diff --git a/examples/nvme/perf/perf.c b/examples/nvme/perf/perf.c index 38e056921..df9d796b3 100644 --- a/examples/nvme/perf/perf.c +++ b/examples/nvme/perf/perf.c @@ -148,7 +148,7 @@ static void register_ns(struct 
nvme_controller *ctrlr, struct nvme_namespace *ns) { struct ns_entry *entry; - const struct nvme_controller_data *cdata; + const struct spdk_nvme_ctrlr_data *cdata; cdata = nvme_ctrlr_get_data(ctrlr); @@ -182,9 +182,9 @@ register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns) } static void -enable_latency_tracking_complete(void *cb_arg, const struct nvme_completion *cpl) +enable_latency_tracking_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl) { - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("enable_latency_tracking_complete failed\n"); } g_outstanding_commands--; @@ -220,7 +220,7 @@ register_ctrlr(struct nvme_controller *ctrlr) { int nsid, num_ns; struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry)); - const struct nvme_controller_data *cdata = nvme_ctrlr_get_data(ctrlr); + const struct spdk_nvme_ctrlr_data *cdata = nvme_ctrlr_get_data(ctrlr); if (entry == NULL) { perror("ctrlr_entry malloc"); @@ -361,7 +361,7 @@ static void task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned } } -static void io_complete(void *ctx, const struct nvme_completion *completion); +static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion); static __thread unsigned int seed = 0; @@ -444,7 +444,7 @@ task_complete(struct perf_task *task) } static void -io_complete(void *ctx, const struct nvme_completion *completion) +io_complete(void *ctx, const struct spdk_nvme_cpl *completion) { task_complete((struct perf_task *)ctx); } @@ -615,7 +615,7 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_ ctrlr = g_controllers; while (ctrlr) { if (nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) { - if (nvme_ctrlr_cmd_get_log_page(ctrlr->ctrlr, log_page, NVME_GLOBAL_NAMESPACE_TAG, + if (nvme_ctrlr_cmd_get_log_page(ctrlr->ctrlr, log_page, SPDK_NVME_GLOBAL_NS_TAG, ctrlr->latency_page, sizeof(struct spdk_nvme_intel_rw_latency_page), enable_latency_tracking_complete, NULL)) { diff --git a/examples/nvme/reserve/reservation.c b/examples/nvme/reserve/reservation.c index 936457b1d..3bc4e79b6 100644 --- a/examples/nvme/reserve/reservation.c +++ b/examples/nvme/reserve/reservation.c @@ -74,11 +74,12 @@ static struct feature features[256]; #define CR_KEY 0xDEADBEAF5A5A5A5B static void -get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) +get_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct feature *feature = cb_arg; int fid = feature - features; - if (nvme_completion_is_error(cpl)) { + + if (spdk_nvme_cpl_is_error(cpl)) { fprintf(stdout, "get_feature(0x%02X) failed\n", fid); } else { feature->result = cpl->cdw0; @@ -88,11 +89,12 @@ get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) } static void -set_feature_completion(void *cb_arg, const struct nvme_completion *cpl) +set_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct feature *feature = cb_arg; int fid = feature - features; - if (nvme_completion_is_error(cpl)) { + + if (spdk_nvme_cpl_is_error(cpl)) { fprintf(stdout, "set_feature(0x%02X) failed\n", fid); set_feature_result = -1; } else { @@ -106,16 +108,16 @@ get_host_identifier(struct nvme_controller *ctrlr) { int ret; uint64_t *host_id; - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; - cmd.opc = NVME_OPC_GET_FEATURES; - cmd.cdw10 = NVME_FEAT_HOST_IDENTIFIER; + cmd.opc = SPDK_NVME_OPC_GET_FEATURES; + cmd.cdw10 = SPDK_NVME_FEAT_HOST_IDENTIFIER; outstanding_commands = 0; host_id = 
rte_malloc(NULL, 8, 0); ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8, - get_feature_completion, &features[NVME_FEAT_HOST_IDENTIFIER]); + get_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]); if (ret) { fprintf(stdout, "Get Feature: Failed\n"); return -1; @@ -127,7 +129,7 @@ get_host_identifier(struct nvme_controller *ctrlr) nvme_ctrlr_process_admin_completions(ctrlr); } - if (features[NVME_FEAT_HOST_IDENTIFIER].valid) { + if (features[SPDK_NVME_FEAT_HOST_IDENTIFIER].valid) { fprintf(stdout, "Get Feature: Host Identifier 0x%"PRIx64"\n", *host_id); } @@ -139,10 +141,10 @@ set_host_identifier(struct nvme_controller *ctrlr) { int ret; uint64_t *host_id; - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; - cmd.opc = NVME_OPC_SET_FEATURES; - cmd.cdw10 = NVME_FEAT_HOST_IDENTIFIER; + cmd.opc = SPDK_NVME_OPC_SET_FEATURES; + cmd.cdw10 = SPDK_NVME_FEAT_HOST_IDENTIFIER; host_id = rte_malloc(NULL, 8, 0); *host_id = HOST_ID; @@ -152,7 +154,7 @@ set_host_identifier(struct nvme_controller *ctrlr) fprintf(stdout, "Set Feature: Host Identifier 0x%"PRIx64"\n", *host_id); ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8, - set_feature_completion, &features[NVME_FEAT_HOST_IDENTIFIER]); + set_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]); if (ret) { fprintf(stdout, "Set Feature: Failed\n"); rte_free(host_id); @@ -173,9 +175,9 @@ set_host_identifier(struct nvme_controller *ctrlr) } static void -reservation_ns_completion(void *cb_arg, const struct nvme_completion *cpl) +reservation_ns_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { reserve_command_result = -1; } else { reserve_command_result = 0; @@ -188,12 +190,12 @@ static int reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id) { int ret; - struct nvme_reservation_register_data *rr_data; + struct spdk_nvme_reservation_register_data *rr_data; struct nvme_namespace *ns; ns = nvme_ctrlr_get_ns(ctrlr, ns_id); - rr_data = rte_zmalloc(NULL, sizeof(struct nvme_reservation_register_data), 0); + rr_data = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_register_data), 0); rr_data->crkey = CR_KEY; rr_data->nrkey = CR_KEY; @@ -201,8 +203,8 @@ reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id) reserve_command_result = -1; ret = nvme_ns_cmd_reservation_register(ns, rr_data, 1, - NVME_RESERVE_REGISTER_KEY, - NVME_RESERVE_PTPL_NO_CHANGES, + SPDK_NVME_RESERVE_REGISTER_KEY, + SPDK_NVME_RESERVE_PTPL_NO_CHANGES, reservation_ns_completion, NULL); if (ret) { fprintf(stderr, "Reservation Register Failed\n"); @@ -227,8 +229,8 @@ reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id) { int ret, i; uint8_t *payload; - struct nvme_reservation_status_data *status; - struct nvme_reservation_controller_data *cdata; + struct spdk_nvme_reservation_status_data *status; + struct spdk_nvme_reservation_ctrlr_data *cdata; struct nvme_namespace *ns; ns = nvme_ctrlr_get_ns(ctrlr, ns_id); @@ -256,14 +258,14 @@ reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id) return 0; } - status = (struct nvme_reservation_status_data *)payload; + status = (struct spdk_nvme_reservation_status_data *)payload; fprintf(stdout, "Reservation Generation Counter %u\n", status->generation); fprintf(stdout, "Reservation type %u\n", status->type); fprintf(stdout, "Reservation Number of Registered Controllers %u\n", status->nr_regctl); fprintf(stdout, "Reservation Persist Through Power Loss State %u\n", 
status->ptpl_state); for (i = 0; i < status->nr_regctl; i++) { - cdata = (struct nvme_reservation_controller_data *)(payload + sizeof(struct - nvme_reservation_status_data) * (i + 1)); + cdata = (struct spdk_nvme_reservation_ctrlr_data *)(payload + sizeof(struct + spdk_nvme_reservation_status_data) * (i + 1)); fprintf(stdout, "Controller ID %u\n", cdata->ctrlr_id); fprintf(stdout, "Controller Reservation Status %u\n", cdata->rcsts.status); fprintf(stdout, "Controller Host ID 0x%"PRIx64"\n", cdata->host_id); @@ -278,11 +280,11 @@ static int reservation_ns_acquire(struct nvme_controller *ctrlr, uint16_t ns_id) { int ret; - struct nvme_reservation_acquire_data *cdata; + struct spdk_nvme_reservation_acquire_data *cdata; struct nvme_namespace *ns; ns = nvme_ctrlr_get_ns(ctrlr, ns_id); - cdata = rte_zmalloc(NULL, sizeof(struct nvme_reservation_acquire_data), 0); + cdata = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_acquire_data), 0); cdata->crkey = CR_KEY; outstanding_commands = 0; @@ -290,8 +292,8 @@ reservation_ns_acquire(struct nvme_controller *ctrlr, uint16_t ns_id) ret = nvme_ns_cmd_reservation_acquire(ns, cdata, 0, - NVME_RESERVE_ACQUIRE, - NVME_RESERVE_WRITE_EXCLUSIVE, + SPDK_NVME_RESERVE_ACQUIRE, + SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, reservation_ns_completion, NULL); if (ret) { fprintf(stderr, "Reservation Acquire Failed\n"); @@ -315,11 +317,11 @@ static int reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id) { int ret; - struct nvme_reservation_key_data *cdata; + struct spdk_nvme_reservation_key_data *cdata; struct nvme_namespace *ns; ns = nvme_ctrlr_get_ns(ctrlr, ns_id); - cdata = rte_zmalloc(NULL, sizeof(struct nvme_reservation_key_data), 0); + cdata = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_key_data), 0); cdata->crkey = CR_KEY; outstanding_commands = 0; @@ -327,8 +329,8 @@ reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id) ret = nvme_ns_cmd_reservation_release(ns, cdata, 0, - NVME_RESERVE_RELEASE, - NVME_RESERVE_WRITE_EXCLUSIVE, + SPDK_NVME_RESERVE_RELEASE, + SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, reservation_ns_completion, NULL); if (ret) { fprintf(stderr, "Reservation Release Failed\n"); @@ -351,7 +353,7 @@ reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id) static void reserve_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev) { - const struct nvme_controller_data *cdata; + const struct spdk_nvme_ctrlr_data *cdata; cdata = nvme_ctrlr_get_data(ctrlr); diff --git a/include/spdk/nvme.h b/include/spdk/nvme.h index f2627cb3b..574a638b1 100644 --- a/include/spdk/nvme.h +++ b/include/spdk/nvme.h @@ -111,7 +111,7 @@ int nvme_ctrlr_reset(struct nvme_controller *ctrlr); * the SPDK NVMe driver. * */ -const struct nvme_controller_data *nvme_ctrlr_get_data(struct nvme_controller *ctrlr); +const struct spdk_nvme_ctrlr_data *nvme_ctrlr_get_data(struct nvme_controller *ctrlr); /** * \brief Get the number of namespaces for the given NVMe controller. @@ -150,7 +150,7 @@ bool nvme_ctrlr_is_feature_supported(struct nvme_controller *ctrlr, uint8_t feat * * The nvme_completion parameter contains the completion status. */ -typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *); +typedef void (*nvme_cb_fn_t)(void *, const struct spdk_nvme_cpl *); /** * Signature for callback function invoked when an asynchronous error @@ -162,7 +162,7 @@ typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *); * asynchronous event request that was completed. 
*/ typedef void (*nvme_aer_cb_fn_t)(void *aer_cb_arg, - const struct nvme_completion *); + const struct spdk_nvme_cpl *); void nvme_ctrlr_register_aer_callback(struct nvme_controller *ctrlr, nvme_aer_cb_fn_t aer_cb_fn, @@ -183,7 +183,7 @@ void nvme_ctrlr_register_aer_callback(struct nvme_controller *ctrlr, * */ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr, - struct nvme_command *cmd, + struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg); @@ -223,7 +223,7 @@ int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_ * of commands submitted through this function. */ int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr, - struct nvme_command *cmd, + struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg); @@ -341,7 +341,7 @@ int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, * This function is thread safe and can be called at any point while the controller is attached to * the SPDK NVMe driver. */ -const struct nvme_namespace_data *nvme_ns_get_data(struct nvme_namespace *ns); +const struct spdk_nvme_ns_data *nvme_ns_get_data(struct nvme_namespace *ns); /** * \brief Get the namespace id (index number) from the given namespace handle. @@ -588,10 +588,10 @@ int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, * nvme_register_io_thread(). */ int nvme_ns_cmd_reservation_register(struct nvme_namespace *ns, - struct nvme_reservation_register_data *payload, + struct spdk_nvme_reservation_register_data *payload, bool ignore_key, - enum nvme_reservation_register_action action, - enum nvme_reservation_register_cptpl cptpl, + enum spdk_nvme_reservation_register_action action, + enum spdk_nvme_reservation_register_cptpl cptpl, nvme_cb_fn_t cb_fn, void *cb_arg); /** @@ -612,10 +612,10 @@ int nvme_ns_cmd_reservation_register(struct nvme_namespace *ns, * nvme_register_io_thread(). */ int nvme_ns_cmd_reservation_release(struct nvme_namespace *ns, - struct nvme_reservation_key_data *payload, + struct spdk_nvme_reservation_key_data *payload, bool ignore_key, - enum nvme_reservation_release_action action, - enum nvme_reservation_type type, + enum spdk_nvme_reservation_release_action action, + enum spdk_nvme_reservation_type type, nvme_cb_fn_t cb_fn, void *cb_arg); /** @@ -636,10 +636,10 @@ int nvme_ns_cmd_reservation_release(struct nvme_namespace *ns, * nvme_register_io_thread(). */ int nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns, - struct nvme_reservation_acquire_data *payload, + struct spdk_nvme_reservation_acquire_data *payload, bool ignore_key, - enum nvme_reservation_acquire_action action, - enum nvme_reservation_type type, + enum spdk_nvme_reservation_acquire_action action, + enum spdk_nvme_reservation_type type, nvme_cb_fn_t cb_fn, void *cb_arg); /** diff --git a/include/spdk/nvme_spec.h b/include/spdk/nvme_spec.h index 1486f0bd8..ab8a90c3d 100644 --- a/include/spdk/nvme_spec.h +++ b/include/spdk/nvme_spec.h @@ -48,17 +48,17 @@ * Use to mark a command to apply to all namespaces, or to retrieve global * log pages. */ -#define NVME_GLOBAL_NAMESPACE_TAG ((uint32_t)0xFFFFFFFF) +#define SPDK_NVME_GLOBAL_NS_TAG ((uint32_t)0xFFFFFFFF) -#define NVME_MAX_IO_QUEUES (1 << 16) +#define SPDK_NVME_MAX_IO_QUEUES (1 << 16) /** * Indicates the maximum number of range sets that may be specified * in the dataset mangement command. 
*/ -#define NVME_DATASET_MANAGEMENT_MAX_RANGES 256 +#define SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES 256 -union nvme_cap_lo_register { +union spdk_nvme_cap_lo_register { uint32_t raw; struct { /** maximum queue entries supported */ @@ -76,9 +76,9 @@ union nvme_cap_lo_register { uint32_t to : 8; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_cap_lo_register) == 4, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_cap_lo_register) == 4, "Incorrect size"); -union nvme_cap_hi_register { +union spdk_nvme_cap_hi_register { uint32_t raw; struct { /** doorbell stride */ @@ -101,9 +101,9 @@ union nvme_cap_hi_register { uint32_t reserved1 : 8; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_cap_hi_register) == 4, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_cap_hi_register) == 4, "Incorrect size"); -union nvme_cc_register { +union spdk_nvme_cc_register { uint32_t raw; struct { /** enable */ @@ -132,14 +132,14 @@ union nvme_cc_register { uint32_t reserved2 : 8; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_cc_register) == 4, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_cc_register) == 4, "Incorrect size"); -enum nvme_shn_value { - NVME_SHN_NORMAL = 0x1, - NVME_SHN_ABRUPT = 0x2, +enum spdk_nvme_shn_value { + SPDK_NVME_SHN_NORMAL = 0x1, + SPDK_NVME_SHN_ABRUPT = 0x2, }; -union nvme_csts_register { +union spdk_nvme_csts_register { uint32_t raw; struct { /** ready */ @@ -154,15 +154,15 @@ union nvme_csts_register { uint32_t reserved1 : 28; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_csts_register) == 4, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_csts_register) == 4, "Incorrect size"); -enum nvme_shst_value { - NVME_SHST_NORMAL = 0x0, - NVME_SHST_OCCURRING = 0x1, - NVME_SHST_COMPLETE = 0x2, +enum spdk_nvme_shst_value { + SPDK_NVME_SHST_NORMAL = 0x0, + SPDK_NVME_SHST_OCCURRING = 0x1, + SPDK_NVME_SHST_COMPLETE = 0x2, }; -union nvme_aqa_register { +union spdk_nvme_aqa_register { uint32_t raw; struct { /** admin submission queue size */ @@ -176,26 +176,26 @@ union nvme_aqa_register { uint32_t reserved2 : 4; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_aqa_register) == 4, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_aqa_register) == 4, "Incorrect size"); -struct nvme_registers { +struct spdk_nvme_registers { /** controller capabilities */ - union nvme_cap_lo_register cap_lo; - union nvme_cap_hi_register cap_hi; + union spdk_nvme_cap_lo_register cap_lo; + union spdk_nvme_cap_hi_register cap_hi; uint32_t vs; /* version */ uint32_t intms; /* interrupt mask set */ uint32_t intmc; /* interrupt mask clear */ /** controller configuration */ - union nvme_cc_register cc; + union spdk_nvme_cc_register cc; uint32_t reserved1; uint32_t csts; /* controller status */ uint32_t nssr; /* NVM subsystem reset */ /** admin queue attributes */ - union nvme_aqa_register aqa; + union spdk_nvme_aqa_register aqa; uint64_t asq; /* admin submission queue base addr */ uint64_t acq; /* admin completion queue base addr */ @@ -208,27 +208,30 @@ struct nvme_registers { }; /* NVMe controller register space offsets */ -SPDK_STATIC_ASSERT(0x00 == offsetof(struct nvme_registers, cap_lo), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x08 == offsetof(struct nvme_registers, vs), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x0C == offsetof(struct nvme_registers, intms), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x10 == offsetof(struct nvme_registers, intmc), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x14 == offsetof(struct 
nvme_registers, cc), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x1C == offsetof(struct nvme_registers, csts), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x20 == offsetof(struct nvme_registers, nssr), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x24 == offsetof(struct nvme_registers, aqa), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x28 == offsetof(struct nvme_registers, asq), "Incorrect register offset"); -SPDK_STATIC_ASSERT(0x30 == offsetof(struct nvme_registers, acq), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x00 == offsetof(struct spdk_nvme_registers, cap_lo), + "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x08 == offsetof(struct spdk_nvme_registers, vs), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x0C == offsetof(struct spdk_nvme_registers, intms), + "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x10 == offsetof(struct spdk_nvme_registers, intmc), + "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x14 == offsetof(struct spdk_nvme_registers, cc), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x1C == offsetof(struct spdk_nvme_registers, csts), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x20 == offsetof(struct spdk_nvme_registers, nssr), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x24 == offsetof(struct spdk_nvme_registers, aqa), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x28 == offsetof(struct spdk_nvme_registers, asq), "Incorrect register offset"); +SPDK_STATIC_ASSERT(0x30 == offsetof(struct spdk_nvme_registers, acq), "Incorrect register offset"); -enum nvme_sgl_descriptor_type { - NVME_SGL_TYPE_DATA_BLOCK = 0x0, - NVME_SGL_TYPE_BIT_BUCKET = 0x1, - NVME_SGL_TYPE_SEGMENT = 0x2, - NVME_SGL_TYPE_LAST_SEGMENT = 0x3, +enum spdk_nvme_sgl_descriptor_type { + SPDK_NVME_SGL_TYPE_DATA_BLOCK = 0x0, + SPDK_NVME_SGL_TYPE_BIT_BUCKET = 0x1, + SPDK_NVME_SGL_TYPE_SEGMENT = 0x2, + SPDK_NVME_SGL_TYPE_LAST_SEGMENT = 0x3, /* 0x4 - 0xe reserved */ - NVME_SGL_TYPE_VENDOR_SPECIFIC = 0xf + SPDK_NVME_SGL_TYPE_VENDOR_SPECIFIC = 0xf }; -struct __attribute__((packed)) nvme_sgl_descriptor { +struct __attribute__((packed)) spdk_nvme_sgl_descriptor { uint64_t address; uint32_t length; uint8_t reserved[3]; @@ -239,16 +242,16 @@ struct __attribute__((packed)) nvme_sgl_descriptor { /** SGL descriptor type specific */ uint8_t type_specific : 4; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_sgl_descriptor) == 16, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_sgl_descriptor) == 16, "Incorrect size"); -enum nvme_psdt_value { - NVME_PSDT_PRP = 0x0, - NVME_PSDT_SGL_MPTR_CONTIG = 0x1, - NVME_PSDT_SGL_MPTR_SGL = 0x2, - NVME_PSDT_RESERVED = 0x3 +enum spdk_nvme_psdt_value { + SPDK_NVME_PSDT_PRP = 0x0, + SPDK_NVME_PSDT_SGL_MPTR_CONTIG = 0x1, + SPDK_NVME_PSDT_SGL_MPTR_SGL = 0x2, + SPDK_NVME_PSDT_RESERVED = 0x3 }; -struct nvme_command { +struct spdk_nvme_cmd { /* dword 0 */ uint16_t opc : 8; /* opcode */ uint16_t fuse : 2; /* fused operation */ @@ -273,7 +276,7 @@ struct nvme_command { uint64_t prp2; /* prp entry 2 */ } prp; - struct nvme_sgl_descriptor sgl1; + struct spdk_nvme_sgl_descriptor sgl1; } dptr; /* dword 10-15 */ @@ -284,9 +287,9 @@ struct nvme_command { uint32_t cdw14; /* command-specific */ uint32_t cdw15; /* command-specific */ }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_command) == 64, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_cmd) == 64, "Incorrect size"); -struct nvme_status { +struct spdk_nvme_status { uint16_t p : 1; /* phase tag */ uint16_t sc : 8; /* status code */ uint16_t sct : 3; /* status code type 
*/ @@ -294,9 +297,12 @@ struct nvme_status { uint16_t m : 1; /* more */ uint16_t dnr : 1; /* do not retry */ }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_status) == 2, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_status) == 2, "Incorrect size"); -struct nvme_completion { +/** + * Completion queue entry + */ +struct spdk_nvme_cpl { /* dword 0 */ uint32_t cdw0; /* command-specific */ @@ -309,155 +315,170 @@ struct nvme_completion { /* dword 3 */ uint16_t cid; /* command identifier */ - struct nvme_status status; + struct spdk_nvme_status status; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_completion) == 16, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_cpl) == 16, "Incorrect size"); -struct nvme_dsm_range { +/** + * Dataset Management range + */ +struct spdk_nvme_dsm_range { uint32_t attributes; uint32_t length; uint64_t starting_lba; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_dsm_range) == 16, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_dsm_range) == 16, "Incorrect size"); -/* status code types */ -enum nvme_status_code_type { - NVME_SCT_GENERIC = 0x0, - NVME_SCT_COMMAND_SPECIFIC = 0x1, - NVME_SCT_MEDIA_ERROR = 0x2, +/** + * Status code types + */ +enum spdk_nvme_status_code_type { + SPDK_NVME_SCT_GENERIC = 0x0, + SPDK_NVME_SCT_COMMAND_SPECIFIC = 0x1, + SPDK_NVME_SCT_MEDIA_ERROR = 0x2, /* 0x3-0x6 - reserved */ - NVME_SCT_VENDOR_SPECIFIC = 0x7, + SPDK_NVME_SCT_VENDOR_SPECIFIC = 0x7, }; -/* generic command status codes */ -enum nvme_generic_command_status_code { - NVME_SC_SUCCESS = 0x00, - NVME_SC_INVALID_OPCODE = 0x01, - NVME_SC_INVALID_FIELD = 0x02, - NVME_SC_COMMAND_ID_CONFLICT = 0x03, - NVME_SC_DATA_TRANSFER_ERROR = 0x04, - NVME_SC_ABORTED_POWER_LOSS = 0x05, - NVME_SC_INTERNAL_DEVICE_ERROR = 0x06, - NVME_SC_ABORTED_BY_REQUEST = 0x07, - NVME_SC_ABORTED_SQ_DELETION = 0x08, - NVME_SC_ABORTED_FAILED_FUSED = 0x09, - NVME_SC_ABORTED_MISSING_FUSED = 0x0a, - NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b, - NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c, +/** + * Generic command status codes + */ +enum spdk_nvme_generic_command_status_code { + SPDK_NVME_SC_SUCCESS = 0x00, + SPDK_NVME_SC_INVALID_OPCODE = 0x01, + SPDK_NVME_SC_INVALID_FIELD = 0x02, + SPDK_NVME_SC_COMMAND_ID_CONFLICT = 0x03, + SPDK_NVME_SC_DATA_TRANSFER_ERROR = 0x04, + SPDK_NVME_SC_ABORTED_POWER_LOSS = 0x05, + SPDK_NVME_SC_INTERNAL_DEVICE_ERROR = 0x06, + SPDK_NVME_SC_ABORTED_BY_REQUEST = 0x07, + SPDK_NVME_SC_ABORTED_SQ_DELETION = 0x08, + SPDK_NVME_SC_ABORTED_FAILED_FUSED = 0x09, + SPDK_NVME_SC_ABORTED_MISSING_FUSED = 0x0a, + SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b, + SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c, - NVME_SC_LBA_OUT_OF_RANGE = 0x80, - NVME_SC_CAPACITY_EXCEEDED = 0x81, - NVME_SC_NAMESPACE_NOT_READY = 0x82, + SPDK_NVME_SC_LBA_OUT_OF_RANGE = 0x80, + SPDK_NVME_SC_CAPACITY_EXCEEDED = 0x81, + SPDK_NVME_SC_NAMESPACE_NOT_READY = 0x82, }; -/* command specific status codes */ -enum nvme_command_specific_status_code { - NVME_SC_COMPLETION_QUEUE_INVALID = 0x00, - NVME_SC_INVALID_QUEUE_IDENTIFIER = 0x01, - NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED = 0x02, - NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED = 0x03, +/** + * Command specific status codes + */ +enum spdk_nvme_command_specific_status_code { + SPDK_NVME_SC_COMPLETION_QUEUE_INVALID = 0x00, + SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER = 0x01, + SPDK_NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED = 0x02, + SPDK_NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED = 0x03, /* 0x04 - reserved */ - NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05, - NVME_SC_INVALID_FIRMWARE_SLOT = 0x06, 
- NVME_SC_INVALID_FIRMWARE_IMAGE = 0x07, - NVME_SC_INVALID_INTERRUPT_VECTOR = 0x08, - NVME_SC_INVALID_LOG_PAGE = 0x09, - NVME_SC_INVALID_FORMAT = 0x0a, - NVME_SC_FIRMWARE_REQUIRES_RESET = 0x0b, + SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05, + SPDK_NVME_SC_INVALID_FIRMWARE_SLOT = 0x06, + SPDK_NVME_SC_INVALID_FIRMWARE_IMAGE = 0x07, + SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR = 0x08, + SPDK_NVME_SC_INVALID_LOG_PAGE = 0x09, + SPDK_NVME_SC_INVALID_FORMAT = 0x0a, + SPDK_NVME_SC_FIRMWARE_REQUIRES_RESET = 0x0b, - NVME_SC_CONFLICTING_ATTRIBUTES = 0x80, - NVME_SC_INVALID_PROTECTION_INFO = 0x81, - NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE = 0x82, + SPDK_NVME_SC_CONFLICTING_ATTRIBUTES = 0x80, + SPDK_NVME_SC_INVALID_PROTECTION_INFO = 0x81, + SPDK_NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE = 0x82, }; -/* media error status codes */ -enum nvme_media_error_status_code { - NVME_SC_WRITE_FAULTS = 0x80, - NVME_SC_UNRECOVERED_READ_ERROR = 0x81, - NVME_SC_GUARD_CHECK_ERROR = 0x82, - NVME_SC_APPLICATION_TAG_CHECK_ERROR = 0x83, - NVME_SC_REFERENCE_TAG_CHECK_ERROR = 0x84, - NVME_SC_COMPARE_FAILURE = 0x85, - NVME_SC_ACCESS_DENIED = 0x86, +/** + * Media error status codes + */ +enum spdk_nvme_media_error_status_code { + SPDK_NVME_SC_WRITE_FAULTS = 0x80, + SPDK_NVME_SC_UNRECOVERED_READ_ERROR = 0x81, + SPDK_NVME_SC_GUARD_CHECK_ERROR = 0x82, + SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR = 0x83, + SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR = 0x84, + SPDK_NVME_SC_COMPARE_FAILURE = 0x85, + SPDK_NVME_SC_ACCESS_DENIED = 0x86, }; -/* admin opcodes */ -enum nvme_admin_opcode { - NVME_OPC_DELETE_IO_SQ = 0x00, - NVME_OPC_CREATE_IO_SQ = 0x01, - NVME_OPC_GET_LOG_PAGE = 0x02, +/** + * Admin opcodes + */ +enum spdk_nvme_admin_opcode { + SPDK_NVME_OPC_DELETE_IO_SQ = 0x00, + SPDK_NVME_OPC_CREATE_IO_SQ = 0x01, + SPDK_NVME_OPC_GET_LOG_PAGE = 0x02, /* 0x03 - reserved */ - NVME_OPC_DELETE_IO_CQ = 0x04, - NVME_OPC_CREATE_IO_CQ = 0x05, - NVME_OPC_IDENTIFY = 0x06, + SPDK_NVME_OPC_DELETE_IO_CQ = 0x04, + SPDK_NVME_OPC_CREATE_IO_CQ = 0x05, + SPDK_NVME_OPC_IDENTIFY = 0x06, /* 0x07 - reserved */ - NVME_OPC_ABORT = 0x08, - NVME_OPC_SET_FEATURES = 0x09, - NVME_OPC_GET_FEATURES = 0x0a, + SPDK_NVME_OPC_ABORT = 0x08, + SPDK_NVME_OPC_SET_FEATURES = 0x09, + SPDK_NVME_OPC_GET_FEATURES = 0x0a, /* 0x0b - reserved */ - NVME_OPC_ASYNC_EVENT_REQUEST = 0x0c, - NVME_OPC_NAMESPACE_MANAGEMENT = 0x0d, + SPDK_NVME_OPC_ASYNC_EVENT_REQUEST = 0x0c, + SPDK_NVME_OPC_NS_MANAGEMENT = 0x0d, /* 0x0e-0x0f - reserved */ - NVME_OPC_FIRMWARE_COMMIT = 0x10, - NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD = 0x11, + SPDK_NVME_OPC_FIRMWARE_COMMIT = 0x10, + SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD = 0x11, - NVME_OPC_NAMESPACE_ATTACHMENT = 0x15, + SPDK_NVME_OPC_NS_ATTACHMENT = 0x15, - NVME_OPC_FORMAT_NVM = 0x80, - NVME_OPC_SECURITY_SEND = 0x81, - NVME_OPC_SECURITY_RECEIVE = 0x82, + SPDK_NVME_OPC_FORMAT_NVM = 0x80, + SPDK_NVME_OPC_SECURITY_SEND = 0x81, + SPDK_NVME_OPC_SECURITY_RECEIVE = 0x82, }; -/* nvme nvm opcodes */ -enum nvme_nvm_opcode { - NVME_OPC_FLUSH = 0x00, - NVME_OPC_WRITE = 0x01, - NVME_OPC_READ = 0x02, +/** + * NVM command set opcodes + */ +enum spdk_nvme_nvm_opcode { + SPDK_NVME_OPC_FLUSH = 0x00, + SPDK_NVME_OPC_WRITE = 0x01, + SPDK_NVME_OPC_READ = 0x02, /* 0x03 - reserved */ - NVME_OPC_WRITE_UNCORRECTABLE = 0x04, - NVME_OPC_COMPARE = 0x05, + SPDK_NVME_OPC_WRITE_UNCORRECTABLE = 0x04, + SPDK_NVME_OPC_COMPARE = 0x05, /* 0x06-0x07 - reserved */ - NVME_OPC_WRITE_ZEROES = 0x08, - NVME_OPC_DATASET_MANAGEMENT = 0x09, + SPDK_NVME_OPC_WRITE_ZEROES = 0x08, + SPDK_NVME_OPC_DATASET_MANAGEMENT = 0x09, - 
NVME_OPC_RESERVATION_REGISTER = 0x0d, - NVME_OPC_RESERVATION_REPORT = 0x0e, + SPDK_NVME_OPC_RESERVATION_REGISTER = 0x0d, + SPDK_NVME_OPC_RESERVATION_REPORT = 0x0e, - NVME_OPC_RESERVATION_ACQUIRE = 0x11, - NVME_OPC_RESERVATION_RELEASE = 0x15, + SPDK_NVME_OPC_RESERVATION_ACQUIRE = 0x11, + SPDK_NVME_OPC_RESERVATION_RELEASE = 0x15, }; -enum nvme_feature { +enum spdk_nvme_feat { /* 0x00 - reserved */ - NVME_FEAT_ARBITRATION = 0x01, - NVME_FEAT_POWER_MANAGEMENT = 0x02, - NVME_FEAT_LBA_RANGE_TYPE = 0x03, - NVME_FEAT_TEMPERATURE_THRESHOLD = 0x04, - NVME_FEAT_ERROR_RECOVERY = 0x05, - NVME_FEAT_VOLATILE_WRITE_CACHE = 0x06, - NVME_FEAT_NUMBER_OF_QUEUES = 0x07, - NVME_FEAT_INTERRUPT_COALESCING = 0x08, - NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09, - NVME_FEAT_WRITE_ATOMICITY = 0x0A, - NVME_FEAT_ASYNC_EVENT_CONFIGURATION = 0x0B, - NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C, - NVME_FEAT_HOST_MEM_BUFFER = 0x0D, + SPDK_NVME_FEAT_ARBITRATION = 0x01, + SPDK_NVME_FEAT_POWER_MANAGEMENT = 0x02, + SPDK_NVME_FEAT_LBA_RANGE_TYPE = 0x03, + SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD = 0x04, + SPDK_NVME_FEAT_ERROR_RECOVERY = 0x05, + SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE = 0x06, + SPDK_NVME_FEAT_NUMBER_OF_QUEUES = 0x07, + SPDK_NVME_FEAT_INTERRUPT_COALESCING = 0x08, + SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09, + SPDK_NVME_FEAT_WRITE_ATOMICITY = 0x0A, + SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION = 0x0B, + SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION = 0x0C, + SPDK_NVME_FEAT_HOST_MEM_BUFFER = 0x0D, /* 0x0C-0x7F - reserved */ - NVME_FEAT_SOFTWARE_PROGRESS_MARKER = 0x80, + SPDK_NVME_FEAT_SOFTWARE_PROGRESS_MARKER = 0x80, /* 0x81-0xBF - command set specific */ - NVME_FEAT_HOST_IDENTIFIER = 0x81, - NVME_FEAT_HOST_RESERVE_MASK = 0x82, - NVME_FEAT_HOST_RESERVE_PERSIST = 0x83, + SPDK_NVME_FEAT_HOST_IDENTIFIER = 0x81, + SPDK_NVME_FEAT_HOST_RESERVE_MASK = 0x82, + SPDK_NVME_FEAT_HOST_RESERVE_PERSIST = 0x83, /* 0xC0-0xFF - vendor specific */ }; -enum nvme_dsm_attribute { - NVME_DSM_ATTR_INTEGRAL_READ = 0x1, - NVME_DSM_ATTR_INTEGRAL_WRITE = 0x2, - NVME_DSM_ATTR_DEALLOCATE = 0x4, +enum spdk_nvme_dsm_attribute { + SPDK_NVME_DSM_ATTR_INTEGRAL_READ = 0x1, + SPDK_NVME_DSM_ATTR_INTEGRAL_WRITE = 0x2, + SPDK_NVME_DSM_ATTR_DEALLOCATE = 0x4, }; -struct nvme_power_state { +struct spdk_nvme_power_state { uint16_t mp; /* bits 15:00: maximum power */ uint8_t reserved1; @@ -483,9 +504,9 @@ struct nvme_power_state { uint8_t reserved7[16]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_power_state) == 32, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_power_state) == 32, "Incorrect size"); -struct __attribute__((packed)) nvme_controller_data { +struct __attribute__((packed)) spdk_nvme_ctrlr_data { /* bytes 0-255: controller capabilities and features */ /** pci vendor id */ @@ -704,14 +725,14 @@ struct __attribute__((packed)) nvme_controller_data { uint8_t reserved5[1344]; /* bytes 2048-3071: power state descriptors */ - struct nvme_power_state psd[32]; + struct spdk_nvme_power_state psd[32]; /* bytes 3072-4095: vendor specific */ uint8_t vs[1024]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_controller_data) == 4096, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_ctrlr_data) == 4096, "Incorrect size"); -struct nvme_namespace_data { +struct spdk_nvme_ns_data { /** namespace size */ uint64_t nsze; @@ -868,53 +889,53 @@ struct nvme_namespace_data { uint8_t vendor_specific[3712]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_namespace_data) == 4096, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct 
spdk_nvme_ns_data) == 4096, "Incorrect size"); /** * Reservation Type Encoding */ -enum nvme_reservation_type { +enum spdk_nvme_reservation_type { /* 0x00 - reserved */ /* Write Exclusive Reservation */ - NVME_RESERVE_WRITE_EXCLUSIVE = 0x1, + SPDK_NVME_RESERVE_WRITE_EXCLUSIVE = 0x1, /* Exclusive Access Reservation */ - NVME_RESERVE_EXCLUSIVE_ACCESS = 0x2, + SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS = 0x2, /* Write Exclusive - Registrants Only Reservation */ - NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY = 0x3, + SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY = 0x3, /* Exclusive Access - Registrants Only Reservation */ - NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY = 0x4, + SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY = 0x4, /* Write Exclusive - All Registrants Reservation */ - NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS = 0x5, + SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS = 0x5, /* Exclusive Access - All Registrants Reservation */ - NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS = 0x6, + SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS = 0x6, /* 0x7-0xFF - Reserved */ }; -struct nvme_reservation_acquire_data { +struct spdk_nvme_reservation_acquire_data { /** current reservation key */ uint64_t crkey; /** preempt reservation key */ uint64_t prkey; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_reservation_acquire_data) == 16, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_reservation_acquire_data) == 16, "Incorrect size"); /** * Reservation Acquire action */ -enum nvme_reservation_acquire_action { - NVME_RESERVE_ACQUIRE = 0x0, - NVME_RESERVE_PREEMPT = 0x1, - NVME_RESERVE_PREEMPT_ABORT = 0x2, +enum spdk_nvme_reservation_acquire_action { + SPDK_NVME_RESERVE_ACQUIRE = 0x0, + SPDK_NVME_RESERVE_PREEMPT = 0x1, + SPDK_NVME_RESERVE_PREEMPT_ABORT = 0x2, }; -struct __attribute__((packed)) nvme_reservation_status_data { +struct __attribute__((packed)) spdk_nvme_reservation_status_data { /** reservation action generation counter */ uint32_t generation; /** reservation type */ @@ -926,9 +947,9 @@ struct __attribute__((packed)) nvme_reservation_status_data { uint8_t ptpl_state; uint8_t reserved[14]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_reservation_status_data) == 24, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_reservation_status_data) == 24, "Incorrect size"); -struct __attribute__((packed)) nvme_reservation_controller_data { +struct __attribute__((packed)) spdk_nvme_reservation_ctrlr_data { uint16_t ctrlr_id; /** reservation status */ struct { @@ -941,74 +962,74 @@ struct __attribute__((packed)) nvme_reservation_controller_data { /** reservation key */ uint64_t key; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_reservation_controller_data) == 24, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_reservation_ctrlr_data) == 24, "Incorrect size"); /** * Change persist through power loss state for * Reservation Register command */ -enum nvme_reservation_register_cptpl { - NVME_RESERVE_PTPL_NO_CHANGES = 0x0, - NVME_RESERVE_PTPL_CLEAR_POWER_ON = 0x2, - NVME_RESERVE_PTPL_PERSIST_POWER_LOSS = 0x3, +enum spdk_nvme_reservation_register_cptpl { + SPDK_NVME_RESERVE_PTPL_NO_CHANGES = 0x0, + SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON = 0x2, + SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS = 0x3, }; /** * Registration action for Reservation Register command */ -enum nvme_reservation_register_action { - NVME_RESERVE_REGISTER_KEY = 0x0, - NVME_RESERVE_UNREGISTER_KEY = 0x1, - NVME_RESERVE_REPLACE_KEY = 0x2, +enum spdk_nvme_reservation_register_action { + SPDK_NVME_RESERVE_REGISTER_KEY = 0x0, + SPDK_NVME_RESERVE_UNREGISTER_KEY = 
0x1, + SPDK_NVME_RESERVE_REPLACE_KEY = 0x2, }; -struct nvme_reservation_register_data { +struct spdk_nvme_reservation_register_data { /** current reservation key */ uint64_t crkey; /** new reservation key */ uint64_t nrkey; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_reservation_register_data) == 16, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_reservation_register_data) == 16, "Incorrect size"); -struct nvme_reservation_key_data { +struct spdk_nvme_reservation_key_data { /** current reservation key */ uint64_t crkey; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_reservation_key_data) == 8, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_reservation_key_data) == 8, "Incorrect size"); /** * Reservation Release action */ -enum nvme_reservation_release_action { - NVME_RESERVE_RELEASE = 0x0, - NVME_RESERVE_CLEAR = 0x1, +enum spdk_nvme_reservation_release_action { + SPDK_NVME_RESERVE_RELEASE = 0x0, + SPDK_NVME_RESERVE_CLEAR = 0x1, }; /** - * Log page identifiers for NVME_OPC_GET_LOG_PAGE + * Log page identifiers for SPDK_NVME_OPC_GET_LOG_PAGE */ -enum nvme_log_page { +enum spdk_nvme_log_page { /* 0x00 - reserved */ - /** Error information (mandatory) - \ref nvme_error_information_entry */ - NVME_LOG_ERROR = 0x01, + /** Error information (mandatory) - \ref spdk_nvme_error_information_entry */ + SPDK_NVME_LOG_ERROR = 0x01, - /** SMART / health information (mandatory) - \ref nvme_health_information_page */ - NVME_LOG_HEALTH_INFORMATION = 0x02, + /** SMART / health information (mandatory) - \ref spdk_nvme_health_information_page */ + SPDK_NVME_LOG_HEALTH_INFORMATION = 0x02, - /** Firmware slot information (mandatory) - \ref nvme_firmware_page */ - NVME_LOG_FIRMWARE_SLOT = 0x03, + /** Firmware slot information (mandatory) - \ref spdk_nvme_firmware_page */ + SPDK_NVME_LOG_FIRMWARE_SLOT = 0x03, /** Changed namespace list (optional) */ - NVME_LOG_CHANGED_NS_LIST = 0x04, + SPDK_NVME_LOG_CHANGED_NS_LIST = 0x04, /** Command effects log (optional) */ - NVME_LOG_COMMAND_EFFECTS_LOG = 0x05, + SPDK_NVME_LOG_COMMAND_EFFECTS_LOG = 0x05, /* 0x06-0x7F - reserved */ /** Reservation notification (optional) */ - NVME_LOG_RESERVATION_NOTIFICATION = 0x80, + SPDK_NVME_LOG_RESERVATION_NOTIFICATION = 0x80, /* 0x81-0xBF - I/O command set specific */ @@ -1016,22 +1037,22 @@ enum nvme_log_page { }; /** - * Error information log page (\ref NVME_LOG_ERROR) + * Error information log page (\ref SPDK_NVME_LOG_ERROR) */ -struct nvme_error_information_entry { +struct spdk_nvme_error_information_entry { uint64_t error_count; uint16_t sqid; uint16_t cid; - struct nvme_status status; + struct spdk_nvme_status status; uint16_t error_location; uint64_t lba; uint32_t nsid; uint8_t vendor_specific; uint8_t reserved[35]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_error_information_entry) == 64, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_error_information_entry) == 64, "Incorrect size"); -union nvme_critical_warning_state { +union spdk_nvme_critical_warning_state { uint8_t raw; struct { @@ -1043,13 +1064,13 @@ union nvme_critical_warning_state { uint8_t reserved : 3; } bits; }; -SPDK_STATIC_ASSERT(sizeof(union nvme_critical_warning_state) == 1, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(union spdk_nvme_critical_warning_state) == 1, "Incorrect size"); /** - * SMART / health information page (\ref NVME_LOG_HEALTH_INFORMATION) + * SMART / health information page (\ref SPDK_NVME_LOG_HEALTH_INFORMATION) */ -struct __attribute__((packed)) nvme_health_information_page { - union 
nvme_critical_warning_state critical_warning; +struct __attribute__((packed)) spdk_nvme_health_information_page { + union spdk_nvme_critical_warning_state critical_warning; uint16_t temperature; uint8_t available_spare; @@ -1079,12 +1100,12 @@ struct __attribute__((packed)) nvme_health_information_page { uint8_t reserved2[320]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_health_information_page) == 512, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_health_information_page) == 512, "Incorrect size"); /** - * Firmware slot information page (\ref NVME_LOG_FIRMWARE_SLOT) + * Firmware slot information page (\ref SPDK_NVME_LOG_FIRMWARE_SLOT) */ -struct nvme_firmware_page { +struct spdk_nvme_firmware_page { struct { uint8_t slot : 3; /* slot for current FW */ uint8_t reserved : 5; @@ -1094,17 +1115,17 @@ struct nvme_firmware_page { uint64_t revision[7]; /* revisions for 7 slots */ uint8_t reserved2[448]; }; -SPDK_STATIC_ASSERT(sizeof(struct nvme_firmware_page) == 512, "Incorrect size"); +SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_firmware_page) == 512, "Incorrect size"); /** * Namespace attachment Type Encoding */ -enum nvme_namespace_attach_type { +enum spdk_nvme_ns_attach_type { /* Controller attach */ - NVME_NAMESPACE_CONTROLLER_ATTACH = 0x0, + SPDK_NVME_NS_CTRLR_ATTACH = 0x0, /* Controller detach */ - NVME_NAMESPACE_CONTROLLER_DETACH = 0x1, + SPDK_NVME_NS_CTRLR_DETACH = 0x1, /* 0x2-0xF - Reserved */ }; @@ -1112,20 +1133,20 @@ enum nvme_namespace_attach_type { /** * Namespace management Type Encoding */ -enum nvme_namespace_management_type { +enum spdk_nvme_ns_management_type { /* Create */ - NVME_NAMESPACE_MANAGEMENT_CREATE = 0x0, + SPDK_NVME_NS_MANAGEMENT_CREATE = 0x0, /* Delete */ - NVME_NAMESPACE_MANAGEMENT_DELETE = 0x1, + SPDK_NVME_NS_MANAGEMENT_DELETE = 0x1, /* 0x2-0xF - Reserved */ }; -#define nvme_completion_is_error(cpl) \ +#define spdk_nvme_cpl_is_error(cpl) \ ((cpl)->status.sc != 0 || (cpl)->status.sct != 0) -#define NVME_IO_FLAGS_FORCE_UNIT_ACCESS (1U << 30) -#define NVME_IO_FLAGS_LIMITED_RETRY (1U << 31) +#define SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS (1U << 30) +#define SPDK_NVME_IO_FLAGS_LIMITED_RETRY (1U << 31) #endif diff --git a/lib/nvme/nvme.c b/lib/nvme/nvme.c index 23adeef55..8e2842c1d 100644 --- a/lib/nvme/nvme.c +++ b/lib/nvme/nvme.c @@ -107,7 +107,7 @@ nvme_detach(struct nvme_controller *ctrlr) } void -nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl) +nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl) { struct nvme_completion_poll_status *status = arg; diff --git a/lib/nvme/nvme_ctrlr.c b/lib/nvme/nvme_ctrlr.c index d56f34ab1..1b8e099dc 100644 --- a/lib/nvme/nvme_ctrlr.c +++ b/lib/nvme/nvme_ctrlr.c @@ -91,14 +91,14 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_controller *ctrlr) } status.done = false; - nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, NVME_GLOBAL_NAMESPACE_TAG, + nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG, log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory), nvme_completion_poll_cb, &status); while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_free(log_page_directory); nvme_printf(ctrlr, "nvme_ctrlr_cmd_get_log_page failed!\n"); return ENXIO; @@ -114,11 +114,11 @@ nvme_ctrlr_set_supported_log_pages(struct nvme_controller *ctrlr) { memset(ctrlr->log_page_supported, 0, 
sizeof(ctrlr->log_page_supported)); /* Mandatory pages */ - ctrlr->log_page_supported[NVME_LOG_ERROR] = true; - ctrlr->log_page_supported[NVME_LOG_HEALTH_INFORMATION] = true; - ctrlr->log_page_supported[NVME_LOG_FIRMWARE_SLOT] = true; + ctrlr->log_page_supported[SPDK_NVME_LOG_ERROR] = true; + ctrlr->log_page_supported[SPDK_NVME_LOG_HEALTH_INFORMATION] = true; + ctrlr->log_page_supported[SPDK_NVME_LOG_FIRMWARE_SLOT] = true; if (ctrlr->cdata.lpa.celp) { - ctrlr->log_page_supported[NVME_LOG_COMMAND_EFFECTS_LOG] = true; + ctrlr->log_page_supported[SPDK_NVME_LOG_COMMAND_EFFECTS_LOG] = true; } if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) { nvme_ctrlr_set_intel_support_log_pages(ctrlr); @@ -142,24 +142,24 @@ nvme_ctrlr_set_supported_features(struct nvme_controller *ctrlr) { memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported)); /* Mandatory features */ - ctrlr->feature_supported[NVME_FEAT_ARBITRATION] = true; - ctrlr->feature_supported[NVME_FEAT_POWER_MANAGEMENT] = true; - ctrlr->feature_supported[NVME_FEAT_TEMPERATURE_THRESHOLD] = true; - ctrlr->feature_supported[NVME_FEAT_ERROR_RECOVERY] = true; - ctrlr->feature_supported[NVME_FEAT_NUMBER_OF_QUEUES] = true; - ctrlr->feature_supported[NVME_FEAT_INTERRUPT_COALESCING] = true; - ctrlr->feature_supported[NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true; - ctrlr->feature_supported[NVME_FEAT_WRITE_ATOMICITY] = true; - ctrlr->feature_supported[NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_ARBITRATION] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_POWER_MANAGEMENT] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_ERROR_RECOVERY] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_NUMBER_OF_QUEUES] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_COALESCING] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_WRITE_ATOMICITY] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION] = true; /* Optional features */ if (ctrlr->cdata.vwc.present) { - ctrlr->feature_supported[NVME_FEAT_VOLATILE_WRITE_CACHE] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE] = true; } if (ctrlr->cdata.apsta.supported) { - ctrlr->feature_supported[NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION] = true; } if (ctrlr->cdata.hmpre) { - ctrlr->feature_supported[NVME_FEAT_HOST_MEM_BUFFER] = true; + ctrlr->feature_supported[SPDK_NVME_FEAT_HOST_MEM_BUFFER] = true; } if (ctrlr->cdata.vid == SPDK_PCI_VID_INTEL) { nvme_ctrlr_set_intel_supported_features(ctrlr); @@ -180,7 +180,7 @@ static int nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr) { struct nvme_qpair *qpair; - union nvme_cap_lo_register cap_lo; + union spdk_nvme_cap_lo_register cap_lo; uint32_t i, num_entries, num_trackers; int rc; @@ -251,8 +251,8 @@ static int _nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_ready_value) { int ms_waited, ready_timeout_in_ms; - union nvme_csts_register csts; - union nvme_cap_lo_register cap_lo; + union spdk_nvme_csts_register csts; + union spdk_nvme_cap_lo_register cap_lo; /* Get ready timeout value from controller, in units of 500ms. 
*/ cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo.raw); @@ -278,7 +278,7 @@ _nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_ready_valu static int nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr) { - union nvme_cc_register cc; + union spdk_nvme_cc_register cc; cc.raw = nvme_mmio_read_4(ctrlr, cc.raw); @@ -293,8 +293,8 @@ nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr) static void nvme_ctrlr_disable(struct nvme_controller *ctrlr) { - union nvme_cc_register cc; - union nvme_csts_register csts; + union spdk_nvme_cc_register cc; + union spdk_nvme_csts_register csts; cc.raw = nvme_mmio_read_4(ctrlr, cc.raw); csts.raw = nvme_mmio_read_4(ctrlr, csts); @@ -312,12 +312,12 @@ nvme_ctrlr_disable(struct nvme_controller *ctrlr) static void nvme_ctrlr_shutdown(struct nvme_controller *ctrlr) { - union nvme_cc_register cc; - union nvme_csts_register csts; + union spdk_nvme_cc_register cc; + union spdk_nvme_csts_register csts; int ms_waited = 0; cc.raw = nvme_mmio_read_4(ctrlr, cc.raw); - cc.bits.shn = NVME_SHN_NORMAL; + cc.bits.shn = SPDK_NVME_SHN_NORMAL; nvme_mmio_write_4(ctrlr, cc.raw, cc.raw); csts.raw = nvme_mmio_read_4(ctrlr, csts); @@ -327,22 +327,22 @@ nvme_ctrlr_shutdown(struct nvme_controller *ctrlr) * 5 seconds as a reasonable amount of time to * wait before proceeding. */ - while (csts.bits.shst != NVME_SHST_COMPLETE) { + while (csts.bits.shst != SPDK_NVME_SHST_COMPLETE) { nvme_delay(1000); csts.raw = nvme_mmio_read_4(ctrlr, csts); if (ms_waited++ >= 5000) break; } - if (csts.bits.shst != NVME_SHST_COMPLETE) + if (csts.bits.shst != SPDK_NVME_SHST_COMPLETE) nvme_printf(ctrlr, "did not shutdown within 5 seconds\n"); } static int nvme_ctrlr_enable(struct nvme_controller *ctrlr) { - union nvme_cc_register cc; - union nvme_csts_register csts; - union nvme_aqa_register aqa; + union spdk_nvme_cc_register cc; + union spdk_nvme_csts_register csts; + union spdk_nvme_aqa_register aqa; cc.raw = nvme_mmio_read_4(ctrlr, cc.raw); csts.raw = nvme_mmio_read_4(ctrlr, csts); @@ -384,7 +384,7 @@ nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr) { uint32_t i; int rc; - union nvme_cc_register cc; + union spdk_nvme_cc_register cc; cc.raw = nvme_mmio_read_4(ctrlr, cc.raw); if (cc.bits.en) { @@ -451,7 +451,7 @@ nvme_ctrlr_identify(struct nvme_controller *ctrlr) while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_identify_controller failed!\n"); return ENXIO; } @@ -487,7 +487,7 @@ nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr) while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_set_num_queues failed!\n"); return ENXIO; } @@ -530,7 +530,7 @@ nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr) while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_create_io_cq failed!\n"); return ENXIO; } @@ -541,7 +541,7 @@ nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr) while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_create_io_sq failed!\n"); return ENXIO; } @@ -596,7 +596,7 @@ 
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr) } ctrlr->nsdata = nvme_malloc("nvme_namespaces", - nn * sizeof(struct nvme_namespace_data), 64, + nn * sizeof(struct spdk_nvme_ns_data), 64, &phys_addr); if (ctrlr->nsdata == NULL) { goto fail; @@ -622,12 +622,12 @@ fail: } static void -nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl) +nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl) { struct nvme_async_event_request *aer = arg; struct nvme_controller *ctrlr = aer->ctrlr; - if (cpl->status.sc == NVME_SC_ABORTED_SQ_DELETION) { + if (cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) { /* * This is simulated when controller is being shut down, to * effectively abort outstanding asynchronous event requests @@ -672,7 +672,7 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, * nature never be timed out. */ req->timeout = false; - req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST; + req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; nvme_ctrlr_submit_admin_request(ctrlr, req); return 0; @@ -681,7 +681,7 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr, static int nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr) { - union nvme_critical_warning_state state; + union spdk_nvme_critical_warning_state state; struct nvme_async_event_request *aer; uint32_t i; struct nvme_completion_poll_status status; @@ -695,7 +695,7 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr) while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_ctrlr_cmd_set_async_event_config failed!\n"); return ENXIO; } @@ -757,7 +757,7 @@ nvme_ctrlr_allocate_bars(struct nvme_controller *ctrlr) void *addr; rc = nvme_pcicfg_map_bar(ctrlr->devhandle, 0, 0 /* writable */, &addr); - ctrlr->regs = (volatile struct nvme_registers *)addr; + ctrlr->regs = (volatile struct spdk_nvme_registers *)addr; if ((ctrlr->regs == NULL) || (rc != 0)) { nvme_printf(ctrlr, "pci_device_map_range failed with error code %d\n", rc); return -1; @@ -781,7 +781,7 @@ nvme_ctrlr_free_bars(struct nvme_controller *ctrlr) int nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle) { - union nvme_cap_hi_register cap_hi; + union spdk_nvme_cap_hi_register cap_hi; uint32_t cmd_reg; int status; int rc; @@ -878,7 +878,7 @@ nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr) return num_completions; } -const struct nvme_controller_data * +const struct spdk_nvme_ctrlr_data * nvme_ctrlr_get_data(struct nvme_controller *ctrlr) { diff --git a/lib/nvme/nvme_ctrlr_cmd.c b/lib/nvme/nvme_ctrlr_cmd.c index 564349ce7..ce3aff808 100644 --- a/lib/nvme/nvme_ctrlr_cmd.c +++ b/lib/nvme/nvme_ctrlr_cmd.c @@ -35,7 +35,7 @@ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr, - struct nvme_command *cmd, + struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -55,7 +55,7 @@ nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr, int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr, - struct nvme_command *cmd, + struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg) { @@ -81,14 +81,14 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_contig(payload, - sizeof(struct 
nvme_controller_data), + sizeof(struct spdk_nvme_ctrlr_data), cb_fn, cb_arg); cmd = &req->cmd; - cmd->opc = NVME_OPC_IDENTIFY; + cmd->opc = SPDK_NVME_OPC_IDENTIFY; /* * TODO: create an identify command data structure, which @@ -104,14 +104,14 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid, void *payload, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_contig(payload, - sizeof(struct nvme_namespace_data), + sizeof(struct spdk_nvme_ns_data), cb_fn, cb_arg); cmd = &req->cmd; - cmd->opc = NVME_OPC_IDENTIFY; + cmd->opc = SPDK_NVME_OPC_IDENTIFY; /* * TODO: create an identify command data structure @@ -127,12 +127,12 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_null(cb_fn, cb_arg); cmd = &req->cmd; - cmd->opc = NVME_OPC_CREATE_IO_CQ; + cmd->opc = SPDK_NVME_OPC_CREATE_IO_CQ; /* * TODO: create a create io completion queue command data @@ -154,12 +154,12 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr, struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_null(cb_fn, cb_arg); cmd = &req->cmd; - cmd->opc = NVME_OPC_CREATE_IO_SQ; + cmd->opc = SPDK_NVME_OPC_CREATE_IO_SQ; /* * TODO: create a create io submission queue command data @@ -179,7 +179,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; nvme_mutex_lock(&ctrlr->ctrlr_lock); req = nvme_allocate_request_null(cb_fn, cb_arg); @@ -189,7 +189,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature, } cmd = &req->cmd; - cmd->opc = NVME_OPC_SET_FEATURES; + cmd->opc = SPDK_NVME_OPC_SET_FEATURES; cmd->cdw10 = feature; cmd->cdw11 = cdw11; cmd->cdw12 = cdw12; @@ -206,7 +206,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; nvme_mutex_lock(&ctrlr->ctrlr_lock); req = nvme_allocate_request_null(cb_fn, cb_arg); @@ -216,7 +216,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature, } cmd = &req->cmd; - cmd->opc = NVME_OPC_GET_FEATURES; + cmd->opc = SPDK_NVME_OPC_GET_FEATURES; cmd->cdw10 = feature; cmd->cdw11 = cdw11; @@ -233,20 +233,20 @@ nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, uint32_t cdw11; cdw11 = ((num_queues - 1) << 16) | (num_queues - 1); - nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0, + nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0, NULL, 0, cb_fn, cb_arg); } void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr, - union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn, + union spdk_nvme_critical_warning_state state, nvme_cb_fn_t cb_fn, void *cb_arg) { uint32_t cdw11; cdw11 = state.raw; nvme_ctrlr_cmd_set_feature(ctrlr, - NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, NULL, 0, cb_fn, + SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, NULL, 0, cb_fn, cb_arg); } @@ -256,7 +256,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page, void *cb_arg) { struct nvme_request *req; - struct 
nvme_command *cmd; + struct spdk_nvme_cmd *cmd; nvme_mutex_lock(&ctrlr->ctrlr_lock); req = nvme_allocate_request_contig(payload, payload_size, cb_fn, cb_arg); @@ -266,7 +266,7 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page, } cmd = &req->cmd; - cmd->opc = NVME_OPC_GET_LOG_PAGE; + cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE; cmd->nsid = nsid; cmd->cdw10 = ((payload_size / sizeof(uint32_t)) - 1) << 16; cmd->cdw10 |= log_page; @@ -282,12 +282,12 @@ nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid, uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_null(cb_fn, cb_arg); cmd = &req->cmd; - cmd->opc = NVME_OPC_ABORT; + cmd->opc = SPDK_NVME_OPC_ABORT; cmd->cdw10 = (cid << 16) | sqid; nvme_ctrlr_submit_admin_request(ctrlr, req); diff --git a/lib/nvme/nvme_impl.h b/lib/nvme/nvme_impl.h index c3169693c..b4ed54ce6 100644 --- a/lib/nvme/nvme_impl.h +++ b/lib/nvme/nvme_impl.h @@ -280,6 +280,6 @@ nvme_mutex_init_recursive(nvme_mutex_t *mtx) /** * Copy a struct nvme_command from one memory location to another. */ -#define nvme_copy_command(dst, src) rte_memcpy((dst), (src), sizeof(struct nvme_command)) +#define nvme_copy_command(dst, src) rte_memcpy((dst), (src), sizeof(struct spdk_nvme_cmd)) #endif /* __NVME_IMPL_H__ */ diff --git a/lib/nvme/nvme_internal.h b/lib/nvme/nvme_internal.h index a67a6a654..7af0fa034 100644 --- a/lib/nvme/nvme_internal.h +++ b/lib/nvme/nvme_internal.h @@ -152,7 +152,7 @@ struct __attribute__((packed)) nvme_payload { }; struct nvme_request { - struct nvme_command cmd; + struct spdk_nvme_cmd cmd; /** * Data payload for this request's command. @@ -212,18 +212,18 @@ struct nvme_request { * to ensure that the parent request is also completed with error * status once all child requests are completed. */ - struct nvme_completion parent_status; + struct spdk_nvme_cpl parent_status; }; struct nvme_completion_poll_status { - struct nvme_completion cpl; + struct spdk_nvme_cpl cpl; bool done; }; struct nvme_async_event_request { struct nvme_controller *ctrlr; struct nvme_request *req; - struct nvme_completion cpl; + struct spdk_nvme_cpl cpl; }; struct nvme_tracker { @@ -243,12 +243,12 @@ struct nvme_qpair { /** * Submission queue */ - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; /** * Completion queue */ - struct nvme_completion *cpl; + struct spdk_nvme_cpl *cpl; LIST_HEAD(, nvme_tracker) free_tr; LIST_HEAD(, nvme_tracker) outstanding_tr; @@ -293,7 +293,7 @@ struct nvme_controller { /* Hot data (accessed in I/O path) starts here. */ /** NVMe MMIO register space */ - volatile struct nvme_registers *regs; + volatile struct spdk_nvme_registers *regs; /** I/O queue pairs */ struct nvme_qpair *ioq; @@ -345,14 +345,14 @@ struct nvme_controller { /** * Identify Controller data. */ - struct nvme_controller_data cdata; + struct spdk_nvme_ctrlr_data cdata; /** * Array of Identify Namespace data. * * Stored separately from ns since nsdata should not normally be accessed during I/O. 
*/ - struct nvme_namespace_data *nsdata; + struct spdk_nvme_ns_data *nsdata; }; extern __thread int nvme_thread_ioq_index; @@ -423,12 +423,12 @@ void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr, uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg); void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr, - union nvme_critical_warning_state state, + union spdk_nvme_critical_warning_state state, nvme_cb_fn_t cb_fn, void *cb_arg); void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid, uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg); -void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl); +void nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl); int nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle); void nvme_ctrlr_destruct(struct nvme_controller *ctrlr); diff --git a/lib/nvme/nvme_ns.c b/lib/nvme/nvme_ns.c index e2e6b640d..dbb2d25d2 100644 --- a/lib/nvme/nvme_ns.c +++ b/lib/nvme/nvme_ns.c @@ -33,7 +33,7 @@ #include "nvme_internal.h" -static inline struct nvme_namespace_data * +static inline struct spdk_nvme_ns_data * _nvme_ns_get_data(struct nvme_namespace *ns) { return &ns->ctrlr->nsdata[ns->id - 1]; @@ -75,7 +75,7 @@ nvme_ns_get_flags(struct nvme_namespace *ns) return ns->flags; } -const struct nvme_namespace_data * +const struct spdk_nvme_ns_data * nvme_ns_get_data(struct nvme_namespace *ns) { return _nvme_ns_get_data(ns); @@ -86,7 +86,7 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id, struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; - struct nvme_namespace_data *nsdata; + struct spdk_nvme_ns_data *nsdata; uint32_t pci_devid; nvme_assert(id > 0, ("invalid namespace id %d", id)); @@ -108,7 +108,7 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id, while (status.done == false) { nvme_qpair_process_completions(&ctrlr->adminq, 0); } - if (nvme_completion_is_error(&status.cpl)) { + if (spdk_nvme_cpl_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_identify_namespace failed\n"); return ENXIO; } diff --git a/lib/nvme/nvme_ns_cmd.c b/lib/nvme/nvme_ns_cmd.c index e2621d508..4872885a1 100644 --- a/lib/nvme/nvme_ns_cmd.c +++ b/lib/nvme/nvme_ns_cmd.c @@ -44,7 +44,7 @@ static struct nvme_request *_nvme_ns_cmd_rw(struct nvme_namespace *ns, void *cb_arg, uint32_t opc, uint32_t io_flags); static void -nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl) +nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl) { struct nvme_request *child = child_arg; struct nvme_request *parent = child->parent; @@ -52,7 +52,7 @@ nvme_cb_complete_child(void *child_arg, const struct nvme_completion *cpl) parent->num_children--; TAILQ_REMOVE(&parent->children, child, child_tailq); - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { memcpy(&parent->parent_status, cpl, sizeof(*cpl)); } @@ -76,7 +76,7 @@ nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child) */ TAILQ_INIT(&parent->children); parent->parent = NULL; - memset(&parent->parent_status, 0, sizeof(struct nvme_completion)); + memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl)); } parent->num_children++; @@ -125,7 +125,7 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, const struct nvme_payload *payload, uint32_t io_flags) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; uint64_t *tmp_lba; uint32_t sector_size; uint32_t sectors_per_max_io; @@ -185,7 +185,7 @@ nvme_ns_cmd_read(struct 
nvme_namespace *ns, void *buffer, uint64_t lba, payload.type = NVME_PAYLOAD_TYPE_CONTIG; payload.u.contig = buffer; - req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags); + req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags); if (req != NULL) { nvme_ctrlr_submit_io_request(ns->ctrlr, req); return 0; @@ -210,7 +210,7 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count, payload.u.sgl.reset_sgl_fn = reset_sgl_fn; payload.u.sgl.next_sge_fn = next_sge_fn; - req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_READ, io_flags); + req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ, io_flags); if (req != NULL) { nvme_ctrlr_submit_io_request(ns->ctrlr, req); return 0; @@ -230,7 +230,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *buffer, uint64_t lba, payload.type = NVME_PAYLOAD_TYPE_CONTIG; payload.u.contig = buffer; - req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags); + req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags); if (req != NULL) { nvme_ctrlr_submit_io_request(ns->ctrlr, req); return 0; @@ -255,7 +255,7 @@ nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count, payload.u.sgl.reset_sgl_fn = reset_sgl_fn; payload.u.sgl.next_sge_fn = next_sge_fn; - req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, NVME_OPC_WRITE, io_flags); + req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags); if (req != NULL) { nvme_ctrlr_submit_io_request(ns->ctrlr, req); return 0; @@ -270,7 +270,7 @@ nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba, uint32_t io_flags) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; uint64_t *tmp_lba; if (lba_count == 0) { @@ -283,7 +283,7 @@ nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba, } cmd = &req->cmd; - cmd->opc = NVME_OPC_WRITE_ZEROES; + cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES; cmd->nsid = ns->id; tmp_lba = (uint64_t *)&cmd->cdw10; @@ -301,26 +301,26 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload, uint16_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; - if (num_ranges == 0 || num_ranges > NVME_DATASET_MANAGEMENT_MAX_RANGES) { + if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) { return EINVAL; } req = nvme_allocate_request_contig(payload, - num_ranges * sizeof(struct nvme_dsm_range), + num_ranges * sizeof(struct spdk_nvme_dsm_range), cb_fn, cb_arg); if (req == NULL) { return ENOMEM; } cmd = &req->cmd; - cmd->opc = NVME_OPC_DATASET_MANAGEMENT; + cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT; cmd->nsid = ns->id; /* TODO: create a delete command data structure */ cmd->cdw10 = num_ranges - 1; - cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE; + cmd->cdw11 = SPDK_NVME_DSM_ATTR_DEALLOCATE; nvme_ctrlr_submit_io_request(ns->ctrlr, req); @@ -331,7 +331,7 @@ int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_null(cb_fn, cb_arg); if (req == NULL) { @@ -339,7 +339,7 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg) } cmd = &req->cmd; - cmd->opc = NVME_OPC_FLUSH; + cmd->opc = 
SPDK_NVME_OPC_FLUSH; cmd->nsid = ns->id; nvme_ctrlr_submit_io_request(ns->ctrlr, req); @@ -349,24 +349,24 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg) int nvme_ns_cmd_reservation_register(struct nvme_namespace *ns, - struct nvme_reservation_register_data *payload, + struct spdk_nvme_reservation_register_data *payload, bool ignore_key, - enum nvme_reservation_register_action action, - enum nvme_reservation_register_cptpl cptpl, + enum spdk_nvme_reservation_register_action action, + enum spdk_nvme_reservation_register_cptpl cptpl, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_contig(payload, - sizeof(struct nvme_reservation_register_data), + sizeof(struct spdk_nvme_reservation_register_data), cb_fn, cb_arg); if (req == NULL) { return ENOMEM; } cmd = &req->cmd; - cmd->opc = NVME_OPC_RESERVATION_REGISTER; + cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER; cmd->nsid = ns->id; /* Bits 0-2 */ @@ -383,23 +383,23 @@ nvme_ns_cmd_reservation_register(struct nvme_namespace *ns, int nvme_ns_cmd_reservation_release(struct nvme_namespace *ns, - struct nvme_reservation_key_data *payload, + struct spdk_nvme_reservation_key_data *payload, bool ignore_key, - enum nvme_reservation_release_action action, - enum nvme_reservation_type type, + enum spdk_nvme_reservation_release_action action, + enum spdk_nvme_reservation_type type, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; - req = nvme_allocate_request_contig(payload, sizeof(struct nvme_reservation_key_data), cb_fn, + req = nvme_allocate_request_contig(payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn, cb_arg); if (req == NULL) { return ENOMEM; } cmd = &req->cmd; - cmd->opc = NVME_OPC_RESERVATION_RELEASE; + cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE; cmd->nsid = ns->id; /* Bits 0-2 */ @@ -416,24 +416,24 @@ nvme_ns_cmd_reservation_release(struct nvme_namespace *ns, int nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns, - struct nvme_reservation_acquire_data *payload, + struct spdk_nvme_reservation_acquire_data *payload, bool ignore_key, - enum nvme_reservation_acquire_action action, - enum nvme_reservation_type type, + enum spdk_nvme_reservation_acquire_action action, + enum spdk_nvme_reservation_type type, nvme_cb_fn_t cb_fn, void *cb_arg) { struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; req = nvme_allocate_request_contig(payload, - sizeof(struct nvme_reservation_acquire_data), + sizeof(struct spdk_nvme_reservation_acquire_data), cb_fn, cb_arg); if (req == NULL) { return ENOMEM; } cmd = &req->cmd; - cmd->opc = NVME_OPC_RESERVATION_ACQUIRE; + cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE; cmd->nsid = ns->id; /* Bits 0-2 */ @@ -454,7 +454,7 @@ nvme_ns_cmd_reservation_report(struct nvme_namespace *ns, void *payload, { uint32_t num_dwords; struct nvme_request *req; - struct nvme_command *cmd; + struct spdk_nvme_cmd *cmd; if (len % 4) return EINVAL; @@ -466,7 +466,7 @@ nvme_ns_cmd_reservation_report(struct nvme_namespace *ns, void *payload, } cmd = &req->cmd; - cmd->opc = NVME_OPC_RESERVATION_REPORT; + cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT; cmd->nsid = ns->id; cmd->cdw10 = num_dwords; diff --git a/lib/nvme/nvme_qpair.c b/lib/nvme/nvme_qpair.c index 2dabe8af1..218b6344a 100644 --- a/lib/nvme/nvme_qpair.c +++ b/lib/nvme/nvme_qpair.c @@ -54,38 +54,38 @@ struct nvme_string { }; static const struct 
nvme_string admin_opcode[] = { - { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, - { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, - { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, - { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, - { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, - { NVME_OPC_IDENTIFY, "IDENTIFY" }, - { NVME_OPC_ABORT, "ABORT" }, - { NVME_OPC_SET_FEATURES, "SET FEATURES" }, - { NVME_OPC_GET_FEATURES, "GET FEATURES" }, - { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, - { NVME_OPC_NAMESPACE_MANAGEMENT, "NAMESPACE MANAGEMENT" }, - { NVME_OPC_FIRMWARE_COMMIT, "FIRMWARE COMMIT" }, - { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, - { NVME_OPC_NAMESPACE_ATTACHMENT, "NAMESPACE ATTACHMENT" }, - { NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, - { NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, - { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, + { SPDK_NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" }, + { SPDK_NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" }, + { SPDK_NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" }, + { SPDK_NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" }, + { SPDK_NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" }, + { SPDK_NVME_OPC_IDENTIFY, "IDENTIFY" }, + { SPDK_NVME_OPC_ABORT, "ABORT" }, + { SPDK_NVME_OPC_SET_FEATURES, "SET FEATURES" }, + { SPDK_NVME_OPC_GET_FEATURES, "GET FEATURES" }, + { SPDK_NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" }, + { SPDK_NVME_OPC_NS_MANAGEMENT, "NAMESPACE MANAGEMENT" }, + { SPDK_NVME_OPC_FIRMWARE_COMMIT, "FIRMWARE COMMIT" }, + { SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" }, + { SPDK_NVME_OPC_NS_ATTACHMENT, "NAMESPACE ATTACHMENT" }, + { SPDK_NVME_OPC_FORMAT_NVM, "FORMAT NVM" }, + { SPDK_NVME_OPC_SECURITY_SEND, "SECURITY SEND" }, + { SPDK_NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" }, { 0xFFFF, "ADMIN COMMAND" } }; static const struct nvme_string io_opcode[] = { - { NVME_OPC_FLUSH, "FLUSH" }, - { NVME_OPC_WRITE, "WRITE" }, - { NVME_OPC_READ, "READ" }, - { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, - { NVME_OPC_COMPARE, "COMPARE" }, - { NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" }, - { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, - { NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" }, - { NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" }, - { NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" }, - { NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" }, + { SPDK_NVME_OPC_FLUSH, "FLUSH" }, + { SPDK_NVME_OPC_WRITE, "WRITE" }, + { SPDK_NVME_OPC_READ, "READ" }, + { SPDK_NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" }, + { SPDK_NVME_OPC_COMPARE, "COMPARE" }, + { SPDK_NVME_OPC_WRITE_ZEROES, "WRITE ZEROES" }, + { SPDK_NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" }, + { SPDK_NVME_OPC_RESERVATION_REGISTER, "RESERVATION REGISTER" }, + { SPDK_NVME_OPC_RESERVATION_REPORT, "RESERVATION REPORT" }, + { SPDK_NVME_OPC_RESERVATION_ACQUIRE, "RESERVATION ACQUIRE" }, + { SPDK_NVME_OPC_RESERVATION_RELEASE, "RESERVATION RELEASE" }, { 0xFFFF, "IO COMMAND" } }; @@ -107,7 +107,7 @@ nvme_get_string(const struct nvme_string *strings, uint16_t value) static void nvme_admin_qpair_print_command(struct nvme_qpair *qpair, - struct nvme_command *cmd) + struct spdk_nvme_cmd *cmd) { nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x " @@ -118,14 +118,14 @@ nvme_admin_qpair_print_command(struct nvme_qpair *qpair, static void nvme_io_qpair_print_command(struct nvme_qpair *qpair, - struct nvme_command *cmd) + struct spdk_nvme_cmd *cmd) { switch ((int)cmd->opc) { - case NVME_OPC_WRITE: - case NVME_OPC_READ: - case NVME_OPC_WRITE_UNCORRECTABLE: - case 
NVME_OPC_COMPARE: + case SPDK_NVME_OPC_WRITE: + case SPDK_NVME_OPC_READ: + case SPDK_NVME_OPC_WRITE_UNCORRECTABLE: + case SPDK_NVME_OPC_COMPARE: nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d " "lba:%llu len:%d\n", nvme_get_string(io_opcode, cmd->opc), qpair->id, cmd->cid, @@ -133,8 +133,8 @@ nvme_io_qpair_print_command(struct nvme_qpair *qpair, ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10, (cmd->cdw12 & 0xFFFF) + 1); break; - case NVME_OPC_FLUSH: - case NVME_OPC_DATASET_MANAGEMENT: + case SPDK_NVME_OPC_FLUSH: + case SPDK_NVME_OPC_DATASET_MANAGEMENT: nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n", nvme_get_string(io_opcode, cmd->opc), qpair->id, cmd->cid, cmd->nsid); @@ -148,7 +148,7 @@ nvme_io_qpair_print_command(struct nvme_qpair *qpair, } static void -nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) +nvme_qpair_print_command(struct nvme_qpair *qpair, struct spdk_nvme_cmd *cmd) { nvme_assert(qpair != NULL, ("qpair can not be NULL")); nvme_assert(cmd != NULL, ("cmd can not be NULL")); @@ -161,51 +161,51 @@ nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd) } static const struct nvme_string generic_status[] = { - { NVME_SC_SUCCESS, "SUCCESS" }, - { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, - { NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, - { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, - { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, - { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, - { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, - { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, - { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, - { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, - { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, - { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, - { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, - { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, - { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, - { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, + { SPDK_NVME_SC_SUCCESS, "SUCCESS" }, + { SPDK_NVME_SC_INVALID_OPCODE, "INVALID OPCODE" }, + { SPDK_NVME_SC_INVALID_FIELD, "INVALID_FIELD" }, + { SPDK_NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" }, + { SPDK_NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" }, + { SPDK_NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" }, + { SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" }, + { SPDK_NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" }, + { SPDK_NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" }, + { SPDK_NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" }, + { SPDK_NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" }, + { SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" }, + { SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" }, + { SPDK_NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" }, + { SPDK_NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" }, + { SPDK_NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" }, { 0xFFFF, "GENERIC" } }; static const struct nvme_string command_specific_status[] = { - { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, - { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, - { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, - { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, - { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, - { 
NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, - { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, - { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, - { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, - { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, - { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, - { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, - { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, - { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, + { SPDK_NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" }, + { SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" }, + { SPDK_NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" }, + { SPDK_NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" }, + { SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" }, + { SPDK_NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" }, + { SPDK_NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" }, + { SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" }, + { SPDK_NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" }, + { SPDK_NVME_SC_INVALID_FORMAT, "INVALID FORMAT" }, + { SPDK_NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" }, + { SPDK_NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" }, + { SPDK_NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" }, + { SPDK_NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" }, { 0xFFFF, "COMMAND SPECIFIC" } }; static const struct nvme_string media_error_status[] = { - { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, - { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, - { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, - { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, - { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, - { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, - { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, + { SPDK_NVME_SC_WRITE_FAULTS, "WRITE FAULTS" }, + { SPDK_NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" }, + { SPDK_NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" }, + { SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" }, + { SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" }, + { SPDK_NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" }, + { SPDK_NVME_SC_ACCESS_DENIED, "ACCESS DENIED" }, { 0xFFFF, "MEDIA ERROR" } }; @@ -215,16 +215,16 @@ get_status_string(uint16_t sct, uint16_t sc) const struct nvme_string *entry; switch (sct) { - case NVME_SCT_GENERIC: + case SPDK_NVME_SCT_GENERIC: entry = generic_status; break; - case NVME_SCT_COMMAND_SPECIFIC: + case SPDK_NVME_SCT_COMMAND_SPECIFIC: entry = command_specific_status; break; - case NVME_SCT_MEDIA_ERROR: + case SPDK_NVME_SCT_MEDIA_ERROR: entry = media_error_status; break; - case NVME_SCT_VENDOR_SPECIFIC: + case SPDK_NVME_SCT_VENDOR_SPECIFIC: return "VENDOR SPECIFIC"; default: return "RESERVED"; @@ -235,7 +235,7 @@ get_status_string(uint16_t sct, uint16_t sc) static void nvme_qpair_print_completion(struct nvme_qpair *qpair, - struct nvme_completion *cpl) + struct spdk_nvme_cpl *cpl) { nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x sqhd:%04x p:%x m:%x dnr:%x\n", get_status_string(cpl->status.sct, cpl->status.sc), @@ -244,7 +244,7 @@ nvme_qpair_print_completion(struct nvme_qpair *qpair, } static bool -nvme_completion_is_retry(const struct nvme_completion *cpl) +nvme_completion_is_retry(const struct 
spdk_nvme_cpl *cpl) { /* * TODO: spec is not clear how commands that are aborted due @@ -253,34 +253,34 @@ nvme_completion_is_retry(const struct nvme_completion *cpl) * look at the DNR bit. */ switch ((int)cpl->status.sct) { - case NVME_SCT_GENERIC: + case SPDK_NVME_SCT_GENERIC: switch ((int)cpl->status.sc) { - case NVME_SC_ABORTED_BY_REQUEST: - case NVME_SC_NAMESPACE_NOT_READY: + case SPDK_NVME_SC_ABORTED_BY_REQUEST: + case SPDK_NVME_SC_NAMESPACE_NOT_READY: if (cpl->status.dnr) { return false; } else { return true; } - case NVME_SC_INVALID_OPCODE: - case NVME_SC_INVALID_FIELD: - case NVME_SC_COMMAND_ID_CONFLICT: - case NVME_SC_DATA_TRANSFER_ERROR: - case NVME_SC_ABORTED_POWER_LOSS: - case NVME_SC_INTERNAL_DEVICE_ERROR: - case NVME_SC_ABORTED_SQ_DELETION: - case NVME_SC_ABORTED_FAILED_FUSED: - case NVME_SC_ABORTED_MISSING_FUSED: - case NVME_SC_INVALID_NAMESPACE_OR_FORMAT: - case NVME_SC_COMMAND_SEQUENCE_ERROR: - case NVME_SC_LBA_OUT_OF_RANGE: - case NVME_SC_CAPACITY_EXCEEDED: + case SPDK_NVME_SC_INVALID_OPCODE: + case SPDK_NVME_SC_INVALID_FIELD: + case SPDK_NVME_SC_COMMAND_ID_CONFLICT: + case SPDK_NVME_SC_DATA_TRANSFER_ERROR: + case SPDK_NVME_SC_ABORTED_POWER_LOSS: + case SPDK_NVME_SC_INTERNAL_DEVICE_ERROR: + case SPDK_NVME_SC_ABORTED_SQ_DELETION: + case SPDK_NVME_SC_ABORTED_FAILED_FUSED: + case SPDK_NVME_SC_ABORTED_MISSING_FUSED: + case SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT: + case SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR: + case SPDK_NVME_SC_LBA_OUT_OF_RANGE: + case SPDK_NVME_SC_CAPACITY_EXCEEDED: default: return false; } - case NVME_SCT_COMMAND_SPECIFIC: - case NVME_SCT_MEDIA_ERROR: - case NVME_SCT_VENDOR_SPECIFIC: + case SPDK_NVME_SCT_COMMAND_SPECIFIC: + case SPDK_NVME_SCT_MEDIA_ERROR: + case SPDK_NVME_SCT_VENDOR_SPECIFIC: default: return false; } @@ -314,7 +314,7 @@ nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr) static void nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr, - struct nvme_completion *cpl, bool print_on_error) + struct spdk_nvme_cpl *cpl, bool print_on_error) { struct nvme_request *req; bool retry, error; @@ -323,7 +323,7 @@ nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr, nvme_assert(req != NULL, ("tr has NULL req\n")); - error = nvme_completion_is_error(cpl); + error = spdk_nvme_cpl_is_error(cpl); retry = error && nvme_completion_is_retry(cpl) && req->retries < nvme_retry_count; @@ -369,7 +369,7 @@ nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr, bool print_on_error) { - struct nvme_completion cpl; + struct spdk_nvme_cpl cpl; memset(&cpl, 0, sizeof(cpl)); cpl.sqid = qpair->id; @@ -385,7 +385,7 @@ nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, struct nvme_request *req, uint32_t sct, uint32_t sc, bool print_on_error) { - struct nvme_completion cpl; + struct spdk_nvme_cpl cpl; bool error; memset(&cpl, 0, sizeof(cpl)); @@ -393,7 +393,7 @@ nvme_qpair_manual_complete_request(struct nvme_qpair *qpair, cpl.status.sct = sct; cpl.status.sc = sc; - error = nvme_completion_is_error(&cpl); + error = spdk_nvme_cpl_is_error(&cpl); if (error && print_on_error) { nvme_qpair_print_command(qpair, &req->cmd); @@ -465,7 +465,7 @@ int32_t nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completions) { struct nvme_tracker *tr; - struct nvme_completion *cpl; + struct spdk_nvme_cpl *cpl; uint32_t num_completions = 0; if (!nvme_qpair_check_enabled(qpair)) { @@ -541,7 +541,7 @@ 
nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id, /* cmd and cpl rings must be aligned on 4KB boundaries. */ qpair->cmd = nvme_malloc("qpair_cmd", - qpair->num_entries * sizeof(struct nvme_command), + qpair->num_entries * sizeof(struct spdk_nvme_cmd), 0x1000, &qpair->cmd_bus_addr); if (qpair->cmd == NULL) { @@ -549,7 +549,7 @@ nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id, goto fail; } qpair->cpl = nvme_malloc("qpair_cpl", - qpair->num_entries * sizeof(struct nvme_completion), + qpair->num_entries * sizeof(struct spdk_nvme_cpl), 0x1000, &qpair->cpl_bus_addr); if (qpair->cpl == NULL) { @@ -599,9 +599,9 @@ nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair) tr = LIST_FIRST(&qpair->outstanding_tr); while (tr != NULL) { - if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) { + if (tr->req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) { nvme_qpair_manual_complete_tracker(qpair, tr, - NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0, + SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_ABORTED_SQ_DELETION, 0, false); tr = LIST_FIRST(&qpair->outstanding_tr); } else { @@ -658,16 +658,16 @@ _nvme_fail_request_bad_vtophys(struct nvme_qpair *qpair, struct nvme_tracker *tr * Bad vtophys translation, so abort this request and return * immediately. */ - nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, - NVME_SC_INVALID_FIELD, + nvme_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_INVALID_FIELD, 1 /* do not retry */, true); } static void _nvme_fail_request_ctrlr_failed(struct nvme_qpair *qpair, struct nvme_request *req) { - nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, - NVME_SC_ABORTED_BY_REQUEST, true); + nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_ABORTED_BY_REQUEST, true); } /** @@ -694,7 +694,7 @@ _nvme_qpair_build_contig_request(struct nvme_qpair *qpair, struct nvme_request * nseg += 1 + ((modulo + unaligned - 1) >> nvme_u32log2(PAGE_SIZE)); } - tr->req->cmd.psdt = NVME_PSDT_PRP; + tr->req->cmd.psdt = SPDK_NVME_PSDT_PRP; tr->req->cmd.dptr.prp.prp1 = phys_addr; if (nseg == 2) { seg_addr = payload + PAGE_SIZE - unaligned; @@ -760,7 +760,7 @@ _nvme_qpair_build_sgl_request(struct nvme_qpair *qpair, struct nvme_request *req } if (total_nseg == 0) { - req->cmd.psdt = NVME_PSDT_PRP; + req->cmd.psdt = SPDK_NVME_PSDT_PRP; req->cmd.dptr.prp.prp1 = phys_addr; } @@ -890,9 +890,9 @@ nvme_qpair_reset(struct nvme_qpair *qpair) qpair->phase = 1; memset(qpair->cmd, 0, - qpair->num_entries * sizeof(struct nvme_command)); + qpair->num_entries * sizeof(struct spdk_nvme_cmd)); memset(qpair->cpl, 0, - qpair->num_entries * sizeof(struct nvme_completion)); + qpair->num_entries * sizeof(struct spdk_nvme_cpl)); } static void @@ -910,8 +910,8 @@ _nvme_admin_qpair_enable(struct nvme_qpair *qpair) LIST_FOREACH_SAFE(tr, &qpair->outstanding_tr, list, tr_temp) { nvme_printf(qpair->ctrlr, "aborting outstanding admin command\n"); - nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, - NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, true); + nvme_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, true); } qpair->is_enabled = true; @@ -933,8 +933,8 @@ _nvme_io_qpair_enable(struct nvme_qpair *qpair) */ LIST_FOREACH_SAFE(tr, &qpair->outstanding_tr, list, tr_temp) { nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n"); - nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, - NVME_SC_ABORTED_BY_REQUEST, 0, true); + 
nvme_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_ABORTED_BY_REQUEST, 0, true); } @@ -994,8 +994,8 @@ nvme_qpair_fail(struct nvme_qpair *qpair) req = STAILQ_FIRST(&qpair->queued_req); STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq); nvme_printf(qpair->ctrlr, "failing queued i/o\n"); - nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC, - NVME_SC_ABORTED_BY_REQUEST, true); + nvme_qpair_manual_complete_request(qpair, req, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_ABORTED_BY_REQUEST, true); } /* Manually abort each outstanding I/O. */ @@ -1006,8 +1006,8 @@ nvme_qpair_fail(struct nvme_qpair *qpair) * do that for us. */ nvme_printf(qpair->ctrlr, "failing outstanding i/o\n"); - nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC, - NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, true); + nvme_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC, + SPDK_NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, true); } } diff --git a/test/lib/nvme/aer/aer.c b/test/lib/nvme/aer/aer.c index 16b9a7fa5..510c15aa9 100644 --- a/test/lib/nvme/aer/aer.c +++ b/test/lib/nvme/aer/aer.c @@ -46,11 +46,11 @@ struct rte_mempool *request_mempool; #define MAX_DEVS 64 struct dev { - struct spdk_pci_device *pci_dev; - struct nvme_controller *ctrlr; - struct nvme_health_information_page *health_page; - uint32_t orig_temp_threshold; - char name[100]; + struct spdk_pci_device *pci_dev; + struct nvme_controller *ctrlr; + struct spdk_nvme_health_information_page *health_page; + uint32_t orig_temp_threshold; + char name[100]; }; static struct dev devs[MAX_DEVS]; @@ -66,11 +66,11 @@ static int aer_done = 0; static int temperature_done = 0; static int failed = 0; -static void set_feature_completion(void *cb_arg, const struct nvme_completion *cpl) +static void set_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct dev *dev = cb_arg; - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("%s: set feature (temp threshold) failed\n", dev->name); failed = 1; return; @@ -84,21 +84,21 @@ static void set_feature_completion(void *cb_arg, const struct nvme_completion *c static int set_temp_threshold(struct dev *dev, uint32_t temp) { - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; - cmd.opc = NVME_OPC_SET_FEATURES; - cmd.cdw10 = NVME_FEAT_TEMPERATURE_THRESHOLD; + cmd.opc = SPDK_NVME_OPC_SET_FEATURES; + cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; cmd.cdw11 = temp; return nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, set_feature_completion, dev); } static void -get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) +get_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct dev *dev = cb_arg; - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("%s: get feature (temp threshold) failed\n", dev->name); failed = 1; return; @@ -115,27 +115,27 @@ get_feature_completion(void *cb_arg, const struct nvme_completion *cpl) static int get_temp_threshold(struct dev *dev) { - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; - cmd.opc = NVME_OPC_GET_FEATURES; - cmd.cdw10 = NVME_FEAT_TEMPERATURE_THRESHOLD; + cmd.opc = SPDK_NVME_OPC_GET_FEATURES; + cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD; return nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, get_feature_completion, dev); } static void -print_health_page(struct dev *dev, struct nvme_health_information_page *hip) +print_health_page(struct dev *dev, struct 
spdk_nvme_health_information_page *hip) { printf("%s: Current Temperature: %u Kelvin (%d Celsius)\n", dev->name, hip->temperature, hip->temperature - 273); } static void -get_log_page_completion(void *cb_arg, const struct nvme_completion *cpl) +get_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl) { struct dev *dev = cb_arg; - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("%s: get log page failed\n", dev->name); failed = 1; return; @@ -148,8 +148,8 @@ get_log_page_completion(void *cb_arg, const struct nvme_completion *cpl) static int get_health_log_page(struct dev *dev) { - return nvme_ctrlr_cmd_get_log_page(dev->ctrlr, NVME_LOG_HEALTH_INFORMATION, - NVME_GLOBAL_NAMESPACE_TAG, dev->health_page, sizeof(*dev->health_page), + return nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, + SPDK_NVME_GLOBAL_NS_TAG, dev->health_page, sizeof(*dev->health_page), get_log_page_completion, dev); } @@ -165,12 +165,12 @@ cleanup(void) } } -static void aer_cb(void *arg, const struct nvme_completion *cpl) +static void aer_cb(void *arg, const struct spdk_nvme_cpl *cpl) { uint32_t log_page_id = (cpl->cdw0 & 0xFF0000) >> 16; struct dev *dev = arg; - if (nvme_completion_is_error(cpl)) { + if (spdk_nvme_cpl_is_error(cpl)) { printf("%s: AER failed\n", dev->name); failed = 1; return; diff --git a/test/lib/nvme/reset/reset.c b/test/lib/nvme/reset/reset.c index c34c48957..c78976848 100644 --- a/test/lib/nvme/reset/reset.c +++ b/test/lib/nvme/reset/reset.c @@ -104,7 +104,7 @@ static void register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns) { struct ns_entry *entry; - const struct nvme_controller_data *cdata; + const struct spdk_nvme_ctrlr_data *cdata; entry = malloc(sizeof(struct ns_entry)); if (entry == NULL) { @@ -158,7 +158,7 @@ static void task_ctor(struct rte_mempool *mp, void *arg, void *__task, unsigned } } -static void io_complete(void *ctx, const struct nvme_completion *completion); +static void io_complete(void *ctx, const struct spdk_nvme_cpl *completion); static __thread unsigned int seed = 0; @@ -204,14 +204,14 @@ submit_single_io(struct ns_worker_ctx *ns_ctx) } static void -task_complete(struct reset_task *task, const struct nvme_completion *completion) +task_complete(struct reset_task *task, const struct spdk_nvme_cpl *completion) { struct ns_worker_ctx *ns_ctx; ns_ctx = task->ns_ctx; ns_ctx->current_queue_depth--; - if (nvme_completion_is_error(completion)) { + if (spdk_nvme_cpl_is_error(completion)) { ns_ctx->io_completed_error++; } else { ns_ctx->io_completed++; @@ -231,7 +231,7 @@ task_complete(struct reset_task *task, const struct nvme_completion *completion) } static void -io_complete(void *ctx, const struct nvme_completion *completion) +io_complete(void *ctx, const struct spdk_nvme_cpl *completion) { task_complete((struct reset_task *)ctx, completion); } diff --git a/test/lib/nvme/sgl/nvme_sgl.c b/test/lib/nvme/sgl/nvme_sgl.c index 2d4721874..9d8aa99b5 100644 --- a/test/lib/nvme/sgl/nvme_sgl.c +++ b/test/lib/nvme/sgl/nvme_sgl.c @@ -121,9 +121,9 @@ static int nvme_request_next_sge(void *cb_arg, uint64_t *address, uint32_t *leng } static void -io_complete(void *ctx, const struct nvme_completion *cpl) +io_complete(void *ctx, const struct spdk_nvme_cpl *cpl) { - if (nvme_completion_is_error(cpl)) + if (spdk_nvme_cpl_is_error(cpl)) io_complete_flag = 2; else io_complete_flag = 1; @@ -274,7 +274,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn) struct io_request *req; struct 
nvme_namespace *ns; - const struct nvme_namespace_data *nsdata; + const struct spdk_nvme_ns_data *nsdata; ns = nvme_ctrlr_get_ns(dev->ctrlr, 1); if (!ns) { diff --git a/test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut.c b/test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut.c index 3c6ae1063..82e17c4e6 100644 --- a/test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut.c +++ b/test/lib/nvme/unit/nvme_ctrlr_c/nvme_ctrlr_ut.c @@ -96,7 +96,7 @@ nvme_qpair_fail(struct nvme_qpair *qpair) void nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req) { - CU_ASSERT(req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST); } int32_t @@ -126,13 +126,13 @@ nvme_qpair_reset(struct nvme_qpair *qpair) } void -nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl) +nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl) { } void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr, - union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn, + union spdk_nvme_critical_warning_state state, nvme_cb_fn_t cb_fn, void *cb_arg) { } @@ -282,14 +282,14 @@ test_nvme_ctrlr_set_supported_features(void) /* set a invalid vendor id */ ctrlr.cdata.vid = 0xFFFF; nvme_ctrlr_set_supported_features(&ctrlr); - res = nvme_ctrlr_is_feature_supported(&ctrlr, NVME_FEAT_ARBITRATION); + res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION); CU_ASSERT(res == true); res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA); CU_ASSERT(res == false); ctrlr.cdata.vid = SPDK_PCI_VID_INTEL; nvme_ctrlr_set_supported_features(&ctrlr); - res = nvme_ctrlr_is_feature_supported(&ctrlr, NVME_FEAT_ARBITRATION); + res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION); CU_ASSERT(res == true); res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA); CU_ASSERT(res == true); diff --git a/test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut.c b/test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut.c index a8e5b5aad..1724262b0 100644 --- a/test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut.c +++ b/test/lib/nvme/unit/nvme_ctrlr_cmd_c/nvme_ctrlr_cmd_ut.c @@ -60,11 +60,11 @@ static void verify_firmware_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); - CU_ASSERT(req->cmd.nsid == NVME_GLOBAL_NAMESPACE_TAG); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG); - temp_cdw10 = ((sizeof(struct nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) | - NVME_LOG_FIRMWARE_SLOT; + temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) | + SPDK_NVME_LOG_FIRMWARE_SLOT; CU_ASSERT(req->cmd.cdw10 == temp_cdw10); } @@ -72,11 +72,11 @@ static void verify_health_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); CU_ASSERT(req->cmd.nsid == health_log_nsid); - temp_cdw10 = ((sizeof(struct nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) | - NVME_LOG_HEALTH_INFORMATION; + temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) | + SPDK_NVME_LOG_HEALTH_INFORMATION; CU_ASSERT(req->cmd.cdw10 == temp_cdw10); } @@ -84,17 +84,17 @@ static void verify_error_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); - CU_ASSERT(req->cmd.nsid == 
NVME_GLOBAL_NAMESPACE_TAG); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG); - temp_cdw10 = (((sizeof(struct nvme_error_information_entry) * error_num_entries) / sizeof( - uint32_t) - 1) << 16) | NVME_LOG_ERROR; + temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) / + sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR; CU_ASSERT(req->cmd.cdw10 == temp_cdw10); } static void verify_set_feature_cmd(struct nvme_request *req) { - CU_ASSERT(req->cmd.opc == NVME_OPC_SET_FEATURES); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES); CU_ASSERT(req->cmd.cdw10 == feature); CU_ASSERT(req->cmd.cdw11 == feature_cdw11); CU_ASSERT(req->cmd.cdw12 == feature_cdw12); @@ -102,20 +102,20 @@ static void verify_set_feature_cmd(struct nvme_request *req) static void verify_get_feature_cmd(struct nvme_request *req) { - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_FEATURES); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES); CU_ASSERT(req->cmd.cdw10 == get_feature); CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11); } static void verify_abort_cmd(struct nvme_request *req) { - CU_ASSERT(req->cmd.opc == NVME_OPC_ABORT); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT); CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid)); } static void verify_io_raw_cmd(struct nvme_request *req) { - struct nvme_command command = {}; + struct spdk_nvme_cmd command = {}; CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0); } @@ -124,7 +124,7 @@ static void verify_intel_smart_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); CU_ASSERT(req->cmd.nsid == health_log_nsid); temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) / @@ -137,7 +137,7 @@ static void verify_intel_temperature_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) | SPDK_NVME_INTEL_LOG_TEMPERATURE; @@ -148,7 +148,7 @@ static void verify_intel_read_latency_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) | SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY; @@ -159,7 +159,7 @@ static void verify_intel_write_latency_log_page(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) | SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY; @@ -170,7 +170,7 @@ static void verify_intel_get_log_page_directory(struct nvme_request *req) { uint32_t temp_cdw10; - CU_ASSERT(req->cmd.opc == NVME_OPC_GET_LOG_PAGE); + CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE); temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) | SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY; @@ -233,11 +233,11 @@ static void test_firmware_get_log_page(void) { struct nvme_controller ctrlr = {}; - struct nvme_firmware_page payload = {}; + struct spdk_nvme_firmware_page payload = {}; verify_fn = verify_firmware_log_page; - 
nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_FIRMWARE_SLOT, NVME_GLOBAL_NAMESPACE_TAG, + nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -245,20 +245,20 @@ test_firmware_get_log_page(void) static void test_health_get_log_page(void) { - struct nvme_controller ctrlr = {}; - struct nvme_health_information_page payload = {}; + struct nvme_controller ctrlr = {}; + struct spdk_nvme_health_information_page payload = {}; verify_fn = verify_health_log_page; - nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_HEALTH_INFORMATION, health_log_nsid, &payload, + nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid, &payload, sizeof(payload), NULL, NULL); } static void test_error_get_log_page(void) { - struct nvme_controller ctrlr = {}; - struct nvme_error_information_entry payload = {}; + struct nvme_controller ctrlr = {}; + struct spdk_nvme_error_information_entry payload = {}; ctrlr.cdata.elpe = CTRLR_CDATA_ELPE; @@ -266,7 +266,7 @@ test_error_get_log_page(void) /* valid page */ error_num_entries = 1; - nvme_ctrlr_cmd_get_log_page(&ctrlr, NVME_LOG_ERROR, NVME_GLOBAL_NAMESPACE_TAG, &payload, + nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -288,7 +288,7 @@ static void test_intel_temperature_get_log_page(void) verify_fn = verify_intel_temperature_log_page; - nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, NVME_GLOBAL_NAMESPACE_TAG, + nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -300,7 +300,7 @@ static void test_intel_read_latency_get_log_page(void) verify_fn = verify_intel_read_latency_log_page; nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY, - NVME_GLOBAL_NAMESPACE_TAG, + SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -312,7 +312,7 @@ static void test_intel_write_latency_get_log_page(void) verify_fn = verify_intel_write_latency_log_page; nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY, - NVME_GLOBAL_NAMESPACE_TAG, + SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -323,7 +323,7 @@ static void test_intel_get_log_page_directory(void) verify_fn = verify_intel_get_log_page_directory; - nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, NVME_GLOBAL_NAMESPACE_TAG, + nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG, &payload, sizeof(payload), NULL, NULL); } @@ -378,7 +378,7 @@ static void test_io_raw_cmd(void) { struct nvme_controller ctrlr = {}; - struct nvme_command cmd = {}; + struct spdk_nvme_cmd cmd = {}; verify_fn = verify_io_raw_cmd; diff --git a/test/lib/nvme/unit/nvme_impl.h b/test/lib/nvme/unit/nvme_impl.h index dce7300e6..02bbad12f 100644 --- a/test/lib/nvme/unit/nvme_impl.h +++ b/test/lib/nvme/unit/nvme_impl.h @@ -129,6 +129,6 @@ nvme_mutex_init_recursive(nvme_mutex_t *mtx) /** * Copy a struct nvme_command from one memory location to another. 
*/ -#define nvme_copy_command(dst, src) memcpy((dst), (src), sizeof(struct nvme_command)) +#define nvme_copy_command(dst, src) memcpy((dst), (src), sizeof(struct spdk_nvme_cmd)) #endif /* __NVME_IMPL_H__ */ diff --git a/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c b/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c index 75614b833..9990214db 100644 --- a/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c +++ b/test/lib/nvme/unit/nvme_ns_cmd_c/nvme_ns_cmd_ut.c @@ -97,7 +97,7 @@ prepare_for_test(struct nvme_namespace *ns, struct nvme_controller *ctrlr, } static void -nvme_cmd_interpret_rw(const struct nvme_command *cmd, +nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd, uint64_t *lba, uint32_t *num_blocks) { *lba = *(const uint64_t *)&cmd->cdw10; @@ -268,7 +268,7 @@ split_test4(void) lba_count = (256 * 1024) / 512; rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, - NVME_IO_FLAGS_FORCE_UNIT_ACCESS); + SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS); CU_ASSERT(rc == 0); SPDK_CU_ASSERT_FATAL(g_request != NULL); @@ -282,8 +282,8 @@ split_test4(void) CU_ASSERT(child->payload_size == (256 - 10) * 512); CU_ASSERT(cmd_lba == 10); CU_ASSERT(cmd_lba_count == 256 - 10); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0); nvme_free_request(child); child = TAILQ_FIRST(&g_request->children); @@ -293,8 +293,8 @@ split_test4(void) CU_ASSERT(child->payload_size == 128 * 1024); CU_ASSERT(cmd_lba == 256); CU_ASSERT(cmd_lba_count == 256); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0); nvme_free_request(child); child = TAILQ_FIRST(&g_request->children); @@ -304,8 +304,8 @@ split_test4(void) CU_ASSERT(child->payload_size == 10 * 512); CU_ASSERT(cmd_lba == 512); CU_ASSERT(cmd_lba_count == 10); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); - CU_ASSERT((child->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0); + CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0); nvme_free_request(child); CU_ASSERT(TAILQ_EMPTY(&g_request->children)); @@ -325,7 +325,7 @@ test_nvme_ns_cmd_flush(void) prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0); nvme_ns_cmd_flush(&ns, cb_fn, cb_arg); - CU_ASSERT(g_request->cmd.opc == NVME_OPC_FLUSH); + CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH); CU_ASSERT(g_request->cmd.nsid == ns.id); nvme_free_request(g_request); @@ -344,7 +344,7 @@ test_nvme_ns_cmd_write_zeroes(void) prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0); nvme_ns_cmd_write_zeroes(&ns, 0, 2, cb_fn, cb_arg, 0); - CU_ASSERT(g_request->cmd.opc == NVME_OPC_WRITE_ZEROES); + CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES); CU_ASSERT(g_request->cmd.nsid == ns.id); nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count); CU_ASSERT_EQUAL(cmd_lba, 0); @@ -365,23 +365,23 @@ test_nvme_ns_cmd_deallocate(void) int rc = 0; prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0); - payload = malloc(num_ranges * sizeof(struct nvme_dsm_range)); + payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range)); 
 	nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_DATASET_MANAGEMENT);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
 	CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
-	CU_ASSERT(g_request->cmd.cdw11 == NVME_DSM_ATTR_DEALLOCATE);
+	CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
 	free(payload);
 	nvme_free_request(g_request);
 	num_ranges = 256;
-	payload = malloc(num_ranges * sizeof(struct nvme_dsm_range));
+	payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range));
 	nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_DATASET_MANAGEMENT);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
 	CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
-	CU_ASSERT(g_request->cmd.cdw11 == NVME_DSM_ATTR_DEALLOCATE);
+	CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
 	free(payload);
 	nvme_free_request(g_request);
@@ -407,19 +407,19 @@ test_io_flags(void)
 	lba_count = (4 * 1024) / 512;
 	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
-			      NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+			      SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
 	CU_ASSERT(rc == 0);
 	CU_ASSERT_FATAL(g_request != NULL);
-	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
-	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
 	nvme_free_request(g_request);
 	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
-			      NVME_IO_FLAGS_LIMITED_RETRY);
+			      SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
 	CU_ASSERT(rc == 0);
 	CU_ASSERT_FATAL(g_request != NULL);
-	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
-	CU_ASSERT((g_request->cmd.cdw12 & NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
 	nvme_free_request(g_request);
 	free(payload);
@@ -431,7 +431,7 @@ test_nvme_ns_cmd_reservation_register(void)
 {
 	struct nvme_namespace ns;
 	struct nvme_controller ctrlr;
-	struct nvme_reservation_register_data *payload;
+	struct spdk_nvme_reservation_register_data *payload;
 	bool ignore_key = 1;
 	nvme_cb_fn_t cb_fn = NULL;
 	void *cb_arg = NULL;
@@ -439,21 +439,20 @@ test_nvme_ns_cmd_reservation_register(void)
 	uint32_t tmp_cdw10;
 	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
-	payload = (struct nvme_reservation_register_data *)malloc(sizeof(struct
-			nvme_reservation_register_data));
+	payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
 	rc = nvme_ns_cmd_reservation_register(&ns, payload, ignore_key,
-					      NVME_RESERVE_REGISTER_KEY,
-					      NVME_RESERVE_PTPL_NO_CHANGES,
+					      SPDK_NVME_RESERVE_REGISTER_KEY,
+					      SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
 					      cb_fn, cb_arg);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_RESERVATION_REGISTER);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
-	tmp_cdw10 = NVME_RESERVE_REGISTER_KEY;
+	tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
 	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
-	tmp_cdw10 |= (uint32_t)NVME_RESERVE_PTPL_NO_CHANGES << 30;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;
 	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
@@ -466,7 +465,7 @@ test_nvme_ns_cmd_reservation_release(void)
 {
 	struct nvme_namespace ns;
 	struct nvme_controller ctrlr;
-	struct nvme_reservation_key_data *payload;
+	struct spdk_nvme_reservation_key_data *payload;
 	bool ignore_key = 1;
 	nvme_cb_fn_t cb_fn = NULL;
 	void *cb_arg = NULL;
@@ -474,21 +473,20 @@ test_nvme_ns_cmd_reservation_release(void)
 	uint32_t tmp_cdw10;
 	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
-	payload = (struct nvme_reservation_key_data *)malloc(sizeof(struct
-			nvme_reservation_key_data));
+	payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
 	rc = nvme_ns_cmd_reservation_release(&ns, payload, ignore_key,
-					     NVME_RESERVE_RELEASE,
-					     NVME_RESERVE_WRITE_EXCLUSIVE,
+					     SPDK_NVME_RESERVE_RELEASE,
+					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
 					     cb_fn, cb_arg);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_RESERVATION_RELEASE);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
-	tmp_cdw10 = NVME_RESERVE_RELEASE;
+	tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
 	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
-	tmp_cdw10 |= (uint32_t)NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
 	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
@@ -501,7 +499,7 @@ test_nvme_ns_cmd_reservation_acquire(void)
 {
 	struct nvme_namespace ns;
 	struct nvme_controller ctrlr;
-	struct nvme_reservation_acquire_data *payload;
+	struct spdk_nvme_reservation_acquire_data *payload;
 	bool ignore_key = 1;
 	nvme_cb_fn_t cb_fn = NULL;
 	void *cb_arg = NULL;
@@ -509,21 +507,20 @@ test_nvme_ns_cmd_reservation_acquire(void)
 	uint32_t tmp_cdw10;
 	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
-	payload = (struct nvme_reservation_acquire_data *)malloc(sizeof(struct
-			nvme_reservation_acquire_data));
+	payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
 	rc = nvme_ns_cmd_reservation_acquire(&ns, payload, ignore_key,
-					     NVME_RESERVE_ACQUIRE,
-					     NVME_RESERVE_WRITE_EXCLUSIVE,
+					     SPDK_NVME_RESERVE_ACQUIRE,
+					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
 					     cb_fn, cb_arg);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_RESERVATION_ACQUIRE);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
-	tmp_cdw10 = NVME_RESERVE_ACQUIRE;
+	tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
 	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
-	tmp_cdw10 |= (uint32_t)NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
 	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
@@ -536,20 +533,19 @@ test_nvme_ns_cmd_reservation_report(void)
 {
 	struct nvme_namespace ns;
 	struct nvme_controller ctrlr;
-	struct nvme_reservation_status_data *payload;
+	struct spdk_nvme_reservation_status_data *payload;
 	nvme_cb_fn_t cb_fn = NULL;
 	void *cb_arg = NULL;
 	int rc = 0;
 	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
-	payload = (struct nvme_reservation_status_data *)malloc(sizeof(struct
-			nvme_reservation_status_data));
+	payload = malloc(sizeof(struct spdk_nvme_reservation_status_data));
 	rc = nvme_ns_cmd_reservation_report(&ns, payload, 0x1000, cb_fn, cb_arg);
 	CU_ASSERT(rc == 0);
-	CU_ASSERT(g_request->cmd.opc == NVME_OPC_RESERVATION_REPORT);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
 	CU_ASSERT(g_request->cmd.nsid == ns.id);
 	CU_ASSERT(g_request->cmd.cdw10 == (0x1000 / 4));
diff --git a/test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut.c b/test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut.c
index dca1e51d0..de444a862 100644
--- a/test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut.c
+++ b/test/lib/nvme/unit/nvme_qpair_c/nvme_qpair_ut.c
@@ -112,7 +112,7 @@ static void
 test1(void)
 {
 	struct nvme_qpair qpair = {};
-	struct nvme_command cmd = {};
+	struct spdk_nvme_cmd cmd = {};
 	outbuf[0] = '\0';
@@ -122,7 +122,7 @@ test1(void)
 	 * I/o opc.
 	 */
 	qpair.id = 0;
-	cmd.opc = NVME_OPC_IDENTIFY;
+	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 	nvme_qpair_print_command(&qpair, &cmd);
@@ -133,7 +133,7 @@ static void
 test2(void)
 {
 	struct nvme_qpair qpair = {};
-	struct nvme_command cmd = {};
+	struct spdk_nvme_cmd cmd = {};
 	outbuf[0] = '\0';
@@ -143,7 +143,7 @@ test2(void)
 	 * admin opc.
 	 */
 	qpair.id = 1;
-	cmd.opc = NVME_OPC_DATASET_MANAGEMENT;
+	cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
 	nvme_qpair_print_command(&qpair, &cmd);
@@ -153,7 +153,7 @@ test2(void)
 static void
 prepare_submit_request_test(struct nvme_qpair *qpair, struct nvme_controller *ctrlr,
-			    struct nvme_registers *regs)
+			    struct spdk_nvme_registers *regs)
 {
 	memset(ctrlr, 0, sizeof(*ctrlr));
 	ctrlr->regs = regs;
@@ -174,9 +174,9 @@ cleanup_submit_request_test(struct nvme_qpair *qpair)
 static void
 ut_insert_cq_entry(struct nvme_qpair *qpair, uint32_t slot)
 {
-	struct nvme_request *req;
-	struct nvme_tracker *tr;
-	struct nvme_completion *cpl;
+	struct nvme_request *req;
+	struct nvme_tracker *tr;
+	struct spdk_nvme_cpl *cpl;
 	nvme_alloc_request(&req);
 	SPDK_CU_ASSERT_FATAL(req != NULL);
@@ -196,24 +196,24 @@ ut_insert_cq_entry(struct nvme_qpair *qpair, uint32_t slot)
 }
 static void
-expected_success_callback(void *arg, const struct nvme_completion *cpl)
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
 {
-	CU_ASSERT(!nvme_completion_is_error(cpl));
+	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
 }
 static void
-expected_failure_callback(void *arg, const struct nvme_completion *cpl)
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
 {
-	CU_ASSERT(nvme_completion_is_error(cpl));
+	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
 }
 static void
 test3(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_request *req;
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
+	struct nvme_qpair qpair = {};
+	struct nvme_request *req;
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -233,11 +233,11 @@ test3(void)
 static void
 test4(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_request *req;
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
-	char payload[4096];
+	struct nvme_qpair qpair = {};
+	struct nvme_request *req;
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
+	char payload[4096];
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -266,11 +266,11 @@ test4(void)
 static void
 test_ctrlr_failed(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_request *req;
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
-	char payload[4096];
+	struct nvme_qpair qpair = {};
+	struct nvme_request *req;
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
+	char payload[4096];
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -308,12 +308,12 @@ static void struct_packing(void)
 static void test_nvme_qpair_fail(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_request *req = NULL;
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
-	struct nvme_tracker *tr_temp;
-	uint64_t phys_addr = 0;
+	struct nvme_qpair qpair = {};
+	struct nvme_request *req = NULL;
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
+	struct nvme_tracker *tr_temp;
+	uint64_t phys_addr = 0;
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -339,9 +339,9 @@ static void test_nvme_qpair_fail(void)
 static void test_nvme_qpair_process_completions(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
+	struct nvme_qpair qpair = {};
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 	qpair.is_enabled = false;
@@ -354,9 +354,9 @@ static void test_nvme_qpair_process_completions(void)
 static void
 test_nvme_qpair_process_completions_limit(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
+	struct nvme_qpair qpair = {};
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
 	prepare_submit_request_test(&qpair, &ctrlr, &regs);
 	qpair.is_enabled = true;
@@ -385,11 +385,11 @@ test_nvme_qpair_process_completions_limit(void)
 static void test_nvme_qpair_destroy(void)
 {
-	struct nvme_qpair qpair = {};
-	struct nvme_controller ctrlr = {};
-	struct nvme_registers regs = {};
-	struct nvme_tracker *tr_temp;
-	uint64_t phys_addr = 0;
+	struct nvme_qpair qpair = {};
+	struct nvme_controller ctrlr = {};
+	struct spdk_nvme_registers regs = {};
+	struct nvme_tracker *tr_temp;
+	uint64_t phys_addr = 0;
 	memset(&ctrlr, 0, sizeof(ctrlr));
 	ctrlr.regs = &regs;
@@ -406,7 +406,7 @@ static void test_nvme_qpair_destroy(void)
 	tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
 	SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
-	tr_temp->req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
+	tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
 	LIST_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, list);
 	nvme_qpair_destroy(&qpair);
@@ -416,59 +416,59 @@ static void test_nvme_qpair_destroy(void)
 static void
 test_nvme_completion_is_retry(void)
 {
-	struct nvme_completion cpl = {};
+	struct spdk_nvme_cpl cpl = {};
-	cpl.status.sct = NVME_SCT_GENERIC;
-	cpl.status.sc = NVME_SC_ABORTED_BY_REQUEST;
+	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
 	cpl.status.dnr = 0;
 	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_INVALID_OPCODE;
+	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_INVALID_FIELD;
+	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_COMMAND_ID_CONFLICT;
+	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR;
+	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_ABORTED_POWER_LOSS;
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_INTERNAL_DEVICE_ERROR;
+	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_ABORTED_FAILED_FUSED;
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_ABORTED_MISSING_FUSED;
+	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_COMMAND_SEQUENCE_ERROR;
+	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_LBA_OUT_OF_RANGE;
+	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sc = NVME_SC_CAPACITY_EXCEEDED;
+	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
 	cpl.status.sc = 0x70;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sct = NVME_SCT_COMMAND_SPECIFIC;
+	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sct = NVME_SCT_MEDIA_ERROR;
+	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
-	cpl.status.sct = NVME_SCT_VENDOR_SPECIFIC;
+	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
 	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
 	cpl.status.sct = 0x4;
@@ -480,16 +480,17 @@ test_get_status_string(void)
 {
 	const char *status_string;
-	status_string = get_status_string(NVME_SCT_GENERIC, NVME_SC_SUCCESS);
+	status_string = get_status_string(SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS);
 	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
-	status_string = get_status_string(NVME_SCT_COMMAND_SPECIFIC, NVME_SC_COMPLETION_QUEUE_INVALID);
+	status_string = get_status_string(SPDK_NVME_SCT_COMMAND_SPECIFIC,
+					  SPDK_NVME_SC_COMPLETION_QUEUE_INVALID);
 	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
-	status_string = get_status_string(NVME_SCT_MEDIA_ERROR, NVME_SC_UNRECOVERED_READ_ERROR);
+	status_string = get_status_string(SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
 	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
-	status_string = get_status_string(NVME_SCT_VENDOR_SPECIFIC, 0);
+	status_string = get_status_string(SPDK_NVME_SCT_VENDOR_SPECIFIC, 0);
 	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
 	status_string = get_status_string(100, 0);