nvme: add spdk_ prefix

Shorten commonly-used names:
controller -> ctrlr
namespace -> ns
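
For example, a typical call site changes like this (an illustrative
sketch, not a hunk from this commit):

    /* before */
    struct nvme_controller *ctrlr;
    rc = nvme_probe(NULL, probe_cb, attach_cb);

    /* after */
    struct spdk_nvme_ctrlr *ctrlr;
    rc = spdk_nvme_probe(NULL, probe_cb, attach_cb);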

Change-Id: I64f0ce7c65385bab0283f8a8341a3447792b3312
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp
Date: 2016-02-10 11:26:12 -07:00
commit 6ce73aa6e7 (parent ad35d6cd86)
19 changed files with 706 additions and 698 deletions


@@ -134,18 +134,18 @@ get_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
}
static int
get_feature(struct nvme_controller *ctrlr, uint8_t fid)
get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t fid)
{
struct spdk_nvme_cmd cmd = {};
cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
cmd.cdw10 = fid;
return nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, get_feature_completion, &features[fid]);
return spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, get_feature_completion, &features[fid]);
}
static void
get_features(struct nvme_controller *ctrlr)
get_features(struct spdk_nvme_ctrlr *ctrlr)
{
size_t i;
@@ -167,12 +167,12 @@ get_features(struct nvme_controller *ctrlr)
}
while (outstanding_commands) {
nvme_ctrlr_process_admin_completions(ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}
}
static int
get_health_log_page(struct nvme_controller *ctrlr)
get_health_log_page(struct spdk_nvme_ctrlr *ctrlr)
{
if (health_page == NULL) {
health_page = rte_zmalloc("nvme health", sizeof(*health_page), 4096);
@@ -182,9 +182,9 @@ get_health_log_page(struct nvme_controller *ctrlr)
exit(1);
}
if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, SPDK_NVME_GLOBAL_NS_TAG,
health_page, sizeof(*health_page), get_log_page_completion, NULL)) {
printf("nvme_ctrlr_cmd_get_log_page() failed\n");
if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
SPDK_NVME_GLOBAL_NS_TAG, health_page, sizeof(*health_page), get_log_page_completion, NULL)) {
printf("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
exit(1);
}
@@ -192,7 +192,7 @@ get_health_log_page(struct nvme_controller *ctrlr)
}
static int
get_intel_smart_log_page(struct nvme_controller *ctrlr)
get_intel_smart_log_page(struct spdk_nvme_ctrlr *ctrlr)
{
if (intel_smart_page == NULL) {
intel_smart_page = rte_zmalloc("nvme intel smart", sizeof(*intel_smart_page), 4096);
@@ -202,9 +202,9 @@ get_intel_smart_log_page(struct nvme_controller *ctrlr)
exit(1);
}
if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_SMART, SPDK_NVME_GLOBAL_NS_TAG,
intel_smart_page, sizeof(*intel_smart_page), get_log_page_completion, NULL)) {
printf("nvme_ctrlr_cmd_get_log_page() failed\n");
if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_SMART, SPDK_NVME_GLOBAL_NS_TAG,
intel_smart_page, sizeof(*intel_smart_page), get_log_page_completion, NULL)) {
printf("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
exit(1);
}
@@ -212,7 +212,7 @@ get_intel_smart_log_page(struct nvme_controller *ctrlr)
}
static int
get_intel_temperature_log_page(struct nvme_controller *ctrlr)
get_intel_temperature_log_page(struct spdk_nvme_ctrlr *ctrlr)
{
if (intel_temperature_page == NULL) {
intel_temperature_page = rte_zmalloc("nvme intel temperature", sizeof(*intel_temperature_page),
@@ -223,16 +223,17 @@ get_intel_temperature_log_page(struct nvme_controller *ctrlr)
exit(1);
}
if (nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
intel_temperature_page, sizeof(*intel_temperature_page), get_log_page_completion, NULL)) {
printf("nvme_ctrlr_cmd_get_log_page() failed\n");
if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE,
SPDK_NVME_GLOBAL_NS_TAG, intel_temperature_page, sizeof(*intel_temperature_page),
get_log_page_completion, NULL)) {
printf("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
exit(1);
}
return 0;
}
static void
get_log_pages(struct nvme_controller *ctrlr)
get_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
const struct spdk_nvme_ctrlr_data *ctrlr_data;
outstanding_commands = 0;
@@ -243,16 +244,16 @@ get_log_pages(struct nvme_controller *ctrlr)
printf("Get Log Page (SMART/health) failed\n");
}
ctrlr_data = nvme_ctrlr_get_data(ctrlr);
ctrlr_data = spdk_nvme_ctrlr_get_data(ctrlr);
if (ctrlr_data->vid == SPDK_PCI_VID_INTEL) {
if (nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_INTEL_LOG_SMART)) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_INTEL_LOG_SMART)) {
if (get_intel_smart_log_page(ctrlr) == 0) {
outstanding_commands++;
} else {
printf("Get Log Page (Intel SMART/health) failed\n");
}
}
if (nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE)) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE)) {
if (get_intel_temperature_log_page(ctrlr) == 0) {
outstanding_commands++;
} else {
@ -262,7 +263,7 @@ get_log_pages(struct nvme_controller *ctrlr)
}
while (outstanding_commands) {
nvme_ctrlr_process_admin_completions(ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}
}
@@ -321,16 +322,16 @@ print_uint_var_dec(uint8_t *array, unsigned int len)
}
static void
print_namespace(struct nvme_namespace *ns)
print_namespace(struct spdk_nvme_ns *ns)
{
const struct spdk_nvme_ns_data *nsdata;
uint32_t i;
uint32_t flags;
nsdata = nvme_ns_get_data(ns);
flags = nvme_ns_get_flags(ns);
nsdata = spdk_nvme_ns_get_data(ns);
flags = spdk_nvme_ns_get_flags(ns);
printf("Namespace ID:%d\n", nvme_ns_get_id(ns));
printf("Namespace ID:%d\n", spdk_nvme_ns_get_id(ns));
if (g_hex_dump) {
hex_dump(nsdata, sizeof(*nsdata));
@@ -338,11 +339,11 @@ print_namespace(struct nvme_namespace *ns)
}
printf("Deallocate: %s\n",
(flags & NVME_NS_DEALLOCATE_SUPPORTED) ? "Supported" : "Not Supported");
(flags & SPDK_NVME_NS_DEALLOCATE_SUPPORTED) ? "Supported" : "Not Supported");
printf("Flush: %s\n",
(flags & NVME_NS_FLUSH_SUPPORTED) ? "Supported" : "Not Supported");
(flags & SPDK_NVME_NS_FLUSH_SUPPORTED) ? "Supported" : "Not Supported");
printf("Reservation: %s\n",
(flags & NVME_NS_RESERVATION_SUPPORTED) ? "Supported" : "Not Supported");
(flags & SPDK_NVME_NS_RESERVATION_SUPPORTED) ? "Supported" : "Not Supported");
printf("Size (in LBAs): %lld (%lldM)\n",
(long long)nsdata->nsze,
(long long)nsdata->nsze / 1024 / 1024);
@@ -364,7 +365,7 @@ print_namespace(struct nvme_namespace *ns)
}
static void
print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev)
print_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_dev)
{
const struct spdk_nvme_ctrlr_data *cdata;
uint8_t str[128];
@@ -373,7 +374,7 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev)
get_features(ctrlr);
get_log_pages(ctrlr);
cdata = nvme_ctrlr_get_data(ctrlr);
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
printf("=====================================================\n");
printf("NVMe Controller at PCI bus %d, device %d, function %d\n",
@@ -729,8 +730,8 @@ print_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev)
printf("\n");
}
for (i = 1; i <= nvme_ctrlr_get_num_ns(ctrlr); i++) {
print_namespace(nvme_ctrlr_get_ns(ctrlr, i));
for (i = 1; i <= spdk_nvme_ctrlr_get_num_ns(ctrlr); i++) {
print_namespace(spdk_nvme_ctrlr_get_ns(ctrlr, i));
}
}
@@ -782,10 +783,10 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_nvme_ctrlr *ctrlr)
{
print_controller(ctrlr, pci_dev);
nvme_detach(ctrlr);
spdk_nvme_detach(ctrlr);
}
static const char *ealargs[] = {
@@ -812,7 +813,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
@@ -822,8 +823,8 @@ int main(int argc, char **argv)
}
rc = 0;
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
rc = 1;
}
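
Taken together, the hunks above reduce to one enumeration pattern. A
minimal sketch using only calls renamed in this commit (it assumes the
same EAL and request_mempool setup as this example's main(), and is not
compile-tested):

    static bool
    probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
    {
        return true; /* attach to every NVMe device found */
    }

    static void
    attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev,
              struct spdk_nvme_ctrlr *ctrlr)
    {
        uint32_t i;

        /* Namespace IDs run from 1 to spdk_nvme_ctrlr_get_num_ns(), no gaps. */
        for (i = 1; i <= spdk_nvme_ctrlr_get_num_ns(ctrlr); i++) {
            struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, i);
            printf("ns %u: %llu sectors\n", spdk_nvme_ns_get_id(ns),
                   (unsigned long long)spdk_nvme_ns_get_num_sectors(ns));
        }
        spdk_nvme_detach(ctrlr);
    }

    /* ... then, from main(): spdk_nvme_probe(NULL, probe_cb, attach_cb); */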


@@ -55,7 +55,7 @@
#endif
struct ctrlr_entry {
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_intel_rw_latency_page *latency_page;
struct ctrlr_entry *next;
char name[1024];
@@ -71,8 +71,8 @@ struct ns_entry {
union {
struct {
struct nvme_controller *ctrlr;
struct nvme_namespace *ns;
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_ns *ns;
} nvme;
#if HAVE_LIBAIO
struct {
@@ -145,19 +145,19 @@ static void
task_complete(struct perf_task *task);
static void
register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns)
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
{
struct ns_entry *entry;
const struct spdk_nvme_ctrlr_data *cdata;
cdata = nvme_ctrlr_get_data(ctrlr);
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
if (nvme_ns_get_size(ns) < g_io_size_bytes ||
nvme_ns_get_sector_size(ns) > g_io_size_bytes) {
if (spdk_nvme_ns_get_size(ns) < g_io_size_bytes ||
spdk_nvme_ns_get_sector_size(ns) > g_io_size_bytes) {
printf("WARNING: controller %-20.20s (%-20.20s) ns %u has invalid "
"ns size %" PRIu64 " / block size %u for I/O size %u\n",
cdata->mn, cdata->sn, nvme_ns_get_id(ns),
nvme_ns_get_size(ns), nvme_ns_get_sector_size(ns), g_io_size_bytes);
cdata->mn, cdata->sn, spdk_nvme_ns_get_id(ns),
spdk_nvme_ns_get_size(ns), spdk_nvme_ns_get_sector_size(ns), g_io_size_bytes);
return;
}
@@ -170,9 +170,9 @@ register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns)
entry->type = ENTRY_TYPE_NVME_NS;
entry->u.nvme.ctrlr = ctrlr;
entry->u.nvme.ns = ns;
entry->size_in_ios = nvme_ns_get_size(ns) /
entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
g_io_size_bytes;
entry->io_size_blocks = g_io_size_bytes / nvme_ns_get_sector_size(ns);
entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
@@ -191,7 +191,7 @@ enable_latency_tracking_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
}
static void
set_latency_tracking_feature(struct nvme_controller *ctrlr, bool enable)
set_latency_tracking_feature(struct spdk_nvme_ctrlr *ctrlr, bool enable)
{
int res;
union spdk_nvme_intel_feat_latency_tracking latency_tracking;
@@ -202,8 +202,8 @@ set_latency_tracking_feature(struct nvme_controller *ctrlr, bool enable)
latency_tracking.bits.enable = 0x00;
}
res = nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING,
latency_tracking.raw, 0, NULL, 0, enable_latency_tracking_complete, NULL);
res = spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING,
latency_tracking.raw, 0, NULL, 0, enable_latency_tracking_complete, NULL);
if (res) {
printf("fail to allocate nvme request.\n");
return;
@@ -211,16 +211,16 @@ set_latency_tracking_feature(struct nvme_controller *ctrlr, bool enable)
g_outstanding_commands++;
while (g_outstanding_commands) {
nvme_ctrlr_process_admin_completions(ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}
}
static void
register_ctrlr(struct nvme_controller *ctrlr)
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
int nsid, num_ns;
struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
const struct spdk_nvme_ctrlr_data *cdata = nvme_ctrlr_get_data(ctrlr);
const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);
if (entry == NULL) {
perror("ctrlr_entry malloc");
@@ -241,12 +241,12 @@ register_ctrlr(struct nvme_controller *ctrlr)
g_controllers = entry;
if (g_latency_tracking_enable &&
nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
spdk_nvme_ctrlr_is_feature_supported(ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
set_latency_tracking_feature(ctrlr, true);
num_ns = nvme_ctrlr_get_num_ns(ctrlr);
num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
for (nsid = 1; nsid <= num_ns; nsid++) {
register_ns(ctrlr, nvme_ctrlr_get_ns(ctrlr, nsid));
register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, nsid));
}
}
@@ -398,8 +398,8 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
} else
#endif
{
rc = nvme_ns_cmd_read(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
} else {
#if HAVE_LIBAIO
@@ -409,8 +409,8 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
} else
#endif
{
rc = nvme_ns_cmd_write(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
}
@@ -458,7 +458,7 @@ check_io(struct ns_worker_ctx *ns_ctx)
} else
#endif
{
nvme_ctrlr_process_io_completions(ns_ctx->entry->u.nvme.ctrlr, g_max_completions);
spdk_nvme_ctrlr_process_io_completions(ns_ctx->entry->u.nvme.ctrlr, g_max_completions);
}
}
@@ -488,8 +488,8 @@ work_fn(void *arg)
printf("Starting thread on core %u\n", worker->lcore);
if (nvme_register_io_thread() != 0) {
fprintf(stderr, "nvme_register_io_thread() failed on core %u\n", worker->lcore);
if (spdk_nvme_register_io_thread() != 0) {
fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
return -1;
}
@@ -523,7 +523,7 @@ work_fn(void *arg)
ns_ctx = ns_ctx->next;
}
nvme_unregister_io_thread();
spdk_nvme_unregister_io_thread();
return 0;
}
@@ -614,11 +614,11 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
printf("========================================================\n");
ctrlr = g_controllers;
while (ctrlr) {
if (nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
if (nvme_ctrlr_cmd_get_log_page(ctrlr->ctrlr, log_page, SPDK_NVME_GLOBAL_NS_TAG,
ctrlr->latency_page, sizeof(struct spdk_nvme_intel_rw_latency_page),
enable_latency_tracking_complete,
NULL)) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr->ctrlr, log_page, SPDK_NVME_GLOBAL_NS_TAG,
ctrlr->latency_page, sizeof(struct spdk_nvme_intel_rw_latency_page),
enable_latency_tracking_complete,
NULL)) {
printf("nvme_ctrlr_cmd_get_log_page() failed\n");
exit(1);
}
@@ -633,14 +633,14 @@ print_latency_statistics(const char *op_name, enum spdk_nvme_intel_log_page log_
while (g_outstanding_commands) {
ctrlr = g_controllers;
while (ctrlr) {
nvme_ctrlr_process_admin_completions(ctrlr->ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr->ctrlr);
ctrlr = ctrlr->next;
}
}
ctrlr = g_controllers;
while (ctrlr) {
if (nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr->ctrlr, log_page)) {
print_latency_page(ctrlr);
}
ctrlr = ctrlr->next;
@@ -843,7 +843,7 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *dev, struct spdk_nvme_ctrlr *ctrlr)
{
printf("Attached to %04x:%02x:%02x.%02x\n",
spdk_pci_device_get_domain(dev),
@@ -859,8 +859,8 @@ register_controllers(void)
{
printf("Initializing NVMe Controllers\n");
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
return 1;
}
@@ -876,9 +876,9 @@ unregister_controllers(void)
struct ctrlr_entry *next = entry->next;
rte_free(entry->latency_page);
if (g_latency_tracking_enable &&
nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr, SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
set_latency_tracking_feature(entry->ctrlr, false);
nvme_detach(entry->ctrlr);
spdk_nvme_detach(entry->ctrlr);
free(entry);
entry = next;
}
@@ -988,7 +988,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
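
perf's I/O path above follows the driver's per-thread model: each worker
calls spdk_nvme_register_io_thread(), submits I/O, and polls
spdk_nvme_ctrlr_process_io_completions() until its callbacks fire. A
stripped-down sketch of one read (assumes ns, ctrlr and a DMA-safe buf
were set up as in the example; not compile-tested):

    static volatile bool done;

    static void
    io_complete(void *arg, const struct spdk_nvme_cpl *cpl)
    {
        done = true; /* completion status is in cpl->status */
    }

    ...
    if (spdk_nvme_register_io_thread() != 0) {
        return -1;
    }
    if (spdk_nvme_ns_cmd_read(ns, buf, 0 /* lba */, 1 /* lba_count */,
                              io_complete, NULL, 0 /* io_flags */) != 0) {
        return -1;
    }
    while (!done) {
        spdk_nvme_ctrlr_process_io_completions(ctrlr, 0 /* no limit */);
    }
    spdk_nvme_unregister_io_thread();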


@@ -49,7 +49,7 @@ struct rte_mempool *request_mempool;
struct dev {
struct spdk_pci_device *pci_dev;
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
char name[100];
};
@@ -104,7 +104,7 @@ set_feature_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
}
static int
get_host_identifier(struct nvme_controller *ctrlr)
get_host_identifier(struct spdk_nvme_ctrlr *ctrlr)
{
int ret;
uint64_t *host_id;
@@ -116,8 +116,8 @@ get_host_identifier(struct nvme_controller *ctrlr)
outstanding_commands = 0;
host_id = rte_malloc(NULL, 8, 0);
ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
get_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]);
ret = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
get_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]);
if (ret) {
fprintf(stdout, "Get Feature: Failed\n");
return -1;
@@ -126,7 +126,7 @@ get_host_identifier(struct nvme_controller *ctrlr)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_admin_completions(ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}
if (features[SPDK_NVME_FEAT_HOST_IDENTIFIER].valid) {
@@ -137,7 +137,7 @@ get_host_identifier(struct nvme_controller *ctrlr)
}
static int
set_host_identifier(struct nvme_controller *ctrlr)
set_host_identifier(struct spdk_nvme_ctrlr *ctrlr)
{
int ret;
uint64_t *host_id;
@@ -153,8 +153,8 @@ set_host_identifier(struct nvme_controller *ctrlr)
set_feature_result = -1;
fprintf(stdout, "Set Feature: Host Identifier 0x%"PRIx64"\n", *host_id);
ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
set_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]);
ret = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
set_feature_completion, &features[SPDK_NVME_FEAT_HOST_IDENTIFIER]);
if (ret) {
fprintf(stdout, "Set Feature: Failed\n");
rte_free(host_id);
@@ -164,7 +164,7 @@ set_host_identifier(struct nvme_controller *ctrlr)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_admin_completions(ctrlr);
spdk_nvme_ctrlr_process_admin_completions(ctrlr);
}
if (set_feature_result)
@@ -187,13 +187,13 @@ reservation_ns_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
}
static int
reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id)
reservation_ns_register(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_register_data *rr_data;
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
rr_data = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_register_data), 0);
rr_data->crkey = CR_KEY;
@@ -202,10 +202,10 @@ reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id)
outstanding_commands = 0;
reserve_command_result = -1;
ret = nvme_ns_cmd_reservation_register(ns, rr_data, 1,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
reservation_ns_completion, NULL);
ret = spdk_nvme_ns_cmd_reservation_register(ns, rr_data, 1,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
reservation_ns_completion, NULL);
if (ret) {
fprintf(stderr, "Reservation Register Failed\n");
rte_free(rr_data);
@@ -214,7 +214,7 @@ reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
}
if (reserve_command_result)
@@ -225,22 +225,22 @@ reservation_ns_register(struct nvme_controller *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id)
reservation_ns_report(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
{
int ret, i;
uint8_t *payload;
struct spdk_nvme_reservation_status_data *status;
struct spdk_nvme_reservation_ctrlr_data *cdata;
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
payload = rte_zmalloc(NULL, 0x1000, 0x1000);
outstanding_commands = 0;
reserve_command_result = -1;
ret = nvme_ns_cmd_reservation_report(ns, payload, 0x1000,
reservation_ns_completion, NULL);
ret = spdk_nvme_ns_cmd_reservation_report(ns, payload, 0x1000,
reservation_ns_completion, NULL);
if (ret) {
fprintf(stderr, "Reservation Report Failed\n");
rte_free(payload);
@@ -249,7 +249,7 @@ reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
}
if (reserve_command_result) {
@@ -277,24 +277,24 @@ reservation_ns_report(struct nvme_controller *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_acquire(struct nvme_controller *ctrlr, uint16_t ns_id)
reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_acquire_data *cdata;
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
cdata = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_acquire_data), 0);
cdata->crkey = CR_KEY;
outstanding_commands = 0;
reserve_command_result = -1;
ret = nvme_ns_cmd_reservation_acquire(ns, cdata,
0,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
reservation_ns_completion, NULL);
ret = spdk_nvme_ns_cmd_reservation_acquire(ns, cdata,
0,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
reservation_ns_completion, NULL);
if (ret) {
fprintf(stderr, "Reservation Acquire Failed\n");
rte_free(cdata);
@@ -303,7 +303,7 @@ reservation_ns_acquire(struct nvme_controller *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
}
if (reserve_command_result)
@@ -314,24 +314,24 @@ reservation_ns_acquire(struct nvme_controller *ctrlr, uint16_t ns_id)
}
static int
reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id)
reservation_ns_release(struct spdk_nvme_ctrlr *ctrlr, uint16_t ns_id)
{
int ret;
struct spdk_nvme_reservation_key_data *cdata;
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
cdata = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_key_data), 0);
cdata->crkey = CR_KEY;
outstanding_commands = 0;
reserve_command_result = -1;
ret = nvme_ns_cmd_reservation_release(ns, cdata,
0,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
reservation_ns_completion, NULL);
ret = spdk_nvme_ns_cmd_reservation_release(ns, cdata,
0,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
reservation_ns_completion, NULL);
if (ret) {
fprintf(stderr, "Reservation Release Failed\n");
rte_free(cdata);
@@ -340,7 +340,7 @@ reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id)
outstanding_commands++;
while (outstanding_commands) {
nvme_ctrlr_process_io_completions(ctrlr, 100);
spdk_nvme_ctrlr_process_io_completions(ctrlr, 100);
}
if (reserve_command_result)
@@ -351,11 +351,11 @@ reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id)
}
static void
reserve_controller(struct nvme_controller *ctrlr, struct spdk_pci_device *pci_dev)
reserve_controller(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_device *pci_dev)
{
const struct spdk_nvme_ctrlr_data *cdata;
cdata = nvme_ctrlr_get_data(ctrlr);
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
printf("=====================================================\n");
printf("NVMe Controller at PCI bus %d, device %d, function %d\n",
@@ -397,7 +397,7 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_nvme_ctrlr *ctrlr)
{
struct dev *dev;
@@ -427,7 +427,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
@@ -436,13 +436,13 @@ int main(int argc, char **argv)
exit(1);
}
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
return 1;
}
if (num_devs) {
rc = nvme_register_io_thread();
rc = spdk_nvme_register_io_thread();
if (rc != 0)
return rc;
}
@@ -455,11 +455,11 @@ int main(int argc, char **argv)
for (i = 0; i < num_devs; i++) {
struct dev *dev = &devs[i];
nvme_detach(dev->ctrlr);
spdk_nvme_detach(dev->ctrlr);
}
if (num_devs)
nvme_unregister_io_thread();
spdk_nvme_unregister_io_thread();
return rc;
}
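
Every command in this example follows one pattern: clear
outstanding_commands and a result flag, submit, bump the counter on
success, then spin on spdk_nvme_ctrlr_process_io_completions() (or the
admin variant) until the completion callback drains it. The callback
bodies are not shown in these hunks; a plausible shape, sketched (the
real implementation may differ):

    static void
    reservation_ns_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
    {
        /* record success/failure, then release the polling loop */
        reserve_command_result =
            (cpl->status.sc == SPDK_NVME_SC_SUCCESS) ? 0 : -1;
        outstanding_commands--;
    }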


@@ -42,28 +42,28 @@
*
*/
#define NVME_DEFAULT_RETRY_COUNT (4)
extern int32_t nvme_retry_count;
#define SPDK_NVME_DEFAULT_RETRY_COUNT (4)
extern int32_t spdk_nvme_retry_count;
#ifdef __cplusplus
extern "C" {
#endif
/** \brief Opaque handle to a controller. Returned by \ref nvme_probe()'s attach_cb. */
struct nvme_controller;
/** \brief Opaque handle to a controller. Returned by \ref spdk_nvme_probe()'s attach_cb. */
struct spdk_nvme_ctrlr;
/**
* Callback for nvme_probe() enumeration.
* Callback for spdk_nvme_probe() enumeration.
*
* \return true to attach to this device.
*/
typedef bool (*nvme_probe_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev);
typedef bool (*spdk_nvme_probe_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev);
/**
* Callback for nvme_probe() to report a device that has been attached to the userspace NVMe driver.
* Callback for spdk_nvme_probe() to report a device that has been attached to the userspace NVMe driver.
*/
typedef void (*nvme_attach_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev,
struct nvme_controller *ctrlr);
typedef void (*spdk_nvme_attach_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev,
struct spdk_nvme_ctrlr *ctrlr);
/**
* \brief Enumerate the NVMe devices attached to the system and attach the userspace NVMe driver
@@ -77,20 +77,20 @@ typedef void (*nvme_attach_cb)(void *cb_ctx, struct spdk_pci_device *pci_dev,
* will be reported.
*
* To stop using the controller and release its associated resources,
* call \ref nvme_detach with the nvme_controller instance returned by this function.
* call \ref nvme_detach with the spdk_nvme_ctrlr instance returned by this function.
*/
int nvme_probe(void *cb_ctx, nvme_probe_cb probe_cb, nvme_attach_cb attach_cb);
int spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb);
/**
* \brief Detaches specified device returned by \ref nvme_probe()'s attach_cb from the NVMe driver.
*
* On success, the nvme_controller handle is no longer valid.
* On success, the spdk_nvme_ctrlr handle is no longer valid.
*
* This function should be called from a single thread while no other threads
* are actively using the NVMe device.
*
*/
int nvme_detach(struct nvme_controller *ctrlr);
int spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr);
/**
* \brief Perform a full hardware reset of the NVMe controller.
@@ -98,11 +98,11 @@ int nvme_detach(struct nvme_controller *ctrlr);
* This function should be called from a single thread while no other threads
* are actively using the NVMe device.
*
* Any pointers returned from nvme_ctrlr_get_ns() and nvme_ns_get_data() may be invalidated
* by calling this function. The number of namespaces as returned by nvme_ctrlr_get_num_ns() may
* Any pointers returned from spdk_nvme_ctrlr_get_ns() and spdk_nvme_ns_get_data() may be invalidated
* by calling this function. The number of namespaces as returned by spdk_nvme_ctrlr_get_num_ns() may
* also change.
*/
int nvme_ctrlr_reset(struct nvme_controller *ctrlr);
int spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr);
/**
* \brief Get the identify controller data as defined by the NVMe specification.
@@ -111,7 +111,7 @@ int nvme_ctrlr_reset(struct nvme_controller *ctrlr);
* the SPDK NVMe driver.
*
*/
const struct spdk_nvme_ctrlr_data *nvme_ctrlr_get_data(struct nvme_controller *ctrlr);
const struct spdk_nvme_ctrlr_data *spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr);
/**
* \brief Get the number of namespaces for the given NVMe controller.
@@ -119,11 +119,11 @@ const struct spdk_nvme_ctrlr_data *nvme_ctrlr_get_data(struct nvme_controller *c
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* This is equivalent to calling nvme_ctrlr_get_data() to get the
* nvme_controller_data and then reading the nn field.
* This is equivalent to calling spdk_nvme_ctrlr_get_data() to get the
* spdk_nvme_ctrlr_data and then reading the nn field.
*
*/
uint32_t nvme_ctrlr_get_num_ns(struct nvme_controller *ctrlr);
uint32_t spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr);
/**
* \brief Determine if a particular log page is supported by the given NVMe controller.
@@ -131,9 +131,9 @@ uint32_t nvme_ctrlr_get_num_ns(struct nvme_controller *ctrlr);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* \sa nvme_ctrlr_cmd_get_log_page()
* \sa spdk_nvme_ctrlr_cmd_get_log_page()
*/
bool nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, uint8_t log_page);
bool spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page);
/**
* \brief Determine if a particular feature is supported by the given NVMe controller.
@@ -141,51 +141,51 @@ bool nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, uint8_t log
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* \sa nvme_ctrlr_cmd_get_feature()
* \sa spdk_nvme_ctrlr_cmd_get_feature()
*/
bool nvme_ctrlr_is_feature_supported(struct nvme_controller *ctrlr, uint8_t feature_code);
bool spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code);
/**
* Signature for callback function invoked when a command is completed.
*
* The nvme_completion parameter contains the completion status.
* The spdk_nvme_cpl parameter contains the completion status.
*/
typedef void (*nvme_cb_fn_t)(void *, const struct spdk_nvme_cpl *);
typedef void (*spdk_nvme_cmd_cb)(void *, const struct spdk_nvme_cpl *);
/**
* Signature for callback function invoked when an asynchronous error
* request command is completed.
*
* The aer_cb_arg parameter is set to the context specified by
* nvme_register_aer_callback().
* The nvme_completion parameter contains the completion status of the
* spdk_nvme_register_aer_callback().
* The spdk_nvme_cpl parameter contains the completion status of the
* asynchronous event request that was completed.
*/
typedef void (*nvme_aer_cb_fn_t)(void *aer_cb_arg,
typedef void (*spdk_nvme_aer_cb)(void *aer_cb_arg,
const struct spdk_nvme_cpl *);
void nvme_ctrlr_register_aer_callback(struct nvme_controller *ctrlr,
nvme_aer_cb_fn_t aer_cb_fn,
void *aer_cb_arg);
void spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
spdk_nvme_aer_cb aer_cb_fn,
void *aer_cb_arg);
/**
* \brief Send the given NVM I/O command to the NVMe controller.
*
* This is a low level interface for submitting I/O commands directly. Prefer
* the nvme_ns_cmd_* functions instead. The validity of the command will
* the spdk_nvme_ns_cmd_* functions instead. The validity of the command will
* not be checked!
*
* When constructing the nvme_command it is not necessary to fill out the PRP
* list/SGL or the CID. The driver will handle both of those for you.
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*
*/
int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Process any outstanding completions for I/O submitted on the current thread.
@@ -204,13 +204,14 @@ int nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
* the SPDK NVMe driver.
*
*/
int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions);
int32_t spdk_nvme_ctrlr_process_io_completions(struct spdk_nvme_ctrlr *ctrlr,
uint32_t max_completions);
/**
* \brief Send the given admin command to the NVMe controller.
*
* This is a low level interface for submitting admin commands directly. Prefer
* the nvme_ctrlr_cmd_* functions instead. The validity of the command will
* the spdk_nvme_ctrlr_cmd_* functions instead. The validity of the command will
* not be checked!
*
* When constructing the nvme_command it is not necessary to fill out the PRP
@@ -219,13 +220,13 @@ int32_t nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* Call \ref spdk_nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
*/
int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Process any outstanding completions for admin commands.
@@ -241,29 +242,29 @@ int nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
int32_t nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr);
int32_t spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr);
/** \brief Opaque handle to a namespace. Obtained by calling nvme_ctrlr_get_ns(). */
struct nvme_namespace;
/** \brief Opaque handle to a namespace. Obtained by calling spdk_nvme_ctrlr_get_ns(). */
struct spdk_nvme_ns;
/**
* \brief Get a handle to a namespace for the given controller.
*
* Namespaces are numbered from 1 to the total number of namespaces. There will never
* be any gaps in the numbering. The number of namespaces is obtained by calling
* nvme_ctrlr_get_num_ns().
* spdk_nvme_ctrlr_get_num_ns().
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
struct nvme_namespace *nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t ns_id);
struct spdk_nvme_ns *spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id);
/**
* \brief Get a specific log page from the NVMe controller.
*
* \param log_page The log page identifier.
* \param nsid Depending on the log page, this may be 0, a namespace identifier, or NVME_GLOBAL_NAMESPACE_TAG.
* \param nsid Depending on the log page, this may be 0, a namespace identifier, or SPDK_NVME_GLOBAL_NS_TAG.
* \param payload The pointer to the payload buffer.
* \param payload_size The size of payload buffer.
* \param cb_fn Callback function to invoke when the log page has been retrieved.
@@ -274,15 +275,15 @@ struct nvme_namespace *nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* Call \ref spdk_nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
*
* \sa nvme_ctrlr_is_log_page_supported()
* \sa spdk_nvme_ctrlr_is_log_page_supported()
*/
int nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
uint8_t log_page, uint32_t nsid,
void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Set specific feature for the given NVMe controller.
@@ -300,15 +301,15 @@ int nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* Call \ref spdk_nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
*
* \sa nvme_ctrlr_cmd_set_feature()
* \sa spdk_nvme_ctrlr_cmd_get_feature()
*/
int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
uint8_t feature, uint32_t cdw11, uint32_t cdw12,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr,
uint8_t feature, uint32_t cdw11, uint32_t cdw12,
void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Get specific feature from given NVMe controller.
@@ -325,15 +326,15 @@ int nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*
* Call \ref nvme_ctrlr_process_admin_completions() to poll for completion
* Call \ref spdk_nvme_ctrlr_process_admin_completions() to poll for completion
* of commands submitted through this function.
*
* \sa nvme_ctrlr_cmd_get_feature()
* \sa spdk_nvme_ctrlr_cmd_set_feature()
*/
int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
uint8_t feature, uint32_t cdw11,
void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr,
uint8_t feature, uint32_t cdw11,
void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Get the identify namespace data as defined by the NVMe specification.
@@ -341,7 +342,7 @@ int nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
const struct spdk_nvme_ns_data *nvme_ns_get_data(struct nvme_namespace *ns);
const struct spdk_nvme_ns_data *spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns);
/**
* \brief Get the namespace id (index number) from the given namespace handle.
@@ -349,7 +350,7 @@ const struct spdk_nvme_ns_data *nvme_ns_get_data(struct nvme_namespace *ns);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_id(struct nvme_namespace *ns);
uint32_t spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns);
/**
* \brief Get the maximum transfer size, in bytes, for an I/O sent to the given namespace.
@@ -357,7 +358,7 @@ uint32_t nvme_ns_get_id(struct nvme_namespace *ns);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
uint32_t spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns);
/**
* \brief Get the sector size, in bytes, of the given namespace.
@@ -365,7 +366,7 @@ uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns);
uint32_t spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns);
/**
* \brief Get the number of sectors for the given namespace.
@@ -373,7 +374,7 @@ uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
uint64_t spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns);
/**
* \brief Get the size, in bytes, of the given namespace.
@@ -381,34 +382,34 @@ uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint64_t nvme_ns_get_size(struct nvme_namespace *ns);
uint64_t spdk_nvme_ns_get_size(struct spdk_nvme_ns *ns);
/**
* \brief Namespace command support flags.
*/
enum nvme_namespace_flags {
NVME_NS_DEALLOCATE_SUPPORTED = 0x1, /**< The deallocate command is supported */
NVME_NS_FLUSH_SUPPORTED = 0x2, /**< The flush command is supported */
NVME_NS_RESERVATION_SUPPORTED = 0x4, /**< The reservation command is supported */
NVME_NS_WRITE_ZEROES_SUPPORTED = 0x8, /**< The write zeroes command is supported */
enum spdk_nvme_ns_flags {
SPDK_NVME_NS_DEALLOCATE_SUPPORTED = 0x1, /**< The deallocate command is supported */
SPDK_NVME_NS_FLUSH_SUPPORTED = 0x2, /**< The flush command is supported */
SPDK_NVME_NS_RESERVATION_SUPPORTED = 0x4, /**< The reservation command is supported */
SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED = 0x8, /**< The write zeroes command is supported */
};
/**
* \brief Get the flags for the given namespace.
*
* See nvme_namespace_flags for the possible flags returned.
* See spdk_nvme_ns_flags for the possible flags returned.
*
* This function is thread safe and can be called at any point while the controller is attached to
* the SPDK NVMe driver.
*/
uint32_t nvme_ns_get_flags(struct nvme_namespace *ns);
uint32_t spdk_nvme_ns_get_flags(struct spdk_nvme_ns *ns);
/**
* Restart the SGL walk to the specified offset when the command has scattered payloads.
*
* The cb_arg parameter is the value passed to readv/writev.
*/
typedef void (*nvme_req_reset_sgl_fn_t)(void *cb_arg, uint32_t offset);
typedef void (*spdk_nvme_req_reset_sgl_cb)(void *cb_arg, uint32_t offset);
/**
* Fill out *address and *length with the current SGL entry and advance to the next
@@ -418,7 +419,7 @@ typedef void (*nvme_req_reset_sgl_fn_t)(void *cb_arg, uint32_t offset);
* The address parameter contains the physical address of this segment.
* The length parameter contains the length of this physical segment.
*/
typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *address, uint32_t *length);
typedef int (*spdk_nvme_req_next_sge_cb)(void *cb_arg, uint64_t *address, uint32_t *length);
/**
* \brief Submits a write I/O to the specified NVMe namespace.
@@ -429,18 +430,18 @@ typedef int (*nvme_req_next_sge_fn_t)(void *cb_arg, uint64_t *address, uint32_t
* \param lba_count length (in sectors) for the write operation
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the NVME_IO_FLAGS_* entries
* \param io_flags set flags, defined by the SPDK_NVME_IO_FLAGS_* entries
* in spdk/nvme_spec.h, for this I/O.
*
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg, uint32_t io_flags);
int spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *payload,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, uint32_t io_flags);
/**
* \brief Submits a write I/O to the specified NVMe namespace.
@@ -459,12 +460,12 @@ int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
nvme_req_reset_sgl_fn_t reset_sgl_fn,
nvme_req_next_sge_fn_t next_sge_fn);
int spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn);
/**
* \brief Submits a write zeroes I/O to the specified NVMe namespace.
@@ -474,18 +475,18 @@ int nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_cou
* \param lba_count length (in sectors) for the write zero operation
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
* \param io_flags set flags, defined by the NVME_IO_FLAGS_* entries
* \param io_flags set flags, defined by the SPDK_NVME_IO_FLAGS_* entries
* in spdk/nvme_spec.h, for this I/O.
*
* \return 0 if successfully submitted, ENOMEM if an nvme_request
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
uint32_t io_flags);
int spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags);
/**
* \brief Submits a read I/O to the specified NVMe namespace.
@@ -502,11 +503,11 @@ int nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
void *cb_arg, uint32_t io_flags);
int spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *payload,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, uint32_t io_flags);
/**
* \brief Submits a read I/O to the specified NVMe namespace.
@@ -525,13 +526,12 @@ int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
nvme_req_reset_sgl_fn_t reset_sgl_fn,
nvme_req_next_sge_fn_t next_sge_fn);
int spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn);
/**
* \brief Submits a deallocation request to the specified NVMe namespace.
@@ -540,7 +540,7 @@ int nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_coun
* \param payload virtual address pointer to the list of LBA ranges to
* deallocate
* \param num_ranges number of ranges in the list pointed to by payload; must be
* between 1 and \ref NVME_DATASET_MANAGEMENT_MAX_RANGES, inclusive.
* between 1 and \ref SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES, inclusive.
* \param cb_fn callback function to invoke when the I/O is completed
* \param cb_arg argument to pass to the callback function
*
@@ -548,11 +548,11 @@ int nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_coun
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint16_t num_ranges, nvme_cb_fn_t cb_fn,
void *cb_arg);
int spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn,
void *cb_arg);
/**
* \brief Submits a flush request to the specified NVMe namespace.
@@ -565,10 +565,9 @@ int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
void *cb_arg);
int spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a reservation register to the specified NVMe namespace.
@@ -585,14 +584,14 @@ int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
enum spdk_nvme_reservation_register_cptpl cptpl,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
enum spdk_nvme_reservation_register_cptpl cptpl,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a reservation release to the specified NVMe namespace.
@@ -609,14 +608,14 @@ int nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
enum spdk_nvme_reservation_type type,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
enum spdk_nvme_reservation_type type,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a reservation acquire to the specified NVMe namespace.
@@ -633,14 +632,14 @@ int nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
enum spdk_nvme_reservation_type type,
nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
enum spdk_nvme_reservation_type type,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Submits a reservation report to the specified NVMe namespace.
@@ -655,10 +654,10 @@ int nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
* structure cannot be allocated for the I/O request
*
* This function is thread safe and can be called at any point after
* nvme_register_io_thread().
* spdk_nvme_register_io_thread().
*/
int nvme_ns_cmd_reservation_report(struct nvme_namespace *ns, void *payload,
uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg);
int spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
/**
* \brief Get the size, in bytes, of an nvme_request.
@@ -669,10 +668,10 @@ int nvme_ns_cmd_reservation_report(struct nvme_namespace *ns, void *payload,
* This function is thread safe and can be called at any time.
*
*/
size_t nvme_request_size(void);
size_t spdk_nvme_request_size(void);
int nvme_register_io_thread(void);
void nvme_unregister_io_thread(void);
int spdk_nvme_register_io_thread(void);
void spdk_nvme_unregister_io_thread(void);
#ifdef __cplusplus
}
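
Of the renamed APIs above, the vectored variants are the least obvious:
spdk_nvme_ns_cmd_readv()/writev() take no buffer and instead pull
physical segments from the two callbacks. A sketch of a two-segment walk
(sgl_ctx, reset_sgl, next_sge and read_done are hypothetical names;
segments must already be pinned and translated to physical addresses,
and a full implementation would honor the offset argument):

    struct sgl_ctx {
        uint64_t phys_addr[2]; /* physical address of each segment */
        uint32_t len[2];
        int idx;
    };

    static void
    reset_sgl(void *cb_arg, uint32_t offset)
    {
        struct sgl_ctx *ctx = cb_arg;

        ctx->idx = 0; /* restart the walk (offset assumed 0 here) */
    }

    static int
    next_sge(void *cb_arg, uint64_t *address, uint32_t *length)
    {
        struct sgl_ctx *ctx = cb_arg;

        *address = ctx->phys_addr[ctx->idx];
        *length = ctx->len[ctx->idx];
        ctx->idx++;
        return 0;
    }

    /* spdk_nvme_ns_cmd_readv(ns, lba, lba_count, read_done, &ctx, 0,
     *                        reset_sgl, next_sge); */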


@@ -44,7 +44,7 @@ struct nvme_driver g_nvme_driver = {
.attached_ctrlrs = TAILQ_HEAD_INITIALIZER(g_nvme_driver.attached_ctrlrs),
};
int32_t nvme_retry_count;
int32_t spdk_nvme_retry_count;
__thread int nvme_thread_ioq_index = -1;
@@ -68,14 +68,14 @@ __thread int nvme_thread_ioq_index = -1;
*/
static struct nvme_controller *
static struct spdk_nvme_ctrlr *
nvme_attach(void *devhandle)
{
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
int status;
uint64_t phys_addr = 0;
ctrlr = nvme_malloc("nvme_ctrlr", sizeof(struct nvme_controller),
ctrlr = nvme_malloc("nvme_ctrlr", sizeof(struct spdk_nvme_ctrlr),
64, &phys_addr);
if (ctrlr == NULL) {
nvme_printf(NULL, "could not allocate ctrlr\n");
@@ -92,7 +92,7 @@ nvme_attach(void *devhandle)
}
int
nvme_detach(struct nvme_controller *ctrlr)
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_driver *driver = &g_nvme_driver;
@@ -121,14 +121,14 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
}
size_t
nvme_request_size(void)
spdk_nvme_request_size(void)
{
return sizeof(struct nvme_request);
}
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req = NULL;
@@ -158,7 +158,8 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
}
struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_payload payload;
@@ -169,7 +170,7 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t c
}
struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
@@ -229,7 +230,7 @@ nvme_free_ioq_index(void)
}
int
nvme_register_io_thread(void)
spdk_nvme_register_io_thread(void)
{
int rc = 0;
@@ -247,13 +248,13 @@ nvme_register_io_thread(void)
}
void
nvme_unregister_io_thread(void)
spdk_nvme_unregister_io_thread(void)
{
nvme_free_ioq_index();
}
struct nvme_enum_ctx {
nvme_probe_cb probe_cb;
spdk_nvme_probe_cb probe_cb;
void *cb_ctx;
};
@@ -262,7 +263,7 @@ static int
nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
{
struct nvme_enum_ctx *enum_ctx = ctx;
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
/* Verify that this controller is not already attached */
TAILQ_FOREACH(ctrlr, &g_nvme_driver.attached_ctrlrs, tailq) {
@@ -288,11 +289,11 @@ nvme_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
}
int
nvme_probe(void *cb_ctx, nvme_probe_cb probe_cb, nvme_attach_cb attach_cb)
spdk_nvme_probe(void *cb_ctx, spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb)
{
int rc, start_rc;
struct nvme_enum_ctx enum_ctx;
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
nvme_mutex_lock(&g_nvme_driver.lock);
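
spdk_nvme_request_size() above is the allocation contract with the
application: each example in this commit sizes the "nvme_request"
mempool from it before calling spdk_nvme_probe(). The setup, as used
verbatim in the examples (error handling sketched):

    request_mempool = rte_mempool_create("nvme_request", 8192,
                                         spdk_nvme_request_size(), 128, 0,
                                         NULL, NULL, NULL, NULL,
                                         SOCKET_ID_ANY, 0);
    if (request_mempool == NULL) {
        fprintf(stderr, "could not create nvme_request mempool\n");
        return 1;
    }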


@@ -39,11 +39,11 @@
*
*/
static int nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_async_event_request *aer);
static void
nvme_ctrlr_construct_intel_support_log_page_list(struct nvme_controller *ctrlr,
nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_intel_log_page_directory *log_page_directory)
{
struct spdk_pci_device *dev;
@@ -76,7 +76,7 @@ nvme_ctrlr_construct_intel_support_log_page_list(struct nvme_controller *ctrlr,
}
}
static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_controller *ctrlr)
static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
uint64_t phys_addr = 0;
struct nvme_completion_poll_status status;
@@ -91,10 +91,10 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_controller *ctrlr)
}
status.done = false;
nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory),
nvme_completion_poll_cb,
&status);
spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory),
nvme_completion_poll_cb,
&status);
while (status.done == false) {
nvme_qpair_process_completions(&ctrlr->adminq, 0);
}
@@ -110,7 +110,7 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_set_supported_log_pages(struct nvme_controller *ctrlr)
nvme_ctrlr_set_supported_log_pages(struct spdk_nvme_ctrlr *ctrlr)
{
memset(ctrlr->log_page_supported, 0, sizeof(ctrlr->log_page_supported));
/* Mandatory pages */
@ -126,7 +126,7 @@ nvme_ctrlr_set_supported_log_pages(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_set_intel_supported_features(struct nvme_controller *ctrlr)
nvme_ctrlr_set_intel_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_MAX_LBA] = true;
ctrlr->feature_supported[SPDK_NVME_INTEL_FEAT_NATIVE_MAX_LBA] = true;
@ -138,7 +138,7 @@ nvme_ctrlr_set_intel_supported_features(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_set_supported_features(struct nvme_controller *ctrlr)
nvme_ctrlr_set_supported_features(struct spdk_nvme_ctrlr *ctrlr)
{
memset(ctrlr->feature_supported, 0, sizeof(ctrlr->feature_supported));
/* Mandatory features */
@ -167,7 +167,7 @@ nvme_ctrlr_set_supported_features(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
nvme_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
{
return nvme_qpair_construct(&ctrlr->adminq,
0, /* qpair ID */
@ -177,7 +177,7 @@ nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
nvme_ctrlr_construct_io_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_qpair *qpair;
union spdk_nvme_cap_lo_register cap_lo;
@ -236,7 +236,7 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_fail(struct nvme_controller *ctrlr)
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
uint32_t i;
@ -248,7 +248,7 @@ nvme_ctrlr_fail(struct nvme_controller *ctrlr)
}
static int
_nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_ready_value)
_nvme_ctrlr_wait_for_ready(struct spdk_nvme_ctrlr *ctrlr, int desired_ready_value)
{
int ms_waited, ready_timeout_in_ms;
union spdk_nvme_csts_register csts;
@ -276,7 +276,7 @@ _nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_ready_valu
}
static int
nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
nvme_ctrlr_wait_for_ready(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_cc_register cc;
@ -291,7 +291,7 @@ nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_disable(struct nvme_controller *ctrlr)
nvme_ctrlr_disable(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_cc_register cc;
union spdk_nvme_csts_register csts;
@ -310,7 +310,7 @@ nvme_ctrlr_disable(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
nvme_ctrlr_shutdown(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_cc_register cc;
union spdk_nvme_csts_register csts;
@ -338,7 +338,7 @@ nvme_ctrlr_shutdown(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_enable(struct nvme_controller *ctrlr)
nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_cc_register cc;
union spdk_nvme_csts_register csts;
@ -380,7 +380,7 @@ nvme_ctrlr_enable(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
nvme_ctrlr_hw_reset(struct spdk_nvme_ctrlr *ctrlr)
{
uint32_t i;
int rc;
@ -408,7 +408,7 @@ nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
}
int
nvme_ctrlr_reset(struct nvme_controller *ctrlr)
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
@ -441,7 +441,7 @@ nvme_ctrlr_reset(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_identify(struct nvme_controller *ctrlr)
nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_completion_poll_status status;
@ -469,7 +469,7 @@ nvme_ctrlr_identify(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_driver *driver = &g_nvme_driver;
struct nvme_completion_poll_status status;
@ -510,7 +510,7 @@ nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
nvme_ctrlr_create_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_completion_poll_status status;
struct nvme_qpair *qpair;
@ -553,7 +553,7 @@ nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
}
static void
nvme_ctrlr_destruct_namespaces(struct nvme_controller *ctrlr)
nvme_ctrlr_destruct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
if (ctrlr->ns) {
uint32_t i, num_ns = ctrlr->num_ns;
@ -574,7 +574,7 @@ nvme_ctrlr_destruct_namespaces(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
nvme_ctrlr_construct_namespaces(struct spdk_nvme_ctrlr *ctrlr)
{
uint32_t i, nn = ctrlr->cdata.nn;
uint64_t phys_addr = 0;
@ -590,7 +590,7 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
if (nn != ctrlr->num_ns) {
nvme_ctrlr_destruct_namespaces(ctrlr);
ctrlr->ns = calloc(nn, sizeof(struct nvme_namespace));
ctrlr->ns = calloc(nn, sizeof(struct spdk_nvme_ns));
if (ctrlr->ns == NULL) {
goto fail;
}
@ -606,7 +606,7 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
}
for (i = 0; i < nn; i++) {
struct nvme_namespace *ns = &ctrlr->ns[i];
struct spdk_nvme_ns *ns = &ctrlr->ns[i];
uint32_t nsid = i + 1;
if (nvme_ns_construct(ns, nsid, ctrlr) != 0) {
@ -625,7 +625,7 @@ static void
nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
struct nvme_async_event_request *aer = arg;
struct nvme_controller *ctrlr = aer->ctrlr;
struct spdk_nvme_ctrlr *ctrlr = aer->ctrlr;
if (cpl->status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION) {
/*
@ -655,7 +655,7 @@ nvme_ctrlr_async_event_cb(void *arg, const struct spdk_nvme_cpl *cpl)
}
static int
nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_async_event_request *aer)
{
struct nvme_request *req;
@ -679,7 +679,7 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
}
static int
nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_critical_warning_state state;
struct nvme_async_event_request *aer;
@ -715,7 +715,7 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
}
int
nvme_ctrlr_start(struct nvme_controller *ctrlr)
nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
{
if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
return -1;
@ -751,7 +751,7 @@ nvme_ctrlr_start(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_allocate_bars(struct nvme_controller *ctrlr)
nvme_ctrlr_allocate_bars(struct spdk_nvme_ctrlr *ctrlr)
{
int rc;
void *addr;
@ -767,7 +767,7 @@ nvme_ctrlr_allocate_bars(struct nvme_controller *ctrlr)
}
static int
nvme_ctrlr_free_bars(struct nvme_controller *ctrlr)
nvme_ctrlr_free_bars(struct spdk_nvme_ctrlr *ctrlr)
{
int rc = 0;
void *addr = (void *)ctrlr->regs;
@ -779,7 +779,7 @@ nvme_ctrlr_free_bars(struct nvme_controller *ctrlr)
}
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle)
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
union spdk_nvme_cap_hi_register cap_hi;
uint32_t cmd_reg;
@ -819,7 +819,7 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle)
}
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr)
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
uint32_t i;
@ -841,14 +841,14 @@ nvme_ctrlr_destruct(struct nvme_controller *ctrlr)
}
void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
struct nvme_qpair *qpair;
@ -860,14 +860,14 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
}
int32_t
nvme_ctrlr_process_io_completions(struct nvme_controller *ctrlr, uint32_t max_completions)
spdk_nvme_ctrlr_process_io_completions(struct spdk_nvme_ctrlr *ctrlr, uint32_t max_completions)
{
nvme_assert(nvme_thread_ioq_index >= 0, ("no ioq_index assigned for thread\n"));
return nvme_qpair_process_completions(&ctrlr->ioq[nvme_thread_ioq_index], max_completions);
}
int32_t
nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr)
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
int32_t num_completions;
@ -879,20 +879,19 @@ nvme_ctrlr_process_admin_completions(struct nvme_controller *ctrlr)
}
const struct spdk_nvme_ctrlr_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
return &ctrlr->cdata;
}
uint32_t
nvme_ctrlr_get_num_ns(struct nvme_controller *ctrlr)
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
return ctrlr->num_ns;
}
struct nvme_namespace *
nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t ns_id)
struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id)
{
if (ns_id < 1 || ns_id > ctrlr->num_ns) {
return NULL;
@ -902,16 +901,16 @@ nvme_ctrlr_get_ns(struct nvme_controller *ctrlr, uint32_t ns_id)
}
void
nvme_ctrlr_register_aer_callback(struct nvme_controller *ctrlr,
nvme_aer_cb_fn_t aer_cb_fn,
void *aer_cb_arg)
spdk_nvme_ctrlr_register_aer_callback(struct spdk_nvme_ctrlr *ctrlr,
spdk_nvme_aer_cb aer_cb_fn,
void *aer_cb_arg)
{
ctrlr->aer_cb_fn = aer_cb_fn;
ctrlr->aer_cb_arg = aer_cb_arg;
}
bool
nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, uint8_t log_page)
spdk_nvme_ctrlr_is_log_page_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page)
{
/* No bounds check necessary, since log_page is uint8_t and log_page_supported has 256 entries */
SPDK_STATIC_ASSERT(sizeof(ctrlr->log_page_supported) == 256, "log_page_supported size mismatch");
@ -919,7 +918,7 @@ nvme_ctrlr_is_log_page_supported(struct nvme_controller *ctrlr, uint8_t log_page
}
bool
nvme_ctrlr_is_feature_supported(struct nvme_controller *ctrlr, uint8_t feature_code)
spdk_nvme_ctrlr_is_feature_supported(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature_code)
{
/* No bounds check necessary, since feature_code is uint8_t and feature_supported has 256 entries */
SPDK_STATIC_ASSERT(sizeof(ctrlr->feature_supported) == 256, "feature_supported size mismatch");
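
As a hedged usage sketch, callers can now gate optional pages on this query before issuing the command; ctrlr and page here are assumed to be a live controller handle and a DMA-safe buffer.

/* Only request the Intel SMART page when the controller advertises it. */
if (spdk_nvme_ctrlr_is_log_page_supported(ctrlr, SPDK_NVME_INTEL_LOG_SMART)) {
	if (spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_SMART,
			SPDK_NVME_GLOBAL_NS_TAG, page, sizeof(*page),
			get_log_page_completion, NULL)) {
		printf("spdk_nvme_ctrlr_cmd_get_log_page() failed\n");
	}
}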

View File

@ -34,10 +34,10 @@
#include "nvme_internal.h"
int
nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
@ -54,10 +54,10 @@ nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
}
int
nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_cmd *cmd,
void *buf, uint32_t len,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
@ -77,8 +77,8 @@ nvme_ctrlr_cmd_admin_raw(struct nvme_controller *ctrlr,
}
void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -100,8 +100,8 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
}
void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr, uint16_t nsid,
void *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -122,8 +122,8 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
}
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn,
nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_request *req;
@ -150,8 +150,8 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
}
void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -174,9 +174,9 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
}
int
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -201,9 +201,9 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
}
int
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
uint32_t cdw11, void *payload, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
uint32_t cdw11, void *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -227,33 +227,32 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
}
void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
uint32_t cdw11;
cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0,
NULL, 0, cb_fn, cb_arg);
spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0,
NULL, 0, cb_fn, cb_arg);
}
void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
union spdk_nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
union spdk_nvme_critical_warning_state state, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
uint32_t cdw11;
cdw11 = state.raw;
nvme_ctrlr_cmd_set_feature(ctrlr,
SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, NULL, 0, cb_fn,
cb_arg);
spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0, NULL, 0,
cb_fn, cb_arg);
}
int
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
void *cb_arg)
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
uint32_t nsid, void *payload, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -278,8 +277,8 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
}
void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, uint16_t cid,
uint16_t sqid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
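
For completeness, a sketch of the renamed feature wrappers from the caller's side; the done flag and the completion callback wiring are placeholders.

bool done = false;	/* assumed to be set by get_feature_completion() */

/* Read the arbitration feature through the renamed wrapper instead of
 * building a raw admin command by hand, then poll the admin queue. */
if (spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
		0 /* cdw11 */, NULL, 0, get_feature_completion, &done) == 0) {
	while (!done) {
		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
	}
}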

View File

@ -142,8 +142,8 @@ struct __attribute__((packed)) nvme_payload {
* Functions for retrieving physical addresses for scattered payloads.
*/
struct {
nvme_req_reset_sgl_fn_t reset_sgl_fn;
nvme_req_next_sge_fn_t next_sge_fn;
spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
spdk_nvme_req_next_sge_cb next_sge_fn;
} sgl;
} u;
@ -175,7 +175,7 @@ struct nvme_request {
*/
uint32_t payload_offset;
nvme_cb_fn_t cb_fn;
spdk_nvme_cmd_cb cb_fn;
void *cb_arg;
STAILQ_ENTRY(nvme_request) stailq;
@ -221,7 +221,7 @@ struct nvme_completion_poll_status {
};
struct nvme_async_event_request {
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
struct nvme_request *req;
struct spdk_nvme_cpl cpl;
};
@ -270,14 +270,14 @@ struct nvme_qpair {
/*
* Fields below this point should not be touched on the normal I/O happy path.
*/
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
uint64_t cmd_bus_addr;
uint64_t cpl_bus_addr;
};
struct nvme_namespace {
struct nvme_controller *ctrlr;
struct spdk_nvme_ns {
struct spdk_nvme_ctrlr *ctrlr;
uint32_t stripe_size;
uint32_t sector_size;
uint32_t sectors_per_max_io;
@ -289,7 +289,7 @@ struct nvme_namespace {
/*
* One of these per allocated PCI device.
*/
struct nvme_controller {
struct spdk_nvme_ctrlr {
/* Hot data (accessed in I/O path) starts here. */
/** NVMe MMIO register space */
@ -299,7 +299,7 @@ struct nvme_controller {
struct nvme_qpair *ioq;
/** Array of namespaces indexed by nsid - 1 */
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
uint32_t num_ns;
@ -309,7 +309,7 @@ struct nvme_controller {
/* Cold data (not accessed in normal I/O path) is after this point. */
TAILQ_ENTRY(nvme_controller) tailq;
TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
/** All the log pages supported */
bool log_page_supported[256];
@ -333,7 +333,7 @@ struct nvme_controller {
uint32_t num_aers;
struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
nvme_aer_cb_fn_t aer_cb_fn;
spdk_nvme_aer_cb aer_cb_fn;
void *aer_cb_arg;
/** guards access to the controller itself, including admin queues */
@ -362,8 +362,8 @@ struct nvme_driver {
uint16_t *ioq_index_pool;
uint32_t max_io_queues;
uint16_t ioq_index_pool_next;
TAILQ_HEAD(, nvme_controller) init_ctrlrs;
TAILQ_HEAD(, nvme_controller) attached_ctrlrs;
TAILQ_HEAD(, spdk_nvme_ctrlr) init_ctrlrs;
TAILQ_HEAD(, spdk_nvme_ctrlr) attached_ctrlrs;
};
struct pci_id {
@ -407,44 +407,44 @@ nvme_align32pow2(uint32_t x)
}
/* Admin functions */
void nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
void nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr,
void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr,
uint16_t nsid, void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
uint32_t num_queues, nvme_cb_fn_t cb_fn,
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
void *cb_arg);
void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
void nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
union spdk_nvme_critical_warning_state state,
nvme_cb_fn_t cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, uint16_t cid,
uint16_t sqid, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
int nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle);
void nvme_ctrlr_destruct(struct nvme_controller *ctrlr);
int nvme_ctrlr_start(struct nvme_controller *ctrlr);
int nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle);
void nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
int nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);
void nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
void nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
void nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
void nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
void nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
void nvme_ctrlr_post_failed_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req);
int nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id,
uint16_t num_entries,
uint16_t num_trackers,
struct nvme_controller *ctrlr);
struct spdk_nvme_ctrlr *ctrlr);
void nvme_qpair_destroy(struct nvme_qpair *qpair);
void nvme_qpair_enable(struct nvme_qpair *qpair);
void nvme_qpair_disable(struct nvme_qpair *qpair);
@ -454,15 +454,16 @@ void nvme_qpair_submit_request(struct nvme_qpair *qpair,
void nvme_qpair_reset(struct nvme_qpair *qpair);
void nvme_qpair_fail(struct nvme_qpair *qpair);
int nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
struct nvme_controller *ctrlr);
void nvme_ns_destruct(struct nvme_namespace *ns);
int nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
struct spdk_nvme_ctrlr *ctrlr);
void nvme_ns_destruct(struct spdk_nvme_ns *ns);
struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg);
uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg);
struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
nvme_cb_fn_t cb_fn, void *cb_arg);
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void nvme_free_request(struct nvme_request *req);
bool nvme_intel_has_quirk(struct pci_id *id, uint64_t quirk);
#endif /* __NVME_INTERNAL_H__ */
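
The ioq_index_pool and per-thread ioq index above encode a per-thread queue ownership model: every I/O thread must claim an index before submitting and release it when done. A sketch of that calling convention, with the worker body elided:

/* Each I/O thread brackets its work with register/unregister so that
 * spdk_nvme_ctrlr_process_io_completions() has a queue index to use. */
if (spdk_nvme_register_io_thread() != 0) {
	fprintf(stderr, "spdk_nvme_register_io_thread() failed\n");
	return -1;
}
/* ... submit I/O and poll spdk_nvme_ctrlr_process_io_completions() ... */
spdk_nvme_unregister_io_thread();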

View File

@ -34,56 +34,56 @@
#include "nvme_internal.h"
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct nvme_namespace *ns)
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
return &ns->ctrlr->nsdata[ns->id - 1];
}
uint32_t
nvme_ns_get_id(struct nvme_namespace *ns)
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
return ns->id;
}
uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
return ns->ctrlr->max_xfer_size;
}
uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
return ns->sector_size;
}
uint64_t
nvme_ns_get_num_sectors(struct nvme_namespace *ns)
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
return _nvme_ns_get_data(ns)->nsze;
}
uint64_t
nvme_ns_get_size(struct nvme_namespace *ns)
spdk_nvme_ns_get_size(struct spdk_nvme_ns *ns)
{
return nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns);
return spdk_nvme_ns_get_num_sectors(ns) * spdk_nvme_ns_get_sector_size(ns);
}
uint32_t
nvme_ns_get_flags(struct nvme_namespace *ns)
spdk_nvme_ns_get_flags(struct spdk_nvme_ns *ns)
{
return ns->flags;
}
const struct spdk_nvme_ns_data *
nvme_ns_get_data(struct nvme_namespace *ns)
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
return _nvme_ns_get_data(ns);
}
int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
struct nvme_controller *ctrlr)
nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_completion_poll_status status;
struct spdk_nvme_ns_data *nsdata;
@ -115,29 +115,29 @@ nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
ns->sector_size = 1 << nsdata->lbaf[nsdata->flbas.format].lbads;
ns->sectors_per_max_io = nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
ns->sectors_per_stripe = ns->stripe_size / ns->sector_size;
if (ctrlr->cdata.oncs.dsm) {
ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;
ns->flags |= SPDK_NVME_NS_DEALLOCATE_SUPPORTED;
}
if (ctrlr->cdata.vwc.present) {
ns->flags |= NVME_NS_FLUSH_SUPPORTED;
ns->flags |= SPDK_NVME_NS_FLUSH_SUPPORTED;
}
if (ctrlr->cdata.oncs.write_zeroes) {
ns->flags |= NVME_NS_WRITE_ZEROES_SUPPORTED;
ns->flags |= SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED;
}
if (nsdata->nsrescap.raw) {
ns->flags |= NVME_NS_RESERVATION_SUPPORTED;
ns->flags |= SPDK_NVME_NS_RESERVATION_SUPPORTED;
}
return 0;
}
void nvme_ns_destruct(struct nvme_namespace *ns)
void nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}
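
A worked example of the size accessor's arithmetic, assuming ns is a valid handle from spdk_nvme_ctrlr_get_ns(): a namespace reporting 1953525168 sectors of 512 bytes yields 1953525168 * 512 = 1000204886016 bytes (~1.0 TB).

/* Equivalent to spdk_nvme_ns_get_size(ns). */
uint64_t ns_bytes = spdk_nvme_ns_get_num_sectors(ns) *
		    spdk_nvme_ns_get_sector_size(ns);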

View File

@ -38,9 +38,9 @@
*
*/
static struct nvme_request *_nvme_ns_cmd_rw(struct nvme_namespace *ns,
static struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
const struct nvme_payload *payload, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
void *cb_arg, uint32_t opc, uint32_t io_flags);
static void
@ -87,10 +87,10 @@ nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
}
static struct nvme_request *
_nvme_ns_cmd_split_request(struct nvme_namespace *ns,
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
const struct nvme_payload *payload,
uint64_t lba, uint32_t lba_count,
nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
uint32_t io_flags, struct nvme_request *req,
uint32_t sectors_per_max_io, uint32_t sector_mask)
{
@ -120,8 +120,8 @@ _nvme_ns_cmd_split_request(struct nvme_namespace *ns,
}
static struct nvme_request *
_nvme_ns_cmd_rw(struct nvme_namespace *ns, const struct nvme_payload *payload,
uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t opc,
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
uint32_t io_flags)
{
struct nvme_request *req;
@ -175,9 +175,9 @@ _nvme_ns_cmd_rw(struct nvme_namespace *ns, const struct nvme_payload *payload,
}
int
nvme_ns_cmd_read(struct nvme_namespace *ns, void *buffer, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
uint32_t io_flags)
spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
struct nvme_request *req;
struct nvme_payload payload;
@ -195,10 +195,10 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *buffer, uint64_t lba,
}
int
nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
nvme_req_reset_sgl_fn_t reset_sgl_fn,
nvme_req_next_sge_fn_t next_sge_fn)
spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn)
{
struct nvme_request *req;
struct nvme_payload payload;
@ -220,9 +220,9 @@ nvme_ns_cmd_readv(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
}
int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *buffer, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
uint32_t io_flags)
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
struct nvme_request *req;
struct nvme_payload payload;
@ -240,10 +240,10 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *buffer, uint64_t lba,
}
int
nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
nvme_cb_fn_t cb_fn, void *cb_arg, uint32_t io_flags,
nvme_req_reset_sgl_fn_t reset_sgl_fn,
nvme_req_next_sge_fn_t next_sge_fn)
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
spdk_nvme_req_next_sge_cb next_sge_fn)
{
struct nvme_request *req;
struct nvme_payload payload;
@ -265,9 +265,9 @@ nvme_ns_cmd_writev(struct nvme_namespace *ns, uint64_t lba, uint32_t lba_count,
}
int
nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba,
uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg,
uint32_t io_flags)
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
uint32_t io_flags)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -297,8 +297,8 @@ nvme_ns_cmd_write_zeroes(struct nvme_namespace *ns, uint64_t lba,
}
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
uint16_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -328,7 +328,7 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
}
int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -348,12 +348,12 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
}
int
nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
enum spdk_nvme_reservation_register_cptpl cptpl,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_register_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_register_action action,
enum spdk_nvme_reservation_register_cptpl cptpl,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -382,12 +382,12 @@ nvme_ns_cmd_reservation_register(struct nvme_namespace *ns,
}
int
nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
enum spdk_nvme_reservation_type type,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_key_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_release_action action,
enum spdk_nvme_reservation_type type,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -415,12 +415,12 @@ nvme_ns_cmd_reservation_release(struct nvme_namespace *ns,
}
int
nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
enum spdk_nvme_reservation_type type,
nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
struct spdk_nvme_reservation_acquire_data *payload,
bool ignore_key,
enum spdk_nvme_reservation_acquire_action action,
enum spdk_nvme_reservation_type type,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
struct nvme_request *req;
struct spdk_nvme_cmd *cmd;
@ -449,8 +449,8 @@ nvme_ns_cmd_reservation_acquire(struct nvme_namespace *ns,
}
int
nvme_ns_cmd_reservation_report(struct nvme_namespace *ns, void *payload,
uint32_t len, nvme_cb_fn_t cb_fn, void *cb_arg)
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
uint32_t num_dwords;
struct nvme_request *req;
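
A sketch of a single read through the renamed I/O path; buf must be DMA-safe memory (e.g. from rte_zmalloc()), and read_done/read_completed are caller-supplied placeholders.

/* read_completed is assumed to be set by the read_done() callback. */
rc = spdk_nvme_ns_cmd_read(ns, buf, 0 /* starting LBA */, 1 /* LBA count */,
		read_done, NULL, 0 /* io_flags */);
if (rc == 0) {
	while (!read_completed) {
		/* max_completions of 0 processes everything available. */
		spdk_nvme_ctrlr_process_io_completions(ctrlr, 0);
	}
}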

View File

@ -325,7 +325,7 @@ nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
error = spdk_nvme_cpl_is_error(cpl);
retry = error && nvme_completion_is_retry(cpl) &&
req->retries < nvme_retry_count;
req->retries < spdk_nvme_retry_count;
if (error && print_on_error) {
nvme_qpair_print_command(qpair, &req->cmd);
@ -524,7 +524,7 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair, uint32_t max_completion
int
nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id,
uint16_t num_entries, uint16_t num_trackers,
struct nvme_controller *ctrlr)
struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_tracker *tr;
uint16_t i;
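
The retry gate above consults the renamed global spdk_nvme_retry_count; a caller that wants a wider retry window simply raises it before starting I/O.

spdk_nvme_retry_count = 4;	/* illustrative value */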

View File

@ -47,7 +47,7 @@ struct rte_mempool *request_mempool;
struct dev {
struct spdk_pci_device *pci_dev;
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
struct spdk_nvme_health_information_page *health_page;
uint32_t orig_temp_threshold;
char name[100];
@ -90,7 +90,7 @@ set_temp_threshold(struct dev *dev, uint32_t temp)
cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
cmd.cdw11 = temp;
return nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, set_feature_completion, dev);
return spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, set_feature_completion, dev);
}
static void
@ -120,7 +120,7 @@ get_temp_threshold(struct dev *dev)
cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
cmd.cdw10 = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
return nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, get_feature_completion, dev);
return spdk_nvme_ctrlr_cmd_admin_raw(dev->ctrlr, &cmd, NULL, 0, get_feature_completion, dev);
}
static void
@ -148,9 +148,9 @@ get_log_page_completion(void *cb_arg, const struct spdk_nvme_cpl *cpl)
static int
get_health_log_page(struct dev *dev)
{
return nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
SPDK_NVME_GLOBAL_NS_TAG, dev->health_page, sizeof(*dev->health_page),
get_log_page_completion, dev);
return spdk_nvme_ctrlr_cmd_get_log_page(dev->ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
SPDK_NVME_GLOBAL_NS_TAG, dev->health_page, sizeof(*dev->health_page),
get_log_page_completion, dev);
}
static void
@ -211,7 +211,7 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_nvme_ctrlr *ctrlr)
{
struct dev *dev;
@ -256,7 +256,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
@ -265,8 +265,8 @@ int main(int argc, char **argv)
exit(1);
}
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
return 1;
}
@ -276,7 +276,7 @@ int main(int argc, char **argv)
printf("Registering asynchronous event callbacks...\n");
foreach_dev(dev) {
nvme_ctrlr_register_aer_callback(dev->ctrlr, aer_cb, dev);
spdk_nvme_ctrlr_register_aer_callback(dev->ctrlr, aer_cb, dev);
}
printf("Setting temperature thresholds...\n");
@ -287,7 +287,7 @@ int main(int argc, char **argv)
while (!failed && temperature_done < num_devs) {
foreach_dev(dev) {
nvme_ctrlr_process_admin_completions(dev->ctrlr);
spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
}
}
@ -299,7 +299,7 @@ int main(int argc, char **argv)
while (!failed && aer_done < num_devs) {
foreach_dev(dev) {
nvme_ctrlr_process_admin_completions(dev->ctrlr);
spdk_nvme_ctrlr_process_admin_completions(dev->ctrlr);
}
}
@ -308,7 +308,7 @@ int main(int argc, char **argv)
for (i = 0; i < num_devs; i++) {
struct dev *dev = &devs[i];
nvme_detach(dev->ctrlr);
spdk_nvme_detach(dev->ctrlr);
}
done:
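
For reference, the shape of an spdk_nvme_aer_cb handler matching the registration above; the decode of completion dword 0 (log page identifier in bits 23:16 per the NVMe spec) is illustrative.

static void
aer_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct dev *dev = arg;

	printf("%s: asynchronous event, log page 0x%x\n",
	       dev->name, (cpl->cdw0 >> 16) & 0xFF);
}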

View File

@ -48,13 +48,13 @@
#include "spdk/string.h"
struct ctrlr_entry {
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
struct ctrlr_entry *next;
char name[1024];
};
struct ns_entry {
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
struct ns_entry *next;
uint32_t io_size_blocks;
uint64_t size_in_ios;
@ -101,7 +101,7 @@ static int g_queue_depth;
static int g_time_in_sec;
static void
register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns)
register_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns *ns)
{
struct ns_entry *entry;
const struct spdk_nvme_ctrlr_data *cdata;
@ -112,12 +112,12 @@ register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns)
exit(1);
}
cdata = nvme_ctrlr_get_data(ctrlr);
cdata = spdk_nvme_ctrlr_get_data(ctrlr);
entry->ns = ns;
entry->size_in_ios = nvme_ns_get_size(ns) /
entry->size_in_ios = spdk_nvme_ns_get_size(ns) /
g_io_size_bytes;
entry->io_size_blocks = g_io_size_bytes / nvme_ns_get_sector_size(ns);
entry->io_size_blocks = g_io_size_bytes / spdk_nvme_ns_get_sector_size(ns);
snprintf(entry->name, 44, "%-20.20s (%-20.20s)", cdata->mn, cdata->sn);
@ -127,7 +127,7 @@ register_ns(struct nvme_controller *ctrlr, struct nvme_namespace *ns)
}
static void
register_ctrlr(struct nvme_controller *ctrlr)
register_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
int nsid, num_ns;
struct ctrlr_entry *entry = malloc(sizeof(struct ctrlr_entry));
@ -141,9 +141,9 @@ register_ctrlr(struct nvme_controller *ctrlr)
entry->next = g_controllers;
g_controllers = entry;
num_ns = nvme_ctrlr_get_num_ns(ctrlr);
num_ns = spdk_nvme_ctrlr_get_num_ns(ctrlr);
for (nsid = 1; nsid <= num_ns; nsid++) {
register_ns(ctrlr, nvme_ctrlr_get_ns(ctrlr, nsid));
register_ns(ctrlr, spdk_nvme_ctrlr_get_ns(ctrlr, nsid));
}
}
@ -189,11 +189,11 @@ submit_single_io(struct ns_worker_ctx *ns_ctx)
if ((g_rw_percentage == 100) ||
(g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
rc = nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
rc = spdk_nvme_ns_cmd_read(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
} else {
rc = nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
rc = spdk_nvme_ns_cmd_write(entry->ns, task->buf, offset_in_ios * entry->io_size_blocks,
entry->io_size_blocks, io_complete, task, 0);
}
if (rc != 0) {
@ -239,7 +239,7 @@ io_complete(void *ctx, const struct spdk_nvme_cpl *completion)
static void
check_io(struct ns_worker_ctx *ns_ctx)
{
nvme_ctrlr_process_io_completions(ns_ctx->ctr_entry->ctrlr, 0);
spdk_nvme_ctrlr_process_io_completions(ns_ctx->ctr_entry->ctrlr, 0);
}
static void
@ -268,8 +268,8 @@ work_fn(void *arg)
printf("Starting thread on core %u\n", worker->lcore);
if (nvme_register_io_thread() != 0) {
fprintf(stderr, "nvme_register_io_thread() failed on core %u\n", worker->lcore);
if (spdk_nvme_register_io_thread() != 0) {
fprintf(stderr, "spdk_nvme_register_io_thread() failed on core %u\n", worker->lcore);
return -1;
}
@ -296,7 +296,7 @@ work_fn(void *arg)
((tsc_end - rte_get_timer_cycles()) / g_tsc_rate) < (uint64_t)(g_time_in_sec / 5 + 10)) {
ns_ctx = worker->ns_ctx;
while (ns_ctx != NULL) {
if (nvme_ctrlr_reset(ns_ctx->ctr_entry->ctrlr) < 0) {
if (spdk_nvme_ctrlr_reset(ns_ctx->ctr_entry->ctrlr) < 0) {
fprintf(stderr, "nvme reset failed.\n");
return -1;
}
@ -315,7 +315,7 @@ work_fn(void *arg)
ns_ctx = ns_ctx->next;
}
nvme_unregister_io_thread();
spdk_nvme_unregister_io_thread();
return 0;
}
@ -520,7 +520,7 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_nvme_ctrlr *ctrlr)
{
register_ctrlr(ctrlr);
}
@ -530,8 +530,8 @@ register_controllers(void)
{
printf("Initializing NVMe Controllers\n");
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "spdk_nvme_probe() failed\n");
return 1;
}
@ -545,7 +545,7 @@ unregister_controllers(void)
while (entry) {
struct ctrlr_entry *next = entry->next;
nvme_detach(entry->ctrlr);
spdk_nvme_detach(entry->ctrlr);
free(entry);
entry = next;
}
@ -596,7 +596,7 @@ run_nvme_reset_cycle(int retry_count)
struct worker_thread *worker;
struct ns_worker_ctx *ns_ctx;
nvme_retry_count = retry_count;
spdk_nvme_retry_count = retry_count;
if (work_fn(g_workers) != 0) {
return -1;
@ -643,7 +643,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
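
The reset cycle driven by work_fn() reduces to the sketch below; after a successful spdk_nvme_ctrlr_reset() the caller owns resubmission of any I/O it still wants completed.

if (spdk_nvme_ctrlr_reset(ns_ctx->ctr_entry->ctrlr) < 0) {
	fprintf(stderr, "nvme reset failed.\n");
	return -1;
}
/* ... resubmit outstanding I/O against the re-enabled queues ... */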

View File

@ -56,7 +56,7 @@ struct rte_mempool *request_mempool;
struct dev {
struct spdk_pci_device *pci_dev;
struct nvme_controller *ctrlr;
struct spdk_nvme_ctrlr *ctrlr;
char name[100];
};
@ -273,15 +273,15 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
char *buf;
struct io_request *req;
struct nvme_namespace *ns;
struct spdk_nvme_ns *ns;
const struct spdk_nvme_ns_data *nsdata;
ns = nvme_ctrlr_get_ns(dev->ctrlr, 1);
ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
if (!ns) {
return -1;
}
nsdata = nvme_ns_get_data(ns);
if (!nsdata || !nvme_ns_get_sector_size(ns))
nsdata = spdk_nvme_ns_get_data(ns);
if (!nsdata || !spdk_nvme_ns_get_sector_size(ns))
return -1;
req = rte_zmalloc(NULL, sizeof(*req), 0);
@ -293,7 +293,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
if (!len)
return 0;
lba_count = len / nvme_ns_get_sector_size(ns);
lba_count = len / spdk_nvme_ns_get_sector_size(ns);
if (BASE_LBA_START + lba_count > (uint32_t)nsdata->nsze) {
rte_free(req);
return -1;
@ -304,10 +304,10 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
memset(req->iovs[i].iov_base, DATA_PATTERN, req->iovs[i].iov_len);
}
rc = nvme_ns_cmd_writev(ns, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
rc = spdk_nvme_ns_cmd_writev(ns, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
if (rc != 0) {
fprintf(stderr, "Writev Failed\n");
@ -318,7 +318,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
io_complete_flag = 0;
while (!io_complete_flag)
nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
spdk_nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
if (io_complete_flag != 1) {
fprintf(stderr, "%s Writev Failed\n", dev->name);
@ -333,10 +333,10 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
memset(req->iovs[i].iov_base, 0, req->iovs[i].iov_len);
}
rc = nvme_ns_cmd_readv(ns, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
rc = spdk_nvme_ns_cmd_readv(ns, BASE_LBA_START, lba_count,
io_complete, req, 0,
nvme_request_reset_sgl,
nvme_request_next_sge);
if (rc != 0) {
fprintf(stderr, "Readv Failed\n");
@ -345,7 +345,7 @@ writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn)
}
while (!io_complete_flag)
nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
spdk_nvme_ctrlr_process_io_completions(dev->ctrlr, 1);
if (io_complete_flag != 1) {
fprintf(stderr, "%s Readv Failed\n", dev->name);
@ -393,7 +393,7 @@ probe_cb(void *cb_ctx, struct spdk_pci_device *dev)
}
static void
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct nvme_controller *ctrlr)
attach_cb(void *cb_ctx, struct spdk_pci_device *pci_dev, struct spdk_nvme_ctrlr *ctrlr)
{
struct dev *dev;
@ -435,7 +435,7 @@ int main(int argc, char **argv)
}
request_mempool = rte_mempool_create("nvme_request", 8192,
nvme_request_size(), 128, 0,
spdk_nvme_request_size(), 128, 0,
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
@ -444,13 +444,13 @@ int main(int argc, char **argv)
exit(1);
}
if (nvme_probe(NULL, probe_cb, attach_cb) != 0) {
if (spdk_nvme_probe(NULL, probe_cb, attach_cb) != 0) {
fprintf(stderr, "nvme_probe() failed\n");
exit(1);
}
if (num_devs) {
rc = nvme_register_io_thread();
rc = spdk_nvme_register_io_thread();
if (rc != 0)
return rc;
}
@ -471,11 +471,11 @@ int main(int argc, char **argv)
for (i = 0; i < num_devs; i++) {
struct dev *dev = &devs[i];
nvme_detach(dev->ctrlr);
spdk_nvme_detach(dev->ctrlr);
}
if (num_devs)
nvme_unregister_io_thread();
spdk_nvme_unregister_io_thread();
return rc;
}
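
A hedged sketch of the two SGL callbacks passed to the vectored commands above. The signatures (reset to an offset, emit one physical-address/length pair per call) are assumed from the renamed spdk_nvme_req_reset_sgl_cb/spdk_nvme_req_next_sge_cb typedefs; the current_iov cursor field and the rte_malloc_virt2phy() translation (valid only for rte_zmalloc()'d buffers) are illustrative.

static void
nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
	struct io_request *req = cb_arg;

	/* Offset is ignored for simplicity in this sketch. */
	req->current_iov = 0;	/* hypothetical cursor field */
}

static int
nvme_request_next_sge(void *cb_arg, uint64_t *address, uint32_t *length)
{
	struct io_request *req = cb_arg;
	struct iovec *iov = &req->iovs[req->current_iov++];

	*address = rte_malloc_virt2phy(iov->iov_base);
	*length = iov->iov_len;
	return 0;
}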

View File

@ -47,18 +47,18 @@ uint64_t nvme_vtophys(void *buf)
}
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle)
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
return 0;
}
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr)
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}
int
nvme_ctrlr_start(struct nvme_controller *ctrlr)
nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
@ -92,7 +92,7 @@ nvme_thread(void *arg)
while (sync_start == 0)
;
rc = nvme_register_io_thread();
rc = spdk_nvme_register_io_thread();
if (rc == 0) {
__sync_fetch_and_add(&threads_pass, 1);
} else {
@ -113,19 +113,19 @@ test1(void)
CU_ASSERT(nvme_thread_ioq_index == -1);
rc = nvme_register_io_thread();
rc = spdk_nvme_register_io_thread();
CU_ASSERT(rc == 0);
CU_ASSERT(nvme_thread_ioq_index >= 0);
CU_ASSERT(driver->ioq_index_pool_next == 1);
/* try to register thread again - this should fail */
last_index = nvme_thread_ioq_index;
rc = nvme_register_io_thread();
rc = spdk_nvme_register_io_thread();
CU_ASSERT(rc != 0);
/* assert that the ioq_index was unchanged */
CU_ASSERT(nvme_thread_ioq_index == last_index);
nvme_unregister_io_thread();
spdk_nvme_unregister_io_thread();
CU_ASSERT(nvme_thread_ioq_index == -1);
CU_ASSERT(driver->ioq_index_pool_next == 0);
}

View File

@ -75,15 +75,15 @@ spdk_pci_device_get_subdevice_id(struct spdk_pci_device *dev)
int nvme_qpair_construct(struct nvme_qpair *qpair, uint16_t id,
uint16_t num_entries, uint16_t num_trackers,
struct nvme_controller *ctrlr)
struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
int
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
void *cb_arg)
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
uint32_t nsid, void *payload, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
return 0;
}
@ -131,52 +131,53 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
}
void
nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
union spdk_nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
union spdk_nvme_critical_warning_state state, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
}
void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr, void *payload,
spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
}
void
nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
}
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn,
nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
}
void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn,
nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
}
void
nvme_ns_destruct(struct nvme_namespace *ns)
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}
int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
struct nvme_controller *ctrlr)
nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_request *req = NULL;
@ -197,7 +198,8 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
}
struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_payload payload;
@ -208,7 +210,7 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t c
}
struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
@ -216,7 +218,7 @@ nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
static void
test_nvme_ctrlr_fail(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.num_io_queues = 0;
nvme_ctrlr_fail(&ctrlr);
@ -228,14 +230,14 @@ static void
test_nvme_ctrlr_construct_intel_support_log_page_list(void)
{
bool res;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_log_page_directory payload = {};
/* set an invalid vendor id */
ctrlr.cdata.vid = 0xFFFF;
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
CU_ASSERT(res == false);
/* set valid vendor id and log page directory */
@ -244,13 +246,13 @@ test_nvme_ctrlr_construct_intel_support_log_page_list(void)
memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
CU_ASSERT(res == false);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
CU_ASSERT(res == false);
/* set valid vendor id, device id and sub device id */
@ -263,13 +265,13 @@ test_nvme_ctrlr_construct_intel_support_log_page_list(void)
memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
CU_ASSERT(res == false);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
CU_ASSERT(res == false);
}
@ -277,21 +279,21 @@ static void
test_nvme_ctrlr_set_supported_features(void)
{
bool res;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
/* set an invalid vendor id */
ctrlr.cdata.vid = 0xFFFF;
nvme_ctrlr_set_supported_features(&ctrlr);
res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
CU_ASSERT(res == false);
ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
nvme_ctrlr_set_supported_features(&ctrlr);
res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
CU_ASSERT(res == true);
res = nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
CU_ASSERT(res == true);
}

View File

@ -178,7 +178,8 @@ static void verify_intel_get_log_page_directory(struct nvme_request *req)
}
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_request *req = &g_req;
@ -196,7 +197,8 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
}
struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_payload payload;
@ -207,13 +209,13 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t c
}
struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
verify_fn(req);
@ -222,7 +224,7 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
}
void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, struct nvme_request *req)
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
{
verify_fn(req);
/* stop analyzer from thinking stack variable addresses are stored in a global */
@ -232,32 +234,33 @@ nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr, struct nvme_reque
static void
test_firmware_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_firmware_page payload = {};
verify_fn = verify_firmware_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
&payload,
sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
&payload,
sizeof(payload), NULL, NULL);
}
static void
test_health_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_health_information_page payload = {};
verify_fn = verify_health_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid, &payload,
sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
&payload,
sizeof(payload), NULL, NULL);
}
static void
test_error_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_error_information_entry payload = {};
ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
@ -266,65 +269,66 @@ test_error_get_log_page(void)
/* valid page */
error_num_entries = 1;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
sizeof(payload), NULL, NULL);
}
static void test_intel_smart_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_smart_information_page payload = {};
verify_fn = verify_intel_smart_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
sizeof(payload), NULL, NULL);
}
static void test_intel_temperature_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_temperature_page payload = {};
verify_fn = verify_intel_temperature_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_intel_read_latency_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_rw_latency_page payload = {};
verify_fn = verify_intel_read_latency_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_intel_write_latency_get_log_page(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_rw_latency_page payload = {};
verify_fn = verify_intel_write_latency_log_page;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_intel_get_log_page_directory(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_intel_log_page_directory payload = {};
verify_fn = verify_intel_get_log_page_directory;
nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
SPDK_NVME_GLOBAL_NS_TAG,
&payload, sizeof(payload), NULL, NULL);
}
static void test_generic_get_log_pages(void)
@@ -346,28 +350,28 @@ static void test_intel_get_log_pages(void)
static void
test_set_feature_cmd(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
verify_fn = verify_set_feature_cmd;
nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
}
static void
test_get_feature_cmd(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
verify_fn = verify_get_feature_cmd;
nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
}
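Both features commands share one CDW encoding: the Feature Identifier goes in CDW10[7:0] and CDW11 carries the feature-specific value. A hedged sketch of what the wrappers above assemble (the Number of Queues FID and the CDW11 value are illustrative):
/* Rough shape of the command spdk_nvme_ctrlr_cmd_set_feature() builds. */
struct spdk_nvme_cmd cmd = {};
cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
cmd.cdw10 = SPDK_NVME_FEAT_NUMBER_OF_QUEUES; /* FID in the low byte */
cmd.cdw11 = (1u << 16) | 1u; /* illustrative: request 2 SQs and 2 CQs (0-based counts) */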
static void
test_abort_cmd(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
verify_fn = verify_abort_cmd;
@@ -377,12 +381,12 @@ test_abort_cmd(void)
static void
test_io_raw_cmd(void)
{
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_cmd cmd = {};
verify_fn = verify_io_raw_cmd;
nvme_ctrlr_cmd_io_raw(&ctrlr, &cmd, NULL, 1, NULL, NULL);
spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &cmd, NULL, 1, NULL, NULL);
}
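The raw I/O path is a pure pass-through: the caller owns the entire spdk_nvme_cmd. A minimal caller-side sketch, assuming a made-up vendor opcode and a DMA-able 512-byte buffer named buf:
/* Hypothetical vendor pass-through; the driver fills in nothing for the caller. */
struct spdk_nvme_cmd cmd = {};
cmd.opc = 0xC0; /* vendor-specific opcode, assumed for illustration */
cmd.nsid = 1;
spdk_nvme_ctrlr_cmd_io_raw(ctrlr, &cmd, buf, 512, NULL, NULL);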
static void


@@ -45,43 +45,43 @@ uint64_t nvme_vtophys(void *buf)
}
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, void *devhandle)
nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
return 0;
}
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr)
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}
int
nvme_ctrlr_start(struct nvme_controller *ctrlr)
nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
uint32_t
nvme_ns_get_sector_size(struct nvme_namespace *ns)
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
return ns->sector_size;
}
uint32_t
nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
return ns->ctrlr->max_xfer_size;
}
void
nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
nvme_ctrlr_submit_io_request(struct spdk_nvme_ctrlr *ctrlr,
struct nvme_request *req)
{
g_request = req;
}
static void
prepare_for_test(struct nvme_namespace *ns, struct nvme_controller *ctrlr,
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
uint32_t sector_size, uint32_t max_xfer_size,
uint32_t stripe_size)
{
@@ -90,7 +90,7 @@ prepare_for_test(struct nvme_namespace *ns, struct nvme_controller *ctrlr,
ns->ctrlr = ctrlr;
ns->sector_size = sector_size;
ns->stripe_size = stripe_size;
ns->sectors_per_max_io = nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->sector_size;
ns->sectors_per_stripe = ns->stripe_size / ns->sector_size;
g_request = NULL;
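prepare_for_test() derives the per-namespace split limits that the split tests below exercise; for split_test2's shape the arithmetic works out as follows (values taken from the calls below):
/* sector_size = 512, max_xfer_size = 128 KiB, no striping (split_test2): */
uint32_t sectors_per_max_io = (128 * 1024) / 512; /* 256 sectors per child I/O */
uint32_t lba_count = (256 * 1024) / 512; /* 512-sector parent request */
uint32_t children = lba_count / sectors_per_max_io; /* -> 2 child requests */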
@@ -107,8 +107,8 @@ nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
static void
split_test(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
void *payload;
uint64_t lba, cmd_lba;
uint32_t lba_count, cmd_lba_count;
@@ -119,7 +119,7 @@ split_test(void)
lba = 0;
lba_count = 1;
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@@ -136,8 +136,8 @@ split_test(void)
static void
split_test2(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@@ -155,7 +155,7 @@ split_test2(void)
lba = 0;
lba_count = (256 * 1024) / 512;
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@@ -189,8 +189,8 @@ split_test2(void)
static void
split_test3(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@@ -210,7 +210,7 @@ split_test3(void)
lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
lba_count = (256 * 1024) / 512;
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL, 0);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@@ -244,8 +244,8 @@ split_test3(void)
static void
split_test4(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct nvme_request *child;
void *payload;
uint64_t lba, cmd_lba;
@@ -267,8 +267,8 @@ split_test4(void)
lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
lba_count = (256 * 1024) / 512;
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
CU_ASSERT(rc == 0);
SPDK_CU_ASSERT_FATAL(g_request != NULL);
@@ -317,14 +317,14 @@ split_test4(void)
static void
test_nvme_ns_cmd_flush(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
nvme_cb_fn_t cb_fn = NULL;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
nvme_ns_cmd_flush(&ns, cb_fn, cb_arg);
spdk_nvme_ns_cmd_flush(&ns, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
CU_ASSERT(g_request->cmd.nsid == ns.id);
@@ -334,16 +334,16 @@ test_nvme_ns_cmd_flush(void)
static void
test_nvme_ns_cmd_write_zeroes(void)
{
struct nvme_namespace ns = { 0 };
struct nvme_controller ctrlr = { 0 };
nvme_cb_fn_t cb_fn = NULL;
struct spdk_nvme_ns ns = { 0 };
struct spdk_nvme_ctrlr ctrlr = { 0 };
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
uint64_t cmd_lba;
uint32_t cmd_lba_count;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
nvme_ns_cmd_write_zeroes(&ns, 0, 2, cb_fn, cb_arg, 0);
spdk_nvme_ns_cmd_write_zeroes(&ns, 0, 2, cb_fn, cb_arg, 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
CU_ASSERT(g_request->cmd.nsid == ns.id);
nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
@@ -356,9 +356,9 @@ test_nvme_ns_cmd_write_zeroes(void)
static void
test_nvme_ns_cmd_deallocate(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
nvme_cb_fn_t cb_fn = NULL;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
uint16_t num_ranges = 1;
void *payload = NULL;
@@ -367,7 +367,7 @@ test_nvme_ns_cmd_deallocate(void)
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range));
nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
@@ -377,7 +377,7 @@ test_nvme_ns_cmd_deallocate(void)
num_ranges = 256;
payload = malloc(num_ranges * sizeof(struct spdk_nvme_dsm_range));
nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
CU_ASSERT(g_request->cmd.nsid == ns.id);
CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1u);
@@ -387,15 +387,15 @@ test_nvme_ns_cmd_deallocate(void)
payload = NULL;
num_ranges = 0;
rc = nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
CU_ASSERT(rc != 0);
}
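The deallocate payload is an array of spdk_nvme_dsm_range entries, and CDW10 carries the zero-based range count, as the assertions above verify. A hedged sketch of filling a single range (field names assume the standard NVMe DSM range layout from nvme_spec.h; the LBA values are arbitrary):
/* Hypothetical single-range deallocate. */
struct spdk_nvme_dsm_range range = {};
range.length = 8; /* deallocate 8 LBAs... */
range.starting_lba = 1024; /* ...beginning at LBA 1024 */
spdk_nvme_ns_cmd_deallocate(ns, &range, 1, NULL, NULL);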
static void
test_io_flags(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
void *payload;
uint64_t lba;
uint32_t lba_count;
@@ -406,16 +406,16 @@ test_io_flags(void)
lba = 0;
lba_count = (4 * 1024) / 512;
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
CU_ASSERT(rc == 0);
CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
nvme_free_request(g_request);
rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
rc = spdk_nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL,
SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
CU_ASSERT(rc == 0);
CU_ASSERT_FATAL(g_request != NULL);
CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
@@ -429,11 +429,11 @@ test_io_flags(void)
static void
test_nvme_ns_cmd_reservation_register(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_reservation_register_data *payload;
bool ignore_key = 1;
nvme_cb_fn_t cb_fn = NULL;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
int rc = 0;
uint32_t tmp_cdw10;
@@ -441,10 +441,10 @@ test_nvme_ns_cmd_reservation_register(void)
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
rc = nvme_ns_cmd_reservation_register(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_reservation_register(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_REGISTER_KEY,
SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
cb_fn, cb_arg);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
@@ -463,11 +463,11 @@ test_nvme_ns_cmd_reservation_release(void)
static void
test_nvme_ns_cmd_reservation_release(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_reservation_key_data *payload;
bool ignore_key = 1;
nvme_cb_fn_t cb_fn = NULL;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
int rc = 0;
uint32_t tmp_cdw10;
@@ -475,10 +475,10 @@ test_nvme_ns_cmd_reservation_release(void)
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
rc = nvme_ns_cmd_reservation_release(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_reservation_release(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_RELEASE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
@@ -497,11 +497,11 @@ test_nvme_ns_cmd_reservation_acquire(void)
static void
test_nvme_ns_cmd_reservation_acquire(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_reservation_acquire_data *payload;
bool ignore_key = 1;
nvme_cb_fn_t cb_fn = NULL;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
int rc = 0;
uint32_t tmp_cdw10;
@@ -509,10 +509,10 @@ test_nvme_ns_cmd_reservation_acquire(void)
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
rc = nvme_ns_cmd_reservation_acquire(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, payload, ignore_key,
SPDK_NVME_RESERVE_ACQUIRE,
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
cb_fn, cb_arg);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
@@ -531,18 +531,18 @@ test_nvme_ns_cmd_reservation_report(void)
static void
test_nvme_ns_cmd_reservation_report(void)
{
struct nvme_namespace ns;
struct nvme_controller ctrlr;
struct spdk_nvme_ns ns;
struct spdk_nvme_ctrlr ctrlr;
struct spdk_nvme_reservation_status_data *payload;
nvme_cb_fn_t cb_fn = NULL;
spdk_nvme_cmd_cb cb_fn = NULL;
void *cb_arg = NULL;
int rc = 0;
prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
payload = malloc(sizeof(struct spdk_nvme_reservation_status_data));
rc = nvme_ns_cmd_reservation_report(&ns, payload, 0x1000,
cb_fn, cb_arg);
rc = spdk_nvme_ns_cmd_reservation_report(&ns, payload, 0x1000,
cb_fn, cb_arg);
CU_ASSERT(rc == 0);
CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
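Taken together, the four reservation tests cover the usual lifecycle. A condensed sketch of that sequence under the renamed API (payloads are zero-initialized placeholders; completion callbacks and error handling omitted):
/* Register a key, acquire a write-exclusive reservation, then release it. */
struct spdk_nvme_reservation_register_data reg_data = {};
struct spdk_nvme_reservation_acquire_data acq_data = {};
struct spdk_nvme_reservation_key_data rel_data = {};
spdk_nvme_ns_cmd_reservation_register(ns, &reg_data, false,
		SPDK_NVME_RESERVE_REGISTER_KEY,
		SPDK_NVME_RESERVE_PTPL_NO_CHANGES, NULL, NULL);
spdk_nvme_ns_cmd_reservation_acquire(ns, &acq_data, false,
		SPDK_NVME_RESERVE_ACQUIRE,
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, NULL, NULL);
spdk_nvme_ns_cmd_reservation_release(ns, &rel_data, false,
		SPDK_NVME_RESERVE_RELEASE,
		SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, NULL, NULL);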


@@ -40,7 +40,7 @@ struct nvme_driver g_nvme_driver = {
.max_io_queues = DEFAULT_MAX_IO_QUEUES,
};
int32_t nvme_retry_count = 1;
int32_t spdk_nvme_retry_count = 1;
char outbuf[OUTBUF_SIZE];
@@ -56,7 +56,8 @@ uint64_t nvme_vtophys(void *buf)
}
struct nvme_request *
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_request *req = NULL;
@@ -86,7 +87,8 @@ nvme_allocate_request(const struct nvme_payload *payload, uint32_t payload_size,
}
struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
void *cb_arg)
{
struct nvme_payload payload;
@@ -97,7 +99,7 @@ nvme_allocate_request_contig(void *buffer, uint32_t payload_size, nvme_cb_fn_t c
}
struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}
@@ -152,7 +154,7 @@ test2(void)
static void
prepare_submit_request_test(struct nvme_qpair *qpair,
struct nvme_controller *ctrlr,
struct spdk_nvme_ctrlr *ctrlr,
struct spdk_nvme_registers *regs)
{
memset(ctrlr, 0, sizeof(*ctrlr));
@@ -212,7 +214,7 @@ test3(void)
{
struct nvme_qpair qpair = {};
struct nvme_request *req;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -235,7 +237,7 @@ test4(void)
{
struct nvme_qpair qpair = {};
struct nvme_request *req;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
char payload[4096];
@@ -268,7 +270,7 @@ test_ctrlr_failed(void)
{
struct nvme_qpair qpair = {};
struct nvme_request *req;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
char payload[4096];
@@ -310,7 +312,7 @@ static void test_nvme_qpair_fail(void)
{
struct nvme_qpair qpair = {};
struct nvme_request *req = NULL;
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_tracker *tr_temp;
uint64_t phys_addr = 0;
@@ -340,7 +342,7 @@ static void test_nvme_qpair_fail(void)
static void test_nvme_qpair_process_completions(void)
{
struct nvme_qpair qpair = {};
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -355,7 +357,7 @@ static void
test_nvme_qpair_process_completions_limit(void)
{
struct nvme_qpair qpair = {};
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
prepare_submit_request_test(&qpair, &ctrlr, &regs);
@@ -386,7 +388,7 @@ test_nvme_qpair_destroy(void)
static void test_nvme_qpair_destroy(void)
{
struct nvme_qpair qpair = {};
struct nvme_controller ctrlr = {};
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_registers regs = {};
struct nvme_tracker *tr_temp;
uint64_t phys_addr = 0;