2015-09-21 15:52:41 +00:00
|
|
|
/*-
|
|
|
|
* BSD LICENSE
|
|
|
|
*
|
2016-01-26 17:47:22 +00:00
|
|
|
* Copyright (c) Intel Corporation.
|
2015-09-21 15:52:41 +00:00
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* * Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __NVME_INTERNAL_H__
|
|
|
|
#define __NVME_INTERNAL_H__
|
|
|
|
|
2016-02-19 19:41:12 +00:00
|
|
|
#include "spdk/nvme.h"
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
#include <errno.h>
|
2016-08-08 17:03:52 +00:00
|
|
|
#include <pthread.h>
|
2015-09-21 15:52:41 +00:00
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdbool.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <time.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <x86intrin.h>
|
|
|
|
|
|
|
|
#include <sys/user.h>
|
|
|
|
|
|
|
|
#include "spdk/queue.h"
|
|
|
|
#include "spdk/barrier.h"
|
2016-10-18 16:49:07 +00:00
|
|
|
#include "spdk/bit_array.h"
|
2016-08-12 16:03:40 +00:00
|
|
|
#include "spdk/log.h"
|
2015-12-10 23:56:12 +00:00
|
|
|
#include "spdk/mmio.h"
|
2016-01-06 05:43:33 +00:00
|
|
|
#include "spdk/pci_ids.h"
|
|
|
|
#include "spdk/nvme_intel.h"
|
2016-01-27 07:08:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Some Intel devices support vendor-unique read latency log page even
|
|
|
|
* though the log page directory says otherwise.
|
|
|
|
*/
|
|
|
|
#define NVME_INTEL_QUIRK_READ_LATENCY 0x1
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Some Intel devices support vendor-unique write latency log page even
|
|
|
|
* though the log page directory says otherwise.
|
|
|
|
*/
|
|
|
|
#define NVME_INTEL_QUIRK_WRITE_LATENCY 0x2
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-10-27 06:34:32 +00:00
|
|
|
/*
|
|
|
|
* The controller needs a delay before starts checking the device
|
|
|
|
* readiness, which is done by reading the NVME_CSTS_RDY bit.
|
|
|
|
*/
|
|
|
|
#define NVME_QUIRK_DELAY_BEFORE_CHK_RDY 0x4
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
#define NVME_MAX_ASYNC_EVENTS (8)
|
|
|
|
|
|
|
|
#define NVME_MIN_TIMEOUT_PERIOD (5)
|
|
|
|
#define NVME_MAX_TIMEOUT_PERIOD (120)
|
|
|
|
|
|
|
|
/* Maximum log page size to fetch for AERs. */
|
|
|
|
#define NVME_MAX_AER_LOG_SIZE (4096)
|
|
|
|
|
2015-11-30 16:18:46 +00:00
|
|
|
/*
|
|
|
|
* NVME_MAX_IO_QUEUES in nvme_spec.h defines the 64K spec-limit, but this
|
|
|
|
* define specifies the maximum number of queues this driver will actually
|
|
|
|
* try to configure, if available.
|
|
|
|
*/
|
|
|
|
#define DEFAULT_MAX_IO_QUEUES (1024)
|
|
|
|
|
2016-01-22 23:56:20 +00:00
|
|
|
/**
 * Discriminator for the union inside struct nvme_payload.
 */
enum nvme_payload_type {
	/** No payload is attached to the request. */
	NVME_PAYLOAD_TYPE_INVALID = 0,

	/** nvme_payload::u.contig is valid for this request */
	NVME_PAYLOAD_TYPE_CONTIG,

	/** nvme_payload::u.sgl is valid for this request */
	NVME_PAYLOAD_TYPE_SGL,
};
|
|
|
|
|
2016-03-01 02:50:31 +00:00
|
|
|
/*
 * Controller support flags.
 *
 * Stored as a bitmask in spdk_nvme_ctrlr::flags.
 */
enum spdk_nvme_ctrlr_flags {
	SPDK_NVME_CTRLR_SGL_SUPPORTED = 0x1, /**< The SGL is supported */
};
|
|
|
|
|
2016-01-22 23:56:20 +00:00
|
|
|
/**
 * Descriptor for a request data payload.
 *
 * This struct is arranged so that it fits nicely in struct nvme_request.
 */
struct __attribute__((packed)) nvme_payload {
	union {
		/** Virtual memory address of a single physically contiguous buffer */
		void *contig;

		/**
		 * Functions for retrieving physical addresses for scattered payloads.
		 */
		struct {
			/* Callback to rewind SGL iteration to a given offset (see spdk/nvme.h). */
			spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
			/* Callback producing the next scatter-gather element (see spdk/nvme.h). */
			spdk_nvme_req_next_sge_cb next_sge_fn;
			/* Opaque argument passed to both callbacks above. */
			void *cb_arg;
		} sgl;
	} u;

	/** Virtual memory address of a single physically contiguous metadata buffer */
	void *md;

	/** \ref nvme_payload_type — selects which member of u is valid */
	uint8_t type;
};
|
|
|
|
|
|
|
|
/**
 * In-flight tracking state for one NVMe command.
 *
 * NOTE(review): presumably allocated from nvme_driver::request_mempool —
 * confirm against the allocation helpers declared below.
 */
struct nvme_request {
	/* The NVMe command to submit for this request. */
	struct spdk_nvme_cmd cmd;

	/**
	 * Data payload for this request's command.
	 */
	struct nvme_payload payload;

	/* Retry counter. NOTE(review): the retry policy lives outside this
	 * header — confirm in the qpair completion path. */
	uint8_t retries;

	/**
	 * Number of children requests still outstanding for this
	 * request which was split into multiple child requests.
	 */
	uint8_t num_children;

	/* Size of the data payload in bytes. */
	uint32_t payload_size;

	/**
	 * Offset in bytes from the beginning of payload for this request.
	 * This is used for I/O commands that are split into multiple requests.
	 */
	uint32_t payload_offset;

	/* Offset in bytes into the metadata buffer; used for split requests. */
	uint32_t md_offset;

	/* Completion callback and its argument for this request. */
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;

	/* Linkage for request queues (e.g. spdk_nvme_qpair::queued_req). */
	STAILQ_ENTRY(nvme_request) stailq;

	/**
	 * The following members should not be reordered with members
	 * above.  These members are only needed when splitting
	 * requests which is done rarely, and the driver is careful
	 * to not touch the following fields until a split operation is
	 * needed, to avoid touching an extra cacheline.
	 */

	/**
	 * Points to the outstanding child requests for a parent request.
	 * Only valid if a request was split into multiple children
	 * requests, and is not initialized for non-split requests.
	 */
	TAILQ_HEAD(, nvme_request) children;

	/**
	 * Linked-list pointers for a child request in its parent's list.
	 */
	TAILQ_ENTRY(nvme_request) child_tailq;

	/**
	 * Points to a parent request if part of a split request,
	 * NULL otherwise.
	 */
	struct nvme_request *parent;

	/**
	 * Completion status for a parent request.  Initialized to all 0's
	 * (SUCCESS) before child requests are submitted.  If a child
	 * request completes with error, the error status is copied here,
	 * to ensure that the parent request is also completed with error
	 * status once all child requests are completed.
	 */
	struct spdk_nvme_cpl parent_status;

	/**
	 * The user_cb_fn and user_cb_arg fields are used for holding the original
	 * callback data when using nvme_allocate_request_user_copy.
	 */
	spdk_nvme_cmd_cb user_cb_fn;
	void *user_cb_arg;
	/* Buffer used by nvme_allocate_request_user_copy.
	 * NOTE(review): presumably a driver-owned copy of the user's buffer —
	 * confirm ownership/free semantics in nvme_free_request. */
	void *user_buffer;
};
|
|
|
|
|
2016-10-12 23:18:13 +00:00
|
|
|
/**
 * PCI identification tuple for a device, used for per-device quirk
 * matching (see nvme_get_quirks).
 */
struct pci_id {
	uint16_t vendor_id;     /* PCI vendor ID */
	uint16_t dev_id;        /* PCI device ID */
	uint16_t sub_vendor_id; /* PCI subsystem vendor ID */
	uint16_t sub_dev_id;    /* PCI subsystem device ID */
};
|
|
|
|
|
2016-10-12 23:11:55 +00:00
|
|
|
/**
 * Transport operations table (e.g. spdk_nvme_transport_pcie): the hooks
 * the transport-agnostic driver code calls to construct/destruct
 * controllers and queue pairs, access controller registers, and submit
 * and complete requests.
 */
struct spdk_nvme_transport {
	/* Controller lifecycle. devhandle is the transport-specific device handle. */
	struct spdk_nvme_ctrlr *(*ctrlr_construct)(void *devhandle);
	void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr);

	int (*ctrlr_enable)(struct spdk_nvme_ctrlr *ctrlr);

	/* Retrieve the PCI identification tuple for quirk matching. */
	int (*ctrlr_get_pci_id)(struct spdk_nvme_ctrlr *ctrlr, struct pci_id *pci_id);

	/* Controller register accessors; offset is in bytes, width is 4 or 8 bytes. */
	int (*ctrlr_set_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
	int (*ctrlr_set_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);

	int (*ctrlr_get_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
	int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);

	/* Maximum data transfer size (bytes) supported by this transport/controller. */
	uint32_t (*ctrlr_get_max_xfer_size)(struct spdk_nvme_ctrlr *ctrlr);

	/* I/O queue pair lifecycle. */
	struct spdk_nvme_qpair *(*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr,
			uint16_t qid, enum spdk_nvme_qprio qprio);
	int (*ctrlr_delete_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
	int (*ctrlr_reinit_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);

	/* Queue pair state management. */
	int (*qpair_construct)(struct spdk_nvme_qpair *qpair);
	void (*qpair_destroy)(struct spdk_nvme_qpair *qpair);

	void (*qpair_enable)(struct spdk_nvme_qpair *qpair);
	void (*qpair_disable)(struct spdk_nvme_qpair *qpair);

	void (*qpair_reset)(struct spdk_nvme_qpair *qpair);
	void (*qpair_fail)(struct spdk_nvme_qpair *qpair);

	/* Request submission and completion processing. */
	int (*qpair_submit_request)(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
	int32_t (*qpair_process_completions)(struct spdk_nvme_qpair *qpair, uint32_t max_completions);
};
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
/**
 * Context for synchronously waiting on a single command completion;
 * filled in by nvme_completion_poll_cb (declared below).
 */
struct nvme_completion_poll_status {
	/* Completion entry for the command. */
	struct spdk_nvme_cpl cpl;
	/* Presumably set true by the completion callback — confirm in nvme_completion_poll_cb. */
	bool done;
};
|
|
|
|
|
|
|
|
/**
 * State for one outstanding Asynchronous Event Request (AER).
 * The controller keeps an array of these (spdk_nvme_ctrlr::aer).
 */
struct nvme_async_event_request {
	/* Controller the AER was submitted to. */
	struct spdk_nvme_ctrlr *ctrlr;
	/* Underlying admin request used to post the AER. */
	struct nvme_request *req;
	/* Completion entry for the asynchronous event. */
	struct spdk_nvme_cpl cpl;
};
|
|
|
|
|
2016-02-29 17:11:35 +00:00
|
|
|
/**
 * Generic (transport-independent) queue pair state.
 */
struct spdk_nvme_qpair {
	/* Transport function table used to drive this queue pair. */
	const struct spdk_nvme_transport *transport;

	/* Requests queued for later submission. NOTE(review): presumably
	 * requests that could not be submitted immediately — confirm in
	 * nvme_qpair_submit_request. */
	STAILQ_HEAD(, nvme_request) queued_req;

	/* Queue identifier; 0 is the admin queue (see nvme_qpair_is_admin_queue). */
	uint16_t id;

	/* Number of entries in the queue. */
	uint16_t num_entries;

	/* Queue priority (enum spdk_nvme_qprio value, as passed to
	 * ctrlr_create_io_qpair). */
	uint8_t qprio;

	/* Controller this queue pair belongs to. */
	struct spdk_nvme_ctrlr *ctrlr;

	/* List entry for spdk_nvme_ctrlr::active_io_qpairs */
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};
|
|
|
|
|
2016-02-10 18:26:12 +00:00
|
|
|
/**
 * Per-namespace state cached by the driver.
 */
struct spdk_nvme_ns {
	/* Controller this namespace belongs to. */
	struct spdk_nvme_ctrlr *ctrlr;
	/* Stripe size in bytes. NOTE(review): presumably 0 when the device
	 * does not report striping — confirm in nvme_ns_construct. */
	uint32_t stripe_size;
	/* Logical block (sector) size in bytes. */
	uint32_t sector_size;
	/* Metadata size per sector, in bytes. */
	uint32_t md_size;
	/* End-to-end protection information type. */
	uint32_t pi_type;
	/* Maximum I/O size expressed in sectors. */
	uint32_t sectors_per_max_io;
	/* Stripe size expressed in sectors. */
	uint32_t sectors_per_stripe;
	/* Namespace ID. */
	uint16_t id;
	/* Namespace flag bits. NOTE(review): flag values are defined
	 * elsewhere — confirm against spdk/nvme.h. */
	uint16_t flags;
};
|
|
|
|
|
2016-02-23 23:36:13 +00:00
|
|
|
/**
 * State of struct spdk_nvme_ctrlr (in particular, during initialization).
 *
 * Driven forward by nvme_ctrlr_process_init; the CC.EN / CSTS.RDY
 * handshake follows the NVMe controller initialization sequence.
 */
enum nvme_ctrlr_state {
	/**
	 * Controller has not been initialized yet.
	 */
	NVME_CTRLR_STATE_INIT,

	/**
	 * Waiting for CSTS.RDY to transition from 0 to 1 so that CC.EN may be set to 0.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1,

	/**
	 * Waiting for CSTS.RDY to transition from 1 to 0 so that CC.EN may be set to 1.
	 */
	NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0,

	/**
	 * Waiting for CSTS.RDY to transition from 0 to 1 after enabling the controller.
	 */
	NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1,

	/**
	 * Controller initialization has completed and the controller is ready.
	 */
	NVME_CTRLR_STATE_READY
};
|
|
|
|
|
|
|
|
#define NVME_TIMEOUT_INFINITE UINT64_MAX
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
/*
 * One of these per allocated PCI device.
 */
struct spdk_nvme_ctrlr {
	/* Hot data (accessed in I/O path) starts here. */

	/* Transport function table used to reach the device (e.g. PCIe). */
	const struct spdk_nvme_transport *transport;

	/** Array of namespaces indexed by nsid - 1 */
	struct spdk_nvme_ns *ns;

	/* Number of namespaces (entries in the ns array). */
	uint32_t num_ns;

	/* True while a controller reset is in progress. */
	bool is_resetting;

	/* True once the controller has been marked failed. */
	bool is_failed;

	/** Controller support flags (bitmask of enum spdk_nvme_ctrlr_flags) */
	uint64_t flags;

	/* Cold data (not accessed in normal I/O path) is after this point. */

	/* Cached controller Capabilities (CAP) register (see nvme_ctrlr_get_cap). */
	union spdk_nvme_cap_register cap;

	/* Initialization state machine position (see enum nvme_ctrlr_state). */
	enum nvme_ctrlr_state state;
	/* Deadline for the current init state; NVME_TIMEOUT_INFINITE for none.
	 * Presumably in timestamp-counter ticks, per the _tsc suffix — confirm. */
	uint64_t state_timeout_tsc;

	/* Linkage for nvme_driver::init_ctrlrs / attached_ctrlrs. */
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;

	/** All the log pages supported */
	bool log_page_supported[256];

	/** All the features supported */
	bool feature_supported[256];

	/* Opaque handle to associated PCI device. */
	struct spdk_pci_device *devhandle;

	/** maximum i/o size in bytes */
	uint32_t max_xfer_size;

	/** minimum page size supported by this controller in bytes */
	uint32_t min_page_size;

	/* Outstanding Asynchronous Event Requests and the user AER callback. */
	uint32_t num_aers;
	struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
	spdk_nvme_aer_cb aer_cb_fn;
	void *aer_cb_arg;

	/** guards access to the controller itself, including admin queues */
	pthread_mutex_t ctrlr_lock;

	/* Admin queue pair (queue ID 0). */
	struct spdk_nvme_qpair *adminq;

	/**
	 * Identify Controller data.
	 */
	struct spdk_nvme_ctrlr_data cdata;

	/**
	 * Array of Identify Namespace data.
	 *
	 * Stored separately from ns since nsdata should not normally be accessed during I/O.
	 */
	struct spdk_nvme_ns_data *nsdata;

	/* Bit array tracking which I/O queue IDs are free for allocation. */
	struct spdk_bit_array *free_io_qids;
	/* I/O queue pairs currently in use (see spdk_nvme_qpair::tailq). */
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;

	/* Options this controller was attached with. */
	struct spdk_nvme_ctrlr_opts opts;

	/** PCI address including domain, bus, device and function */
	struct spdk_pci_addr pci_addr;

	/* Device-specific quirk bits (see nvme_get_quirks and the
	 * NVME_*_QUIRK_* defines above). */
	uint64_t quirks;

	/* Extra sleep time during controller initialization */
	uint64_t sleep_timeout_tsc;
};
|
2015-09-21 15:52:41 +00:00
|
|
|
|
|
|
|
/**
 * Process-wide driver state (see g_spdk_nvme_driver below).
 */
struct nvme_driver {
	/* Driver-global lock. NOTE(review): exact coverage is defined by the
	 * users of this struct — presumably guards the lists below. */
	pthread_mutex_t lock;
	/* Controllers currently going through initialization. */
	TAILQ_HEAD(, spdk_nvme_ctrlr) init_ctrlrs;
	/* Controllers that have completed attachment. */
	TAILQ_HEAD(, spdk_nvme_ctrlr) attached_ctrlrs;
	/* Pool from which struct nvme_request objects are allocated. */
	struct spdk_mempool *request_mempool;
};
|
|
|
|
|
2016-08-24 06:25:58 +00:00
|
|
|
extern struct nvme_driver *g_spdk_nvme_driver;
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-10-12 23:11:55 +00:00
|
|
|
extern const struct spdk_nvme_transport spdk_nvme_transport_pcie;
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
#define nvme_min(a,b) (((a)<(b))?(a):(b))
|
|
|
|
|
2016-10-12 23:18:13 +00:00
|
|
|
#define INTEL_DC_P3X00_DEVID 0x0953
|
2015-09-21 15:52:41 +00:00
|
|
|
|
|
|
|
#define nvme_delay usleep
|
|
|
|
|
|
|
|
/**
 * Integer base-2 logarithm (floor) of a 32-bit value.
 *
 * \param x value to take the logarithm of.
 * \return floor(log2(x)), or 0 when x is 0 (log2(0) is mathematically
 *         undefined, so 0 is returned as a safe fallback).
 */
static inline uint32_t
nvme_u32log2(uint32_t x)
{
	uint32_t log = 0;

	/*
	 * Count how many right shifts it takes to exhaust the value;
	 * that count is the index of the highest set bit.  A zero input
	 * never enters the loop and yields 0.
	 */
	while (x >>= 1) {
		log++;
	}

	return log;
}
|
|
|
|
|
|
|
|
/**
 * Round x up to the next power of 2.
 *
 * Preserves the historical result of 1 << (1 + log2(x - 1)); in
 * particular x == 1 yields 2, and x == 0 is not a valid input
 * (it underflows to UINT32_MAX and the shift amount would be 32).
 */
static inline uint32_t
nvme_align32pow2(uint32_t x)
{
	uint32_t v = x - 1;
	uint32_t shift;

	/*
	 * For v > 0, 32 - clz(v) is one past the highest set bit of v,
	 * i.e. 1 + floor(log2(v)).  __builtin_clz(0) is undefined, so
	 * v == 0 (x == 1) is handled separately and maps to shift 1.
	 */
	shift = (v == 0) ? 1u : (32u - (uint32_t)__builtin_clz(v));

	return 1u << shift;
}
|
|
|
|
|
2016-10-13 22:23:55 +00:00
|
|
|
static inline bool
|
|
|
|
nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
|
|
|
|
{
|
|
|
|
return qpair->id == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool
|
|
|
|
nvme_qpair_is_io_queue(struct spdk_nvme_qpair *qpair)
|
|
|
|
{
|
|
|
|
return qpair->id != 0;
|
|
|
|
}
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
/* Admin functions */
|
2016-02-29 21:33:50 +00:00
|
|
|
int nvme_ctrlr_cmd_identify_controller(struct spdk_nvme_ctrlr *ctrlr,
|
2015-09-21 15:52:41 +00:00
|
|
|
void *payload,
|
2016-02-10 18:26:12 +00:00
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-02-29 21:33:50 +00:00
|
|
|
int nvme_ctrlr_cmd_identify_namespace(struct spdk_nvme_ctrlr *ctrlr,
|
2015-09-21 15:52:41 +00:00
|
|
|
uint16_t nsid, void *payload,
|
2016-02-10 18:26:12 +00:00
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-02-29 21:33:50 +00:00
|
|
|
int nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
|
2016-02-10 18:26:12 +00:00
|
|
|
uint32_t num_queues, spdk_nvme_cmd_cb cb_fn,
|
2015-09-21 15:52:41 +00:00
|
|
|
void *cb_arg);
|
2016-02-29 21:33:50 +00:00
|
|
|
int nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
|
2016-02-09 18:06:48 +00:00
|
|
|
union spdk_nvme_critical_warning_state state,
|
2016-02-10 18:26:12 +00:00
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-02-29 21:33:50 +00:00
|
|
|
int nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, uint16_t cid,
|
2016-02-10 18:26:12 +00:00
|
|
|
uint16_t sqid, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-02-25 03:44:44 +00:00
|
|
|
int nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
|
|
|
|
struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
|
|
|
int nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
|
|
|
|
struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
|
|
|
int nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
|
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
|
|
|
int nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
|
|
|
|
void *cb_arg);
|
2016-03-07 06:29:50 +00:00
|
|
|
int nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
|
|
|
|
struct spdk_nvme_format *format, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-05-03 05:18:39 +00:00
|
|
|
int nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
|
|
|
|
const struct spdk_nvme_fw_commit *fw_commit,
|
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
|
|
|
int nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
|
|
|
|
uint32_t size, uint32_t offset, void *payload,
|
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-02-09 18:06:48 +00:00
|
|
|
void nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-10-18 19:50:43 +00:00
|
|
|
int nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr);
|
2016-02-10 18:26:12 +00:00
|
|
|
void nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
|
2016-02-23 23:36:13 +00:00
|
|
|
int nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr);
|
2016-02-10 18:26:12 +00:00
|
|
|
int nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr);
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-03-08 22:16:09 +00:00
|
|
|
int nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
|
2015-09-21 15:52:41 +00:00
|
|
|
struct nvme_request *req);
|
2016-10-13 23:08:22 +00:00
|
|
|
int nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap);
|
2016-02-29 17:11:35 +00:00
|
|
|
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
|
2015-09-21 15:52:41 +00:00
|
|
|
uint16_t num_entries,
|
2016-02-10 18:26:12 +00:00
|
|
|
struct spdk_nvme_ctrlr *ctrlr);
|
2016-02-29 17:11:35 +00:00
|
|
|
void nvme_qpair_destroy(struct spdk_nvme_qpair *qpair);
|
|
|
|
void nvme_qpair_enable(struct spdk_nvme_qpair *qpair);
|
|
|
|
void nvme_qpair_disable(struct spdk_nvme_qpair *qpair);
|
2016-03-08 22:16:09 +00:00
|
|
|
int nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
|
2015-09-21 15:52:41 +00:00
|
|
|
struct nvme_request *req);
|
2016-02-29 17:11:35 +00:00
|
|
|
void nvme_qpair_fail(struct spdk_nvme_qpair *qpair);
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-02-10 18:26:12 +00:00
|
|
|
int nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
|
|
|
|
struct spdk_nvme_ctrlr *ctrlr);
|
|
|
|
void nvme_ns_destruct(struct spdk_nvme_ns *ns);
|
2015-09-21 15:52:41 +00:00
|
|
|
|
2016-01-22 23:56:20 +00:00
|
|
|
struct nvme_request *nvme_allocate_request(const struct nvme_payload *payload,
|
2016-02-10 18:26:12 +00:00
|
|
|
uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
|
|
|
struct nvme_request *nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-01-22 23:56:20 +00:00
|
|
|
struct nvme_request *nvme_allocate_request_contig(void *buffer, uint32_t payload_size,
|
2016-02-10 18:26:12 +00:00
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg);
|
2016-09-12 21:30:59 +00:00
|
|
|
struct nvme_request *nvme_allocate_request_user_copy(void *buffer, uint32_t payload_size,
|
|
|
|
spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller);
|
2015-10-20 00:03:04 +00:00
|
|
|
void nvme_free_request(struct nvme_request *req);
|
2016-04-08 16:55:27 +00:00
|
|
|
void nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child);
|
2016-10-24 23:29:47 +00:00
|
|
|
uint64_t nvme_get_quirks(const struct pci_id *id);
|
2016-02-10 18:26:12 +00:00
|
|
|
|
2016-03-07 17:36:17 +00:00
|
|
|
void spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts);
|
|
|
|
|
2016-08-24 03:25:18 +00:00
|
|
|
int nvme_mutex_init_shared(pthread_mutex_t *mtx);
|
2016-10-17 03:26:55 +00:00
|
|
|
int nvme_mutex_init_recursive_shared(pthread_mutex_t *mtx);
|
2016-08-24 03:25:18 +00:00
|
|
|
|
2016-10-13 22:23:55 +00:00
|
|
|
bool nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
|
|
|
|
void nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd);
|
|
|
|
void nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl);
|
|
|
|
|
2015-09-21 15:52:41 +00:00
|
|
|
#endif /* __NVME_INTERNAL_H__ */
|