nvme: convert transport type to an enum

Function pointers will not work with the DPDK multi-process model (the
same function can be mapped at different addresses in different
processes), so define a transport enum and dispatch functions that
switch on the transport type instead.

Change-Id: Ic16866786eba5e523ce533e56e7a5c92672eb2a5
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Daniel Verkamp <daniel.verkamp@intel.com>
Date: 2016-11-03 15:34:35 -07:00
Parent: fa5206c416
Commit: 1ffec5d53a
13 changed files with 368 additions and 209 deletions
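
Before the per-file diffs, a minimal standalone sketch of the pattern described in the commit message: the transport is stored as a plain enum (data that means the same thing in every process) and each operation dispatches through a switch on that enum rather than through a per-controller function pointer. The names below (example_transport, example_do_io) are illustrative only and do not exist in SPDK; the real identifiers appear in the diffs that follow.

#include <stdio.h>
#include <stdlib.h>

/* An enum value is ordinary data, so it is valid in every process that maps
 * the shared controller state. A function pointer is not, because the library
 * text can be loaded at different addresses in different processes. */
enum example_transport {
	EXAMPLE_TRANSPORT_PCIE,
};

static int
example_pcie_do_io(int arg)
{
	return printf("pcie io %d\n", arg);
}

/* Generic entry point: dispatch by switching on the enum. */
static int
example_do_io(enum example_transport transport, int arg)
{
	switch (transport) {
	case EXAMPLE_TRANSPORT_PCIE:
		return example_pcie_do_io(arg);
	}

	abort(); /* unknown transport */
}

int
main(void)
{
	return example_do_io(EXAMPLE_TRANSPORT_PCIE, 42) > 0 ? 0 : 1;
}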


@ -43,6 +43,7 @@ extern "C" {
#endif
#include <assert.h>
#include <stdlib.h>
#define SPDK_CONCAT_(x, y) x##y
#define SPDK_CONCAT(x, y) SPDK_CONCAT_(x, y)
@ -53,6 +54,12 @@ extern "C" {
#define SPDK_STATIC_ASSERT(cond, msg)
#endif
#if !defined(DEBUG) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#define SPDK_UNREACHABLE() __builtin_unreachable()
#else
#define SPDK_UNREACHABLE() abort()
#endif
#ifdef __cplusplus
}
#endif


@ -35,7 +35,7 @@ SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
CFLAGS += $(ENV_CFLAGS)
C_SRCS = nvme_ctrlr_cmd.c nvme_ctrlr.c nvme_ns_cmd.c nvme_ns.c nvme_pcie.c nvme_qpair.c nvme.c nvme_quirks.c
C_SRCS = nvme_ctrlr_cmd.c nvme_ctrlr.c nvme_ns_cmd.c nvme_ns.c nvme_pcie.c nvme_qpair.c nvme.c nvme_quirks.c nvme_transport.c
LIBNAME = nvme
include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk


@ -47,12 +47,12 @@ int32_t spdk_nvme_retry_count;
static struct spdk_nvme_ctrlr *
nvme_attach(void *devhandle)
{
const struct spdk_nvme_transport *transport;
enum spdk_nvme_transport transport;
struct spdk_nvme_ctrlr *ctrlr;
transport = &spdk_nvme_transport_pcie;
transport = SPDK_NVME_TRANSPORT_PCIE;
ctrlr = transport->ctrlr_construct(devhandle);
ctrlr = nvme_transport_ctrlr_construct(transport, devhandle);
return ctrlr;
}


@ -40,36 +40,36 @@ static int nvme_ctrlr_construct_and_submit_aer(struct spdk_nvme_ctrlr *ctrlr,
static int
nvme_ctrlr_get_cc(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cc_register *cc)
{
return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
&cc->raw);
return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
&cc->raw);
}
static int
nvme_ctrlr_get_csts(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_csts_register *csts)
{
return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
&csts->raw);
return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, csts.raw),
&csts->raw);
}
int
nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
{
return ctrlr->transport->ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
&cap->raw);
return nvme_transport_ctrlr_get_reg_8(ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
&cap->raw);
}
static int
nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
{
return ctrlr->transport->ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
&vs->raw);
return nvme_transport_ctrlr_get_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, vs.raw),
&vs->raw);
}
static int
nvme_ctrlr_set_cc(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cc_register *cc)
{
return ctrlr->transport->ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
cc->raw);
return nvme_transport_ctrlr_set_reg_4(ctrlr, offsetof(struct spdk_nvme_registers, cc.raw),
cc->raw);
}
void
@ -120,7 +120,7 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
return NULL;
}
qpair = ctrlr->transport->ctrlr_create_io_qpair(ctrlr, qid, qprio);
qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, qprio);
if (qpair == NULL) {
SPDK_ERRLOG("transport->ctrlr_create_io_qpair() failed\n");
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
@ -150,7 +150,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
if (ctrlr->transport->ctrlr_delete_io_qpair(ctrlr, qpair)) {
if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
pthread_mutex_unlock(&ctrlr->ctrlr_lock);
return -1;
}
@ -169,7 +169,7 @@ nvme_ctrlr_construct_intel_support_log_page_list(struct spdk_nvme_ctrlr *ctrlr,
return;
}
if (ctrlr->transport->ctrlr_get_pci_id(ctrlr, &pci_id)) {
if (nvme_transport_ctrlr_get_pci_id(ctrlr, &pci_id)) {
return;
}
@ -348,7 +348,7 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
union spdk_nvme_cc_register cc;
int rc;
rc = ctrlr->transport->ctrlr_enable(ctrlr);
rc = nvme_transport_ctrlr_enable(ctrlr);
if (rc != 0) {
SPDK_ERRLOG("transport ctrlr_enable failed\n");
return rc;
@ -455,7 +455,7 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
if (!ctrlr->is_failed) {
/* Reinitialize qpairs */
TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
if (ctrlr->transport->ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
if (nvme_transport_ctrlr_reinit_io_qpair(ctrlr, qpair) != 0) {
nvme_ctrlr_fail(ctrlr);
rc = -1;
}
@ -494,7 +494,7 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
* Use MDTS to ensure our default max_xfer_size doesn't exceed what the
* controller supports.
*/
ctrlr->max_xfer_size = ctrlr->transport->ctrlr_get_max_xfer_size(ctrlr);
ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
SPDK_TRACELOG(SPDK_TRACE_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
if (ctrlr->cdata.mdts > 0) {
ctrlr->max_xfer_size = nvme_min(ctrlr->max_xfer_size,
@ -987,7 +987,7 @@ nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
int
nvme_ctrlr_start(struct spdk_nvme_ctrlr *ctrlr)
{
ctrlr->transport->qpair_reset(ctrlr->adminq);
nvme_transport_qpair_reset(ctrlr->adminq);
nvme_qpair_enable(ctrlr->adminq);
@ -1060,7 +1060,7 @@ nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
nvme_mutex_init_recursive_shared(&ctrlr->ctrlr_lock);
if (ctrlr->transport->ctrlr_get_pci_id(ctrlr, &pci_id) == 0) {
if (nvme_transport_ctrlr_get_pci_id(ctrlr, &pci_id) == 0) {
ctrlr->quirks = nvme_get_quirks(&pci_id);
}
@ -1089,7 +1089,7 @@ nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
nvme_ctrlr_free_processes(ctrlr);
ctrlr->transport->ctrlr_destruct(ctrlr);
nvme_transport_ctrlr_destruct(ctrlr);
}
int


@ -217,38 +217,8 @@ struct nvme_request {
void *user_buffer;
};
struct spdk_nvme_transport {
struct spdk_nvme_ctrlr *(*ctrlr_construct)(void *devhandle);
void (*ctrlr_destruct)(struct spdk_nvme_ctrlr *ctrlr);
int (*ctrlr_enable)(struct spdk_nvme_ctrlr *ctrlr);
int (*ctrlr_get_pci_id)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id);
int (*ctrlr_set_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value);
int (*ctrlr_set_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value);
int (*ctrlr_get_reg_4)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value);
int (*ctrlr_get_reg_8)(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value);
uint32_t (*ctrlr_get_max_xfer_size)(struct spdk_nvme_ctrlr *ctrlr);
struct spdk_nvme_qpair *(*ctrlr_create_io_qpair)(struct spdk_nvme_ctrlr *ctrlr,
uint16_t qid, enum spdk_nvme_qprio qprio);
int (*ctrlr_delete_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
int (*ctrlr_reinit_io_qpair)(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair);
int (*qpair_construct)(struct spdk_nvme_qpair *qpair);
void (*qpair_destroy)(struct spdk_nvme_qpair *qpair);
void (*qpair_enable)(struct spdk_nvme_qpair *qpair);
void (*qpair_disable)(struct spdk_nvme_qpair *qpair);
void (*qpair_reset)(struct spdk_nvme_qpair *qpair);
void (*qpair_fail)(struct spdk_nvme_qpair *qpair);
int (*qpair_submit_request)(struct spdk_nvme_qpair *qpair, struct nvme_request *req);
int32_t (*qpair_process_completions)(struct spdk_nvme_qpair *qpair, uint32_t max_completions);
enum spdk_nvme_transport {
SPDK_NVME_TRANSPORT_PCIE,
};
struct nvme_completion_poll_status {
@ -263,10 +233,10 @@ struct nvme_async_event_request {
};
struct spdk_nvme_qpair {
const struct spdk_nvme_transport *transport;
STAILQ_HEAD(, nvme_request) queued_req;
enum spdk_nvme_transport transport;
uint16_t id;
uint16_t num_entries;
@ -348,11 +318,11 @@ struct spdk_nvme_controller_process {
struct spdk_nvme_ctrlr {
/* Hot data (accessed in I/O path) starts here. */
const struct spdk_nvme_transport *transport;
/** Array of namespaces indexed by nsid - 1 */
struct spdk_nvme_ns *ns;
enum spdk_nvme_transport transport;
uint32_t num_ns;
bool is_resetting;
@ -437,8 +407,6 @@ struct nvme_driver {
extern struct nvme_driver *g_spdk_nvme_driver;
extern const struct spdk_nvme_transport spdk_nvme_transport_pcie;
#define nvme_min(a,b) (((a)<(b))?(a):(b))
#define INTEL_DC_P3X00_DEVID 0x0953
@ -552,4 +520,32 @@ bool nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl);
void nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd);
void nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl);
/* Transport specific functions */
#define DECLARE_TRANSPORT(name) \
struct spdk_nvme_ctrlr *nvme_ ## name ## _ctrlr_construct(enum spdk_nvme_transport transport, void *devhandle); \
int nvme_ ## name ## _ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr); \
int nvme_ ## name ## _ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr); \
int nvme_ ## name ## _ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id); \
int nvme_ ## name ## _ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value); \
int nvme_ ## name ## _ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value); \
int nvme_ ## name ## _ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value); \
int nvme_ ## name ## _ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value); \
uint32_t nvme_ ## name ## _ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr); \
struct spdk_nvme_qpair *nvme_ ## name ## _ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio); \
int nvme_ ## name ## _ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_construct(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_destroy(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_enable(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_disable(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_reset(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_fail(struct spdk_nvme_qpair *qpair); \
int nvme_ ## name ## _qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req); \
int32_t nvme_ ## name ## _qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions);
DECLARE_TRANSPORT(transport) /* generic transport dispatch functions */
DECLARE_TRANSPORT(pcie)
#undef DECLARE_TRANSPORT
#endif /* __NVME_INTERNAL_H__ */
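
For reference, DECLARE_TRANSPORT(pcie) above simply declares one prototype per operation with the pcie name substituted; the first few expansions look like this (an illustrative partial expansion, not additional diff content):

struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(enum spdk_nvme_transport transport, void *devhandle);
int nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
int nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr);
/* ...and so on for the remaining ctrlr_* and qpair_* operations. */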


@ -191,7 +191,7 @@ int nvme_ns_construct(struct spdk_nvme_ns *ns, uint16_t id,
ns->id = id;
ns->stripe_size = 0;
if (ctrlr->transport->ctrlr_get_pci_id(ctrlr, &pci_id) == 0) {
if (nvme_transport_ctrlr_get_pci_id(ctrlr, &pci_id) == 0) {
if (pci_id.vendor_id == SPDK_PCI_VID_INTEL &&
pci_id.device_id == INTEL_DC_P3X00_DEVID &&
ctrlr->cdata.vs[3] != 0) {


@ -166,18 +166,18 @@ struct nvme_pcie_qpair {
static inline struct nvme_pcie_ctrlr *
nvme_pcie_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
assert(ctrlr->transport == &spdk_nvme_transport_pcie);
assert(ctrlr->transport == SPDK_NVME_TRANSPORT_PCIE);
return (struct nvme_pcie_ctrlr *)((uintptr_t)ctrlr - offsetof(struct nvme_pcie_ctrlr, ctrlr));
}
static inline struct nvme_pcie_qpair *
nvme_pcie_qpair(struct spdk_nvme_qpair *qpair)
{
assert(qpair->transport == &spdk_nvme_transport_pcie);
assert(qpair->transport == SPDK_NVME_TRANSPORT_PCIE);
return (struct nvme_pcie_qpair *)((uintptr_t)qpair - offsetof(struct nvme_pcie_qpair, qpair));
}
static int
int
nvme_pcie_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id)
{
assert(ctrlr != NULL);
@ -196,7 +196,7 @@ nvme_pcie_reg_addr(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset)
return (volatile void *)((uintptr_t)pctrlr->regs + offset);
}
static int
int
nvme_pcie_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
@ -204,7 +204,7 @@ nvme_pcie_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32
return 0;
}
static int
int
nvme_pcie_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
@ -212,7 +212,7 @@ nvme_pcie_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64
return 0;
}
static int
int
nvme_pcie_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
assert(offset <= sizeof(struct spdk_nvme_registers) - 4);
@ -221,7 +221,7 @@ nvme_pcie_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32
return 0;
}
static int
int
nvme_pcie_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
assert(offset <= sizeof(struct spdk_nvme_registers) - 8);
@ -265,7 +265,7 @@ nvme_pcie_ctrlr_get_cmbsz(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_cmbsz_
&cmbsz->raw);
}
static uint32_t
uint32_t
nvme_pcie_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
return NVME_MAX_XFER_SIZE;
@ -426,7 +426,8 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
SPDK_NVME_QPRIO_URGENT);
}
static struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(void *devhandle)
struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(enum spdk_nvme_transport transport,
void *devhandle)
{
struct spdk_pci_device *pci_dev = devhandle;
struct nvme_pcie_ctrlr *pctrlr;
@ -440,7 +441,7 @@ static struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(void *devhandle)
return NULL;
}
pctrlr->ctrlr.transport = &spdk_nvme_transport_pcie;
pctrlr->ctrlr.transport = SPDK_NVME_TRANSPORT_PCIE;
pctrlr->ctrlr.devhandle = devhandle;
rc = nvme_pcie_ctrlr_allocate_bars(pctrlr);
@ -488,7 +489,7 @@ static struct spdk_nvme_ctrlr *nvme_pcie_ctrlr_construct(void *devhandle)
return &pctrlr->ctrlr;
}
static int
int
nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
@ -518,7 +519,7 @@ nvme_pcie_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
return 0;
}
static void
int
nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
@ -533,6 +534,8 @@ nvme_pcie_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
nvme_pcie_ctrlr_free_bars(pctrlr);
spdk_free(pctrlr);
return 0;
}
static void
@ -543,7 +546,7 @@ nvme_qpair_construct_tracker(struct nvme_tracker *tr, uint16_t cid, uint64_t phy
tr->active = false;
}
static void
int
nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -563,9 +566,11 @@ nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
qpair->num_entries * sizeof(struct spdk_nvme_cmd));
memset(pqpair->cpl, 0,
qpair->num_entries * sizeof(struct spdk_nvme_cpl));
return 0;
}
static int
int
nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
{
struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
@ -898,7 +903,7 @@ nvme_pcie_admin_qpair_destroy(struct spdk_nvme_qpair *qpair)
nvme_pcie_admin_qpair_abort_aers(qpair);
}
static void
int
nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -918,6 +923,8 @@ nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
spdk_free(pqpair->tr);
pqpair->tr = NULL;
}
return 0;
}
static void
@ -939,7 +946,7 @@ nvme_pcie_io_qpair_enable(struct spdk_nvme_qpair *qpair)
nvme_pcie_qpair_abort_trackers(qpair, 0);
}
static void
int
nvme_pcie_qpair_enable(struct spdk_nvme_qpair *qpair)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -950,6 +957,8 @@ nvme_pcie_qpair_enable(struct spdk_nvme_qpair *qpair)
} else {
nvme_pcie_admin_qpair_enable(qpair);
}
return 0;
}
static void
@ -963,7 +972,7 @@ nvme_pcie_io_qpair_disable(struct spdk_nvme_qpair *qpair)
{
}
static void
int
nvme_pcie_qpair_disable(struct spdk_nvme_qpair *qpair)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -974,13 +983,17 @@ nvme_pcie_qpair_disable(struct spdk_nvme_qpair *qpair)
} else {
nvme_pcie_admin_qpair_disable(qpair);
}
return 0;
}
static void
int
nvme_pcie_qpair_fail(struct spdk_nvme_qpair *qpair)
{
nvme_pcie_qpair_abort_trackers(qpair, 1 /* do not retry */);
return 0;
}
static int
@ -1130,7 +1143,7 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
return 0;
}
static struct spdk_nvme_qpair *
struct spdk_nvme_qpair *
nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
{
@ -1172,13 +1185,13 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
return qpair;
}
static int
int
nvme_pcie_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
return _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
}
static int
int
nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -1460,7 +1473,7 @@ nvme_pcie_qpair_check_enabled(struct spdk_nvme_qpair *qpair)
return pqpair->is_enabled;
}
static int
int
nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
struct nvme_tracker *tr;
@ -1516,7 +1529,7 @@ nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_reques
return 0;
}
static int32_t
int32_t
nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
@ -1581,36 +1594,3 @@ nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
return num_completions;
}
const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
.ctrlr_construct = nvme_pcie_ctrlr_construct,
.ctrlr_destruct = nvme_pcie_ctrlr_destruct,
.ctrlr_enable = nvme_pcie_ctrlr_enable,
.ctrlr_get_pci_id = nvme_pcie_ctrlr_get_pci_id,
.ctrlr_set_reg_4 = nvme_pcie_ctrlr_set_reg_4,
.ctrlr_set_reg_8 = nvme_pcie_ctrlr_set_reg_8,
.ctrlr_get_reg_4 = nvme_pcie_ctrlr_get_reg_4,
.ctrlr_get_reg_8 = nvme_pcie_ctrlr_get_reg_8,
.ctrlr_get_max_xfer_size = nvme_pcie_ctrlr_get_max_xfer_size,
.ctrlr_create_io_qpair = nvme_pcie_ctrlr_create_io_qpair,
.ctrlr_delete_io_qpair = nvme_pcie_ctrlr_delete_io_qpair,
.ctrlr_reinit_io_qpair = nvme_pcie_ctrlr_reinit_io_qpair,
.qpair_construct = nvme_pcie_qpair_construct,
.qpair_destroy = nvme_pcie_qpair_destroy,
.qpair_enable = nvme_pcie_qpair_enable,
.qpair_disable = nvme_pcie_qpair_disable,
.qpair_reset = nvme_pcie_qpair_reset,
.qpair_fail = nvme_pcie_qpair_fail,
.qpair_submit_request = nvme_pcie_qpair_submit_request,
.qpair_process_completions = nvme_pcie_qpair_process_completions,
};


@ -330,7 +330,7 @@ nvme_qpair_manual_complete_request(struct spdk_nvme_qpair *qpair,
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
return qpair->transport->qpair_process_completions(qpair, max_completions);
return nvme_transport_qpair_process_completions(qpair, max_completions);
}
int
@ -350,7 +350,7 @@ nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
STAILQ_INIT(&qpair->queued_req);
if (qpair->transport->qpair_construct(qpair)) {
if (nvme_transport_qpair_construct(qpair)) {
SPDK_ERRLOG("qpair_construct() failed\n");
nvme_qpair_destroy(qpair);
return -1;
@ -362,7 +362,7 @@ nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
void
nvme_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
qpair->transport->qpair_destroy(qpair);
nvme_transport_qpair_destroy(qpair);
}
int
@ -397,7 +397,7 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
return rc;
}
return qpair->transport->qpair_submit_request(qpair, req);
return nvme_transport_qpair_submit_request(qpair, req);
}
static void
@ -422,13 +422,13 @@ nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
_nvme_io_qpair_enable(qpair);
}
qpair->transport->qpair_enable(qpair);
nvme_transport_qpair_enable(qpair);
}
void
nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
{
qpair->transport->qpair_disable(qpair);
nvme_transport_qpair_disable(qpair);
}
void
@ -444,5 +444,5 @@ nvme_qpair_fail(struct spdk_nvme_qpair *qpair)
SPDK_NVME_SC_ABORTED_BY_REQUEST, true);
}
qpair->transport->qpair_fail(qpair);
nvme_transport_qpair_fail(qpair);
}

lib/nvme/nvme_transport.c (new file, 183 lines)

@ -0,0 +1,183 @@
/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* NVMe transport abstraction
*/
#include "nvme_internal.h"
#ifdef DEBUG
static __attribute__((noreturn)) void
nvme_transport_unknown(enum spdk_nvme_transport transport)
{
SPDK_ERRLOG("Unknown transport %d\n", (int)transport);
abort();
}
#define TRANSPORT_DEFAULT(transport) default: nvme_transport_unknown(transport);
#else
#define TRANSPORT_DEFAULT(transport)
#endif
#define TRANSPORT_PCIE(func_name, args) case SPDK_NVME_TRANSPORT_PCIE: return nvme_pcie_ ## func_name args;
#define NVME_TRANSPORT_CALL(transport, func_name, args) \
do { \
switch (transport) { \
TRANSPORT_PCIE(func_name, args) \
TRANSPORT_DEFAULT(transport) \
} \
SPDK_UNREACHABLE(); \
} while (0)
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(enum spdk_nvme_transport transport,
void *devhandle)
{
NVME_TRANSPORT_CALL(transport, ctrlr_construct, (transport, devhandle));
}
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_destruct, (ctrlr));
}
int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_enable, (ctrlr));
}
int
nvme_transport_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_get_pci_id, (ctrlr, pci_id));
}
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_set_reg_4, (ctrlr, offset, value));
}
int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_set_reg_8, (ctrlr, offset, value));
}
int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_get_reg_4, (ctrlr, offset, value));
}
int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_get_reg_8, (ctrlr, offset, value));
}
uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_get_max_xfer_size, (ctrlr));
}
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_create_io_qpair, (ctrlr, qid, qprio));
}
int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_delete_io_qpair, (ctrlr, qpair));
}
int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(ctrlr->transport, ctrlr_reinit_io_qpair, (ctrlr, qpair));
}
int
nvme_transport_qpair_construct(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_construct, (qpair));
}
int
nvme_transport_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_destroy, (qpair));
}
int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_enable, (qpair));
}
int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_disable, (qpair));
}
int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_reset, (qpair));
}
int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_fail, (qpair));
}
int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_submit_request, (qpair, req));
}
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
NVME_TRANSPORT_CALL(qpair->transport, qpair_process_completions, (qpair, max_completions));
}
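
In a non-DEBUG build, each dispatcher above reduces to a simple switch; nvme_transport_qpair_enable(), for example, expands roughly to the following (whitespace added for readability; this is an illustrative macro expansion, not part of the diff):

int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	do {
		switch (qpair->transport) {
		case SPDK_NVME_TRANSPORT_PCIE:
			return nvme_pcie_qpair_enable(qpair);
		}
		SPDK_UNREACHABLE();
	} while (0);
}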


@ -39,9 +39,6 @@
#include "lib/nvme/unit/test_env.c"
const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
};
int
spdk_pci_enumerate(enum spdk_pci_device_type type,
spdk_pci_enum_cb enum_cb,
@ -60,6 +57,12 @@ spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
return pci_id;
}
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(enum spdk_nvme_transport transport,
void *devhandle)
{
return NULL;
}
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{

View File

@ -57,24 +57,26 @@ struct spdk_nvme_registers g_ut_nvme_regs = {};
__thread int nvme_thread_ioq_index = -1;
static struct spdk_nvme_ctrlr *ut_ctrlr_construct(void *devhandle)
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(enum spdk_nvme_transport transport,
void *devhandle)
{
return NULL;
}
static void
ut_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}
static int
ut_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
static int
ut_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id)
int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
return 0;
}
int
nvme_transport_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id)
{
if (ctrlr == NULL || pci_id == NULL) {
return -EINVAL;
@ -88,46 +90,47 @@ ut_ctrlr_get_pci_id(struct spdk_nvme_ctrlr *ctrlr, struct spdk_pci_id *pci_id)
return 0;
}
static int
ut_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
return 0;
}
static int
ut_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
return 0;
}
static int
ut_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
return 0;
}
static int
ut_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
return 0;
}
static uint32_t
ut_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
return UINT32_MAX;
}
static struct spdk_nvme_qpair *
ut_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_nvme_qprio qprio)
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
enum spdk_nvme_qprio qprio)
{
struct spdk_nvme_qpair *qpair;
@ -141,39 +144,24 @@ ut_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid, enum spdk_
return qpair;
}
static int
ut_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
free(qpair);
return 0;
}
static void
ut_qpair_reset(struct spdk_nvme_qpair *qpair)
int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
return 0;
}
static const struct spdk_nvme_transport nvme_ctrlr_ut_transport = {
.ctrlr_construct = ut_ctrlr_construct,
.ctrlr_destruct = ut_ctrlr_destruct,
.ctrlr_enable = ut_ctrlr_enable,
.ctrlr_get_pci_id = ut_ctrlr_get_pci_id,
.ctrlr_set_reg_4 = ut_ctrlr_set_reg_4,
.ctrlr_set_reg_8 = ut_ctrlr_set_reg_8,
.ctrlr_get_reg_4 = ut_ctrlr_get_reg_4,
.ctrlr_get_reg_8 = ut_ctrlr_get_reg_8,
.ctrlr_get_max_xfer_size = ut_ctrlr_get_max_xfer_size,
.ctrlr_create_io_qpair = ut_ctrlr_create_io_qpair,
.ctrlr_delete_io_qpair = ut_ctrlr_delete_io_qpair,
.qpair_reset = ut_qpair_reset,
};
int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
return 0;
}
int nvme_qpair_construct(struct spdk_nvme_qpair *qpair, uint16_t id,
uint16_t num_entries,
@ -405,7 +393,6 @@ test_nvme_ctrlr_init_en_1_rdy_0(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -454,7 +441,6 @@ test_nvme_ctrlr_init_en_1_rdy_1(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -496,7 +482,6 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -640,7 +625,6 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -785,7 +769,6 @@ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -931,7 +914,6 @@ test_nvme_ctrlr_init_en_0_rdy_0(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -964,7 +946,6 @@ test_nvme_ctrlr_init_en_0_rdy_1(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
ctrlr.transport = &nvme_ctrlr_ut_transport;
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
@ -1005,7 +986,6 @@ setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
uint32_t i;
CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
ctrlr->transport = &nvme_ctrlr_ut_transport;
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
@ -1195,11 +1175,9 @@ test_nvme_ctrlr_construct_intel_support_log_page_list(void)
struct spdk_nvme_intel_log_page_directory payload = {};
struct spdk_pci_id pci_id;
ctrlr.transport = &nvme_ctrlr_ut_transport;
/* set a invalid vendor id */
g_pci_vendor_id = 0xFFFF;
ut_ctrlr_get_pci_id(&ctrlr, &pci_id);
nvme_transport_ctrlr_get_pci_id(&ctrlr, &pci_id);
ctrlr.quirks = nvme_get_quirks(&pci_id);
nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
@ -1209,7 +1187,7 @@ test_nvme_ctrlr_construct_intel_support_log_page_list(void)
/* set valid vendor id and log page directory*/
g_pci_vendor_id = SPDK_PCI_VID_INTEL;
payload.temperature_statistics_log_len = 1;
ut_ctrlr_get_pci_id(&ctrlr, &pci_id);
nvme_transport_ctrlr_get_pci_id(&ctrlr, &pci_id);
ctrlr.quirks = nvme_get_quirks(&pci_id);
memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
@ -1230,7 +1208,7 @@ test_nvme_ctrlr_construct_intel_support_log_page_list(void)
g_pci_device_id = 0x0953;
g_pci_subvendor_id = SPDK_PCI_VID_INTEL;
g_pci_subdevice_id = 0x3702;
ut_ctrlr_get_pci_id(&ctrlr, &pci_id);
nvme_transport_ctrlr_get_pci_id(&ctrlr, &pci_id);
ctrlr.quirks = nvme_get_quirks(&pci_id);
memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));


@ -39,10 +39,6 @@
struct nvme_request *g_request = NULL;
const struct spdk_nvme_transport spdk_nvme_transport_pcie = {
};
int
spdk_pci_enumerate(enum spdk_pci_device_type type,
spdk_pci_enum_cb enum_cb,
@ -60,6 +56,12 @@ static int nvme_request_next_sge(void *cb_arg, uint64_t *address, uint32_t *leng
return 0;
}
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(enum spdk_nvme_transport transport,
void *devhandle)
{
return NULL;
}
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{


@ -183,45 +183,55 @@ nvme_request_remove_child(struct nvme_request *parent,
TAILQ_REMOVE(&parent->children, child, child_tailq);
}
static int
ut_qpair_construct(struct spdk_nvme_qpair *qpair)
int
nvme_transport_qpair_construct(struct spdk_nvme_qpair *qpair)
{
return 0;
}
static void
ut_qpair_destroy(struct spdk_nvme_qpair *qpair)
int
nvme_transport_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
return 0;
}
static int
ut_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
return 0;
}
int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
return 0;
}
int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
return 0;
}
int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
// TODO
return 0;
}
static int32_t
ut_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
// TODO
return 0;
}
static const struct spdk_nvme_transport nvme_qpair_ut_transport = {
.qpair_construct = ut_qpair_construct,
.qpair_destroy = ut_qpair_destroy,
.qpair_submit_request = ut_qpair_submit_request,
.qpair_process_completions = ut_qpair_process_completions,
};
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
struct spdk_nvme_ctrlr *ctrlr)
{
memset(ctrlr, 0, sizeof(*ctrlr));
ctrlr->transport = &nvme_qpair_ut_transport;
ctrlr->free_io_qids = NULL;
TAILQ_INIT(&ctrlr->active_io_qpairs);
TAILQ_INIT(&ctrlr->active_procs);