/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe over PCIe common library
 */

#include "spdk/stdinc.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "nvme_internal.h"
#include "nvme_pcie_internal.h"

static uint64_t
nvme_pcie_vtophys(struct spdk_nvme_ctrlr *ctrlr, const void *buf)
{
	if (spdk_likely(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE)) {
		return spdk_vtophys(buf, NULL);
	} else {
		/* vfio-user address translation with IOVA=VA mode */
		return (uint64_t)(uintptr_t)buf;
	}
}
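
/*
 * Illustrative sketch (not part of the library): nvme_pcie_vtophys() is how
 * the code below turns a virtual address into a bus address suitable for a
 * hardware queue base or PRP entry. A hypothetical caller looks like:
 *
 *	uint64_t paddr = nvme_pcie_vtophys(ctrlr, buf);
 *	if (paddr == SPDK_VTOPHYS_ERROR) {
 *		return -EFAULT;
 *	}
 *	cmd->dptr.prp.prp1 = paddr;
 *
 * For the non-PCIe (vfio-user) case the virtual address is returned
 * unchanged, since that transport maps memory with IOVA == VA.
 */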

int
nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	uint32_t i;

	/* All head/tail values are set to 0. */
	pqpair->last_sq_tail = pqpair->sq_tail = pqpair->sq_head = pqpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 * bit on completions to 1. So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	pqpair->flags.phase = 1;
	for (i = 0; i < pqpair->num_entries; i++) {
		pqpair->cpl[i].status.p = 0;
	}

	return 0;
}
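
/*
 * For illustration only (a simplified sketch, not this library's actual
 * completion path): the phase flag set above is consumed when polling the
 * completion queue. An entry is new only if its phase bit matches the
 * qpair's expected phase, and the expected phase flips on each wrap:
 *
 *	struct spdk_nvme_cpl *cpl = &pqpair->cpl[pqpair->cq_head];
 *	if (cpl->status.p != pqpair->flags.phase) {
 *		return;				// no new completion yet
 *	}
 *	if (++pqpair->cq_head == pqpair->num_entries) {
 *		pqpair->cq_head = 0;
 *		pqpair->flags.phase = !pqpair->flags.phase;	// CQ rolled over
 *	}
 */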

static void
nvme_qpair_construct_tracker(struct nvme_tracker *tr, uint16_t cid, uint64_t phys_addr)
{
	tr->prp_sgl_bus_addr = phys_addr + offsetof(struct nvme_tracker, u.prp);
	tr->cid = cid;
	tr->req = NULL;
}

static void *
nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t size, uint64_t alignment,
			  uint64_t *phys_addr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	uintptr_t addr;

	if (pctrlr->cmb.mem_register_addr != NULL) {
		/* BAR is mapped for data */
		return NULL;
	}

	/* Round the next free offset up to the requested alignment (a power of 2). */
	addr = (uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset;
	addr = (addr + (alignment - 1)) & ~(alignment - 1);

	/* CMB may only consume part of the BAR, calculate accordingly */
	if (addr + size > ((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.size)) {
		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
		return NULL;
	}

	*phys_addr = pctrlr->cmb.bar_pa + addr - (uintptr_t)pctrlr->cmb.bar_va;

	pctrlr->cmb.current_offset = (addr + size) - (uintptr_t)pctrlr->cmb.bar_va;

	return (void *)addr;
}
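
/*
 * nvme_pcie_ctrlr_alloc_cmb() is a simple bump allocator over the controller
 * memory buffer: it rounds the current offset up to `alignment`, hands out
 * [addr, addr + size), and advances the offset. A sketch of the intended use,
 * matching nvme_pcie_qpair_construct() below:
 *
 *	uint64_t bus_addr;
 *	void *sq = nvme_pcie_ctrlr_alloc_cmb(ctrlr,
 *			num_entries * sizeof(struct spdk_nvme_cmd),
 *			page_align, &bus_addr);
 *	if (sq == NULL) {
 *		// CMB unavailable or exhausted: fall back to host memory
 *	}
 */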

int
nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
			  const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr;
	uint16_t i;
	uint16_t num_trackers;
	size_t page_align = sysconf(_SC_PAGESIZE);
	size_t queue_align, queue_len;
	uint32_t flags = SPDK_MALLOC_DMA;
	uint64_t sq_paddr = 0;
	uint64_t cq_paddr = 0;

	if (opts) {
		pqpair->sq_vaddr = opts->sq.vaddr;
		pqpair->cq_vaddr = opts->cq.vaddr;
		sq_paddr = opts->sq.paddr;
		cq_paddr = opts->cq.paddr;
	}

	pqpair->retry_count = ctrlr->opts.transport_retry_count;

	/*
	 * Limit the maximum number of completions to return per call to prevent wraparound,
	 * and calculate how many trackers can be submitted at once without overflowing the
	 * completion queue.
	 */
	pqpair->max_completions_cap = pqpair->num_entries / 4;
	pqpair->max_completions_cap = spdk_max(pqpair->max_completions_cap, NVME_MIN_COMPLETIONS);
	pqpair->max_completions_cap = spdk_min(pqpair->max_completions_cap, NVME_MAX_COMPLETIONS);
	num_trackers = pqpair->num_entries - pqpair->max_completions_cap;

	SPDK_INFOLOG(nvme, "max_completions_cap = %" PRIu16 " num_trackers = %" PRIu16 "\n",
		     pqpair->max_completions_cap, num_trackers);

	assert(num_trackers != 0);

	pqpair->sq_in_cmb = false;

	if (nvme_qpair_is_admin_queue(&pqpair->qpair)) {
		flags |= SPDK_MALLOC_SHARE;
	}

	/* cmd and cpl rings must be aligned on page size boundaries. */
	if (ctrlr->opts.use_cmb_sqs) {
		pqpair->cmd = nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
							page_align, &pqpair->cmd_bus_addr);
		if (pqpair->cmd != NULL) {
			pqpair->sq_in_cmb = true;
		}
	}

	if (pqpair->sq_in_cmb == false) {
		if (pqpair->sq_vaddr) {
			pqpair->cmd = pqpair->sq_vaddr;
		} else {
			/* To ensure physical address contiguity we make each ring occupy
			 * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
			 */
			queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cmd);
			queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
			pqpair->cmd = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
			if (pqpair->cmd == NULL) {
				SPDK_ERRLOG("alloc qpair_cmd failed\n");
				return -ENOMEM;
			}
		}
		if (sq_paddr) {
			assert(pqpair->sq_vaddr != NULL);
			pqpair->cmd_bus_addr = sq_paddr;
		} else {
			pqpair->cmd_bus_addr = nvme_pcie_vtophys(ctrlr, pqpair->cmd);
			if (pqpair->cmd_bus_addr == SPDK_VTOPHYS_ERROR) {
				SPDK_ERRLOG("spdk_vtophys(pqpair->cmd) failed\n");
				return -EFAULT;
			}
		}
	}

	if (pqpair->cq_vaddr) {
		pqpair->cpl = pqpair->cq_vaddr;
	} else {
		queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cpl);
		queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
		pqpair->cpl = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
		if (pqpair->cpl == NULL) {
			SPDK_ERRLOG("alloc qpair_cpl failed\n");
			return -ENOMEM;
		}
	}
	if (cq_paddr) {
		assert(pqpair->cq_vaddr != NULL);
		pqpair->cpl_bus_addr = cq_paddr;
	} else {
		pqpair->cpl_bus_addr = nvme_pcie_vtophys(ctrlr, pqpair->cpl);
		if (pqpair->cpl_bus_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("spdk_vtophys(pqpair->cpl) failed\n");
			return -EFAULT;
		}
	}

	/* Each queue pair has a submission and a completion doorbell register,
	 * spaced by the controller's doorbell stride (CAP.DSTRD).
	 */
	pqpair->sq_tdbl = pctrlr->doorbell_base + (2 * qpair->id + 0) * pctrlr->doorbell_stride_u32;
	pqpair->cq_hdbl = pctrlr->doorbell_base + (2 * qpair->id + 1) * pctrlr->doorbell_stride_u32;

	/*
	 * Reserve space for all of the trackers in a single allocation.
	 * struct nvme_tracker must be padded so that its size is already a power of 2.
	 * This ensures the PRP list embedded in the nvme_tracker object will not span a
	 * 4KB boundary, while allowing access to trackers in tr[] via normal array indexing.
	 */
	pqpair->tr = spdk_zmalloc(num_trackers * sizeof(*tr), sizeof(*tr), NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (pqpair->tr == NULL) {
		SPDK_ERRLOG("nvme_tr failed\n");
		return -ENOMEM;
	}

	TAILQ_INIT(&pqpair->free_tr);
	TAILQ_INIT(&pqpair->outstanding_tr);

	for (i = 0; i < num_trackers; i++) {
		tr = &pqpair->tr[i];
		nvme_qpair_construct_tracker(tr, i, nvme_pcie_vtophys(ctrlr, tr));
		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
	}

	nvme_pcie_qpair_reset(qpair);

	return 0;
}
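
/*
 * Usage sketch (illustrative, under the assumption the caller goes through the
 * public API): callers that bring their own queue memory pass it in via
 * spdk_nvme_io_qpair_opts before this function runs. A paddr is only honored
 * together with the matching vaddr (see the asserts above):
 *
 *	struct spdk_nvme_io_qpair_opts opts;
 *	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
 *	opts.sq.vaddr = my_sq_buf;	// caller-owned, page-aligned
 *	opts.sq.paddr = my_sq_paddr;	// optional; must accompany vaddr
 *	opts.cq.vaddr = my_cq_buf;
 *	opts.cq.paddr = my_cq_paddr;
 *
 * my_sq_buf and friends are hypothetical caller-provided buffers.
 */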

int
nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t num_entries)
{
	struct nvme_pcie_qpair *pqpair;
	int rc;

	pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (pqpair == NULL) {
		return -ENOMEM;
	}

	pqpair->num_entries = num_entries;
	pqpair->flags.delay_cmd_submit = 0;

	ctrlr->adminq = &pqpair->qpair;

	rc = nvme_qpair_init(ctrlr->adminq,
			     0, /* qpair ID */
			     ctrlr,
			     SPDK_NVME_QPRIO_URGENT,
			     num_entries);
	if (rc != 0) {
		return rc;
	}

	return nvme_pcie_qpair_construct(ctrlr->adminq, NULL);
}
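
/*
 * Note: the admin queue is always qpair ID 0, and both the qpair object and
 * its rings are allocated with SPDK_MALLOC_SHARE (see the admin-queue branch
 * in nvme_pcie_qpair_construct() above) so that secondary processes attached
 * to the same controller can access them; the two functions below rely on
 * that shared visibility.
 */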

/**
 * Note: the ctrlr_lock must be held when calling this function.
 */
void
nvme_pcie_qpair_insert_pending_admin_request(struct spdk_nvme_qpair *qpair,
		struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_request *active_req = req;
	struct spdk_nvme_ctrlr_process *active_proc;

	/*
	 * The admin request is from another process. Move it to the
	 * per-process list for that process to handle it later.
	 */
	assert(nvme_qpair_is_admin_queue(qpair));
	assert(active_req->pid != getpid());

	active_proc = nvme_ctrlr_get_process(ctrlr, active_req->pid);
	if (active_proc) {
		/* Save the original completion information */
		memcpy(&active_req->cpl, cpl, sizeof(*cpl));
		STAILQ_INSERT_TAIL(&active_proc->active_reqs, active_req, stailq);
	} else {
		SPDK_ERRLOG("The owning process (pid %d) is not found. Dropping the request.\n",
			    active_req->pid);

		nvme_free_request(active_req);
	}
}

/**
 * Note: the ctrlr_lock must be held when calling this function.
 */
void
nvme_pcie_qpair_complete_pending_admin_request(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_request *req, *tmp_req;
	pid_t pid = getpid();
	struct spdk_nvme_ctrlr_process *proc;

	/*
	 * Check whether there is any pending admin request from
	 * other active processes.
	 */
	assert(nvme_qpair_is_admin_queue(qpair));

	proc = nvme_ctrlr_get_current_process(ctrlr);
	if (!proc) {
		SPDK_ERRLOG("the active process (pid %d) is not found for this controller.\n", pid);
		assert(proc);
		return;
	}

	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);

		assert(req->pid == pid);

		nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &req->cpl);
		nvme_free_request(req);
	}
}
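
/*
 * Hypothetical caller sketch for the two functions above (both require the
 * controller lock, per their doc comments):
 *
 *	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
 *	nvme_pcie_qpair_complete_pending_admin_request(ctrlr->adminq);
 *	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
 */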

int
nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
				 void *cb_arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_CQ;

	cmd->cdw10_bits.create_io_q.qid = io_que->id;
	/* QSIZE is a 0's based value in the NVMe specification. */
	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;

	/* PC = 1: the queue is physically contiguous. */
	cmd->cdw11_bits.create_io_cq.pc = 1;
	cmd->dptr.prp.prp1 = pqpair->cpl_bus_addr;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_SQ;

	cmd->cdw10_bits.create_io_q.qid = io_que->id;
	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;
	cmd->cdw11_bits.create_io_sq.pc = 1;
	cmd->cdw11_bits.create_io_sq.qprio = io_que->qprio;
	/* Pair the submission queue with the completion queue of the same ID. */
	cmd->cdw11_bits.create_io_sq.cqid = io_que->id;
	cmd->dptr.prp.prp1 = pqpair->cmd_bus_addr;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}
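
/*
 * Ordering note: a completion queue must exist before a submission queue that
 * references it (CDW11.CQID) is created, and it is deleted last on teardown.
 * _nvme_pcie_ctrlr_create_io_qpair() below follows exactly this sequence,
 * including deleting the CQ again if SQ creation fails.
 */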

int
nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd->cdw10_bits.delete_io_q.qid = qpair->id;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd->cdw10_bits.delete_io_q.qid = qpair->id;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static int
_nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 uint16_t qid)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_completion_poll_status *status;
	int rc;

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		return -ENOMEM;
	}

	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, status);
	if (rc != 0) {
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
		SPDK_ERRLOG("nvme_create_io_cq failed!\n");
		if (!status->timed_out) {
			free(status);
		}
		return -1;
	}

	memset(status, 0, sizeof(*status));
	rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, status);
	if (rc != 0) {
		free(status);
		return rc;
	}

	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
		SPDK_ERRLOG("nvme_create_io_sq failed!\n");
		if (status->timed_out) {
			/* The request is still queued; its memory will be freed by the
			 * completion callback. Allocate a new status tracker instead.
			 */
			status = calloc(1, sizeof(*status));
			if (!status) {
				SPDK_ERRLOG("Failed to allocate status tracker\n");
				return -ENOMEM;
			}
		}

		memset(status, 0, sizeof(*status));
		/* Attempt to delete the completion queue */
		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, status);
		if (rc != 0) {
			/* The original or newly allocated status structure can be freed since
			 * the corresponding request has been completed or failed to submit.
			 */
			free(status);
			return -1;
		}
		nvme_wait_for_completion(ctrlr->adminq, status);
		if (!status->timed_out) {
			/* status can be freed regardless of the nvme_wait_for_completion return value */
			free(status);
		}
		return -1;
	}

	if (ctrlr->shadow_doorbell) {
		pqpair->shadow_doorbell.sq_tdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 0) *
						  pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.cq_hdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 1) *
						  pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.sq_eventidx = ctrlr->eventidx + (2 * qpair->id + 0) *
						      pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.cq_eventidx = ctrlr->eventidx + (2 * qpair->id + 1) *
						      pctrlr->doorbell_stride_u32;
		pqpair->flags.has_shadow_doorbell = 1;
	} else {
		pqpair->flags.has_shadow_doorbell = 0;
	}
	nvme_pcie_qpair_reset(qpair);
	free(status);

	return 0;
}
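
/*
 * Illustrative sketch (simplified, not this library's actual submit path):
 * with shadow doorbells, the driver mirrors each doorbell update into the
 * shadow area and only rings the real MMIO doorbell when the new value
 * crosses the (typically emulated) controller's eventidx:
 *
 *	*pqpair->shadow_doorbell.sq_tdbl = pqpair->sq_tail;
 *	if (update_crosses_eventidx(old_tail, pqpair->sq_tail,
 *				    *pqpair->shadow_doorbell.sq_eventidx)) {
 *		spdk_mmio_write_4(pqpair->sq_tdbl, pqpair->sq_tail);
 *	}
 *
 * update_crosses_eventidx() is a hypothetical helper named here only for
 * illustration of the eventidx check.
 */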

int
nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	if (nvme_qpair_is_admin_queue(qpair)) {
		return 0;
	} else {
		return _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
	}
}

void
nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	/* No transport-specific action is needed to disconnect a PCIe qpair. */
}

struct spdk_nvme_transport_poll_group *
nvme_pcie_poll_group_create(void)
{
	struct nvme_pcie_poll_group *group = calloc(1, sizeof(*group));

	if (group == NULL) {
		SPDK_ERRLOG("Unable to allocate poll group.\n");
		return NULL;
	}

	return &group->group;
}

int
nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			 struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			    struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int64_t
nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int32_t local_completions = 0;
	int64_t total_completions = 0;

	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, tgroup->group->ctx);
	}

	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
		if (local_completions < 0) {
			disconnected_qpair_cb(qpair, tgroup->group->ctx);
			local_completions = 0;
		}
		total_completions += local_completions;
	}

	return total_completions;
}
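
/*
 * A minimal application-side polling sketch (assuming the generic poll group
 * API): the transport-specific function above is reached through
 * spdk_nvme_poll_group_process_completions().
 *
 *	int64_t done = spdk_nvme_poll_group_process_completions(pg, 0, disconnect_cb);
 *	// negative on transport error, otherwise the number of completions drained
 *
 * pg and disconnect_cb are hypothetical caller-owned objects.
 */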

int
nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(tgroup);

	return 0;
}