Create a utility and enable WRR arbitration mechanism

Change-Id: I2194863ae812ece72c17b78e003ccf7895b8a812
Signed-off-by: GangCao <gang.cao@intel.com>
This commit is contained in:
Ziye Yang 2016-06-14 15:19:38 +08:00 committed by Daniel Verkamp
parent 389fcba814
commit 7991eb1957
9 changed files with 1797 additions and 9 deletions

View File

@ -34,7 +34,7 @@
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..)
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
DIRS-y += hello_world identify perf reserve nvme_manage
DIRS-y += hello_world identify perf reserve nvme_manage arbitration
DIRS-$(CONFIG_FIO_PLUGIN) += fio_plugin

1
examples/nvme/arbitration/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
arbitration

View File

@ -0,0 +1,57 @@
#
# BSD LICENSE
#
# Copyright (c) Intel Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Makefile for the NVMe arbitration example application.
SPDK_ROOT_DIR := $(CURDIR)/../../..
include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
APP = arbitration
C_SRCS := arbitration.c
CFLAGS += -I. $(DPDK_INC)
# Static SPDK libraries the example links against.
SPDK_LIBS += $(SPDK_ROOT_DIR)/lib/nvme/libspdk_nvme.a \
$(SPDK_ROOT_DIR)/lib/util/libspdk_util.a \
$(SPDK_ROOT_DIR)/lib/memory/libspdk_memory.a
LIBS += $(SPDK_LIBS) $(PCIACCESS_LIB) -lpthread $(DPDK_LIB) -lrt
all : $(APP)
$(APP) : $(OBJS) $(SPDK_LIBS)
$(LINK_C)
clean :
$(CLEAN_C) $(APP)
include $(SPDK_ROOT_DIR)/mk/spdk.deps.mk

File diff suppressed because it is too large Load Diff

View File

@ -72,6 +72,10 @@ struct spdk_nvme_ctrlr_opts {
* Enable submission queue in controller memory buffer
*/
bool use_cmb_sqs;
/**
* Type of arbitration mechanism
*/
enum spdk_nvme_cc_ams arb_mechanism;
};
/**

View File

@ -329,6 +329,28 @@ enum spdk_nvme_qprio {
SPDK_NVME_QPRIO_LOW = 0x3
};
/**
 * Optional Arbitration Mechanisms Supported by the controller (CAP.AMS).
 *
 * Each of the two CAP.AMS bits (18:17) is set to '1' when the controller
 * supports the corresponding optional mechanism.
 * There is no bit for round robin: every controller must support it, and
 * it is selected by writing 0x0 to CC.AMS.
 */
enum spdk_nvme_cap_ams {
SPDK_NVME_CAP_AMS_WRR = 0x1, /**< weighted round robin */
SPDK_NVME_CAP_AMS_VS = 0x2, /**< vendor specific */
};
/**
 * Arbitration Mechanism Selected for the controller (CC.AMS).
 *
 * Values 0x2 through 0x6 are reserved.
 */
enum spdk_nvme_cc_ams {
SPDK_NVME_CC_AMS_RR = 0x0, /**< default round robin */
SPDK_NVME_CC_AMS_WRR = 0x1, /**< weighted round robin */
SPDK_NVME_CC_AMS_VS = 0x7, /**< vendor specific */
};
struct spdk_nvme_cmd {
/* dword 0 */
uint16_t opc : 8; /* opcode */

View File

@ -43,6 +43,7 @@ spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts)
{
opts->num_io_queues = DEFAULT_MAX_IO_QUEUES;
opts->use_cmb_sqs = false;
opts->arb_mechanism = SPDK_NVME_CC_AMS_RR;
}
static int
@ -98,12 +99,25 @@ spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
enum spdk_nvme_qprio qprio)
{
struct spdk_nvme_qpair *qpair;
union spdk_nvme_cc_register cc;
cc.raw = nvme_mmio_read_4(ctrlr, cc.raw);
/* Only the low 2 bits (values 0, 1, 2, 3) of QPRIO are valid. */
if ((qprio & 3) != qprio) {
return NULL;
}
/*
* Only value SPDK_NVME_QPRIO_URGENT(0) is valid for the
* default round robin arbitration method.
*/
if ((cc.bits.ams == SPDK_NVME_CC_AMS_RR) && (qprio != SPDK_NVME_QPRIO_URGENT)) {
nvme_printf(ctrlr,
"invalid queue priority for default round robin arbitration method\n");
return NULL;
}
nvme_mutex_lock(&ctrlr->ctrlr_lock);
/*
@ -436,6 +450,7 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
union spdk_nvme_cc_register cc;
union spdk_nvme_aqa_register aqa;
union spdk_nvme_cap_lo_register cap_lo;
cc.raw = nvme_mmio_read_4(ctrlr, cc.raw);
@ -455,7 +470,6 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
cc.bits.en = 1;
cc.bits.css = 0;
cc.bits.ams = 0;
cc.bits.shn = 0;
cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
@ -463,6 +477,27 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
/* Page size is 2 ^ (12 + mps). */
cc.bits.mps = nvme_u32log2(PAGE_SIZE) - 12;
cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo.raw);
switch (ctrlr->opts.arb_mechanism) {
case SPDK_NVME_CC_AMS_RR:
break;
case SPDK_NVME_CC_AMS_WRR:
if (SPDK_NVME_CAP_AMS_WRR & cap_lo.bits.ams) {
break;
}
return -EINVAL;
case SPDK_NVME_CC_AMS_VS:
if (SPDK_NVME_CAP_AMS_VS & cap_lo.bits.ams) {
break;
}
return -EINVAL;
default:
return -EINVAL;
}
cc.bits.ams = ctrlr->opts.arb_mechanism;
nvme_mmio_write_4(ctrlr, cc.raw, cc.raw);
return 0;

View File

@ -48,6 +48,10 @@ if [ -d /usr/src/fio ]; then
timing_exit fio_plugin
fi
timing_enter arbitration
$rootdir/examples/nvme/arbitration/arbitration -t 3
timing_exit arbitration
#Now test nvme reset function
timing_enter reset
$testdir/reset/reset -q 64 -w write -s 4096 -t 2

View File

@ -408,6 +408,363 @@ test_nvme_ctrlr_init_en_1_rdy_1(void)
nvme_ctrlr_destruct(&ctrlr);
}
/*
 * Verify controller initialization when CAP.AMS advertises no optional
 * arbitration mechanisms (mandatory round robin only): selecting RR must
 * enable the controller (CC.EN = 1), while WRR, VS, or an invalid value
 * must leave the controller disabled (CC.EN stays 0).
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
 * Initial state: CC.EN = 0, CSTS.RDY = 0
 * init() should set CC.EN = 1.
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Only the mandatory round robin mechanism is supported
 * (no optional CAP.AMS bits set).
 */
g_ut_nvme_regs.cap_lo.bits.ams = 0x0;
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
/*
 * Case 1: default round robin arbitration mechanism selected
 */
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 2: weighted round robin arbitration mechanism selected
 * (not advertised in CAP.AMS, so enable must fail and CC.EN stays 0)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 3: vendor specific arbitration mechanism selected
 * (not advertised in CAP.AMS, so enable must fail and CC.EN stays 0)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 4: invalid arbitration mechanism selected (out of enum range)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 5: reset to default round robin arbitration mechanism
 * (supported again, so the controller enables)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
/*
 * Transition to CSTS.RDY = 1.
 */
g_ut_nvme_regs.csts.bits.rdy = 1;
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
nvme_ctrlr_destruct(&ctrlr);
}
/*
 * Verify controller initialization when CAP.AMS advertises weighted round
 * robin support: selecting RR or WRR must enable the controller, while VS
 * or an invalid value must leave CC.EN = 0.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
 * Initial state: CC.EN = 0, CSTS.RDY = 0
 * init() should set CC.EN = 1.
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Weighted round robin supported (in addition to mandatory round robin)
 */
g_ut_nvme_regs.cap_lo.bits.ams = SPDK_NVME_CAP_AMS_WRR;
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
/*
 * Case 1: default round robin arbitration mechanism selected
 */
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 2: weighted round robin arbitration mechanism selected
 * (advertised in CAP.AMS, so the controller enables)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 3: vendor specific arbitration mechanism selected
 * (not advertised in CAP.AMS, so enable must fail and CC.EN stays 0)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 4: invalid arbitration mechanism selected (out of enum range)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 5: reset to weighted round robin arbitration mechanism
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
/*
 * Transition to CSTS.RDY = 1.
 */
g_ut_nvme_regs.csts.bits.rdy = 1;
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
nvme_ctrlr_destruct(&ctrlr);
}
/*
 * Verify controller initialization when CAP.AMS advertises vendor specific
 * arbitration support: selecting RR or VS must enable the controller, while
 * WRR or an invalid value must leave CC.EN = 0.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
/*
 * Initial state: CC.EN = 0, CSTS.RDY = 0
 * init() should set CC.EN = 1.
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Vendor specific arbitration supported
 * (in addition to mandatory round robin)
 */
g_ut_nvme_regs.cap_lo.bits.ams = SPDK_NVME_CAP_AMS_VS;
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
/*
 * Case 1: default round robin arbitration mechanism selected
 */
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 2: weighted round robin arbitration mechanism selected
 * (not advertised in CAP.AMS, so enable must fail and CC.EN stays 0)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 3: vendor specific arbitration mechanism selected
 * (advertised in CAP.AMS, so the controller enables)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 4: invalid arbitration mechanism selected (out of enum range)
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
/*
 * Reset to initial state
 */
g_ut_nvme_regs.cc.bits.en = 0;
g_ut_nvme_regs.csts.bits.rdy = 0;
/*
 * Case 5: reset to vendor specific arbitration mechanism
 */
SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr, NULL) == 0);
ctrlr.cdata.nn = 1;
ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
/*
 * Transition to CSTS.RDY = 1.
 */
g_ut_nvme_regs.csts.bits.rdy = 1;
CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_READY);
g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
nvme_ctrlr_destruct(&ctrlr);
}
static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
@ -496,13 +853,19 @@ cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
}
static void
test_alloc_io_qpair_1(void)
test_alloc_io_qpair_rr_1(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0;
setup_qpairs(&ctrlr, 1);
/*
* Fake to simulate the controller with default round robin
* arbitration mechanism.
*/
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
@ -512,25 +875,41 @@ test_alloc_io_qpair_1(void)
/*
* Now that the qpair has been returned to the free list,
* we should be able to allocate it again
* we should be able to allocate it again.
*/
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/* Only 0 qprio is acceptable for default round robin arbitration mechanism */
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
SPDK_CU_ASSERT_FATAL(q0 == NULL);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
SPDK_CU_ASSERT_FATAL(q0 == NULL);
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
SPDK_CU_ASSERT_FATAL(q0 == NULL);
/* Only 0 ~ 3 qprio is acceptable */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);
cleanup_qpairs(&ctrlr);
}
static void
test_alloc_io_qpair_2(void)
test_alloc_io_qpair_wrr_1(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0, *q1;
setup_qpairs(&ctrlr, 2);
/*
* Fake to simulate the controller with weighted round robin
* arbitration mechanism.
*/
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
/*
* Allocate 2 qpairs and free them
*/
@ -555,6 +934,72 @@ test_alloc_io_qpair_2(void)
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
/* Only 0 ~ 3 qprio is acceptable */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 4) == NULL);
cleanup_qpairs(&ctrlr);
}
/*
 * Verify I/O qpair allocation under weighted round robin arbitration with
 * four qpairs: all four qprio values (0-3) must be accepted, allocation
 * beyond the configured count must fail, and freed qpairs must be
 * reusable with new (possibly duplicate) priorities.
 */
static void
test_alloc_io_qpair_wrr_2(void)
{
struct spdk_nvme_ctrlr ctrlr = {};
struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
setup_qpairs(&ctrlr, 4);
/*
 * Fake to simulate the controller with weighted round robin
 * arbitration mechanism.
 */
g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 2);
SPDK_CU_ASSERT_FATAL(q2 != NULL);
SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
SPDK_CU_ASSERT_FATAL(q3 != NULL);
SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
/* Only 4 I/O qpairs were allocated, so this should fail */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 0) == NULL);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
/*
 * Now that the qpair has been returned to the free list,
 * we should be able to allocate it again.
 *
 * Allocate 4 I/O qpairs and half of them with same qprio.
 */
q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
SPDK_CU_ASSERT_FATAL(q0 != NULL);
SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 1);
SPDK_CU_ASSERT_FATAL(q1 != NULL);
SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
SPDK_CU_ASSERT_FATAL(q2 != NULL);
SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, 3);
SPDK_CU_ASSERT_FATAL(q3 != NULL);
SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
/*
 * Free all I/O qpairs, this time in allocation order
 */
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
cleanup_qpairs(&ctrlr);
}
@ -692,8 +1137,15 @@ int main(int argc, char **argv)
test_nvme_ctrlr_init_en_0_rdy_0) == NULL
|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
test_nvme_ctrlr_init_en_0_rdy_1) == NULL
|| CU_add_test(suite, "alloc_io_qpair 1", test_alloc_io_qpair_1) == NULL
|| CU_add_test(suite, "alloc_io_qpair 2", test_alloc_io_qpair_2) == NULL
|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
|| CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
|| CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
|| CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL