spdk/lib/event/subsystems/nvmf/nvmf_rpc.c

/*-
* BSD LICENSE
*
* Copyright (c) Intel Corporation. All rights reserved.
* Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event_nvmf.h"
#include "spdk/rpc.h"
#include "spdk/util.h"
static const struct spdk_json_object_decoder nvmf_rpc_subsystem_tgt_opts_decoder[] = {
{"max_subsystems", 0, spdk_json_decode_uint32, true}
};
static void
spdk_rpc_set_nvmf_target_max_subsystems(struct spdk_jsonrpc_request *request,
const struct spdk_json_val *params)
{
struct spdk_json_write_ctx *w;
uint32_t max_subsystems = 0;
if (g_spdk_nvmf_tgt_max_subsystems != 0) {
SPDK_ERRLOG("this RPC must not be called more than once.\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
"Must not call more than once");
return;
}
if (params != NULL) {
if (spdk_json_decode_object(params, nvmf_rpc_subsystem_tgt_opts_decoder,
SPDK_COUNTOF(nvmf_rpc_subsystem_tgt_opts_decoder), &max_subsystems)) {
SPDK_ERRLOG("spdk_json_decode_object() failed\n");
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
"Invalid parameters");
return;
}
}
g_spdk_nvmf_tgt_max_subsystems = max_subsystems;
w = spdk_jsonrpc_begin_result(request);
spdk_json_write_bool(w, true);
spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("set_nvmf_target_max_subsystems", spdk_rpc_set_nvmf_target_max_subsystems,
SPDK_RPC_STARTUP)
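
/*
 * Example request (illustrative only: the value 1024 and the id are
 * arbitrary; the method name and the "max_subsystems" key come from the
 * registration above):
 *
 *   {"jsonrpc": "2.0", "id": 1, "method": "set_nvmf_target_max_subsystems",
 *    "params": {"max_subsystems": 1024}}
 */
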
static int decode_conn_sched(const struct spdk_json_val *val, void *out)
{
	enum spdk_nvmf_connect_sched *sched = out;

	if (spdk_json_strequal(val, "roundrobin") == true) {
		*sched = CONNECT_SCHED_ROUND_ROBIN;
	} else if (spdk_json_strequal(val, "hostip") == true) {
		*sched = CONNECT_SCHED_HOST_IP;
	} else if (spdk_json_strequal(val, "transport") == true) {
		*sched = CONNECT_SCHED_TRANSPORT_OPTIMAL_GROUP;
	} else {
		SPDK_ERRLOG("Invalid connection scheduling parameter\n");
		return -EINVAL;
	}

	return 0;
}
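
/*
 * decode_conn_sched() above is wired in as the decode function for the
 * "conn_sched" entry of the table below, so the RPC accepts params such as
 * {"conn_sched": "hostip"}; any string other than "roundrobin", "hostip" or
 * "transport" is rejected with -EINVAL.
 */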
static const struct spdk_json_object_decoder nvmf_rpc_subsystem_tgt_conf_decoder[] = {
	{"acceptor_poll_rate", offsetof(struct spdk_nvmf_tgt_conf, acceptor_poll_rate), spdk_json_decode_uint32, true},
	{"conn_sched", offsetof(struct spdk_nvmf_tgt_conf, conn_sched), decode_conn_sched, true},
};

/*
 * Startup-time RPC: allocates and fills the global NVMe-oF target
 * configuration (acceptor poll rate and connection scheduling policy),
 * falling back to ACCEPT_TIMEOUT_US and DEFAULT_CONN_SCHED when a parameter
 * is omitted.  Like the RPC above, it may only be called once.
 */
static void
spdk_rpc_set_nvmf_target_config(struct spdk_jsonrpc_request *request,
				const struct spdk_json_val *params)
{
	struct spdk_nvmf_tgt_conf *conf;
	struct spdk_json_write_ctx *w;

	if (g_spdk_nvmf_tgt_conf != NULL) {
		SPDK_ERRLOG("this RPC must not be called more than once.\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Must not call more than once");
		return;
	}

	conf = calloc(1, sizeof(*conf));
	if (conf == NULL) {
		SPDK_ERRLOG("calloc() failed for target config\n");
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						 "Out of memory");
		return;
	}

	conf->acceptor_poll_rate = ACCEPT_TIMEOUT_US;
	conf->conn_sched = DEFAULT_CONN_SCHED;

	if (params != NULL) {
		if (spdk_json_decode_object(params, nvmf_rpc_subsystem_tgt_conf_decoder,
					    SPDK_COUNTOF(nvmf_rpc_subsystem_tgt_conf_decoder), conf)) {
			free(conf);
			SPDK_ERRLOG("spdk_json_decode_object() failed\n");
			spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
							 "Invalid parameters");
			return;
		}
	}

	g_spdk_nvmf_tgt_conf = conf;

	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_bool(w, true);
	spdk_jsonrpc_end_result(request, w);
}
SPDK_RPC_REGISTER("set_nvmf_target_config", spdk_rpc_set_nvmf_target_config, SPDK_RPC_STARTUP)
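
/*
 * Example request (illustrative only: the acceptor_poll_rate value,
 * presumably in microseconds given the ACCEPT_TIMEOUT_US default, and the id
 * are arbitrary; the method name and parameter keys come from the
 * registration above):
 *
 *   {"jsonrpc": "2.0", "id": 2, "method": "set_nvmf_target_config",
 *    "params": {"acceptor_poll_rate": 10000, "conn_sched": "roundrobin"}}
 */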