util: move common helper functions to util.h
These helpers were repeated in a few different places, so pull them into a common header file. Change-Id: Id807fa2cfec0de2e0363aeb081510fb801781985 Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
This commit is contained in:
parent
187ec2fde2
commit
84d904841f
@ -51,6 +51,7 @@
|
||||
#include "spdk/nvme.h"
|
||||
#include "spdk/nvmf.h"
|
||||
#include "spdk/string.h"
|
||||
#include "spdk/util.h"
|
||||
|
||||
#define MAX_LISTEN_ADDRESSES 255
|
||||
#define MAX_HOSTS 255
|
||||
@ -176,15 +177,15 @@ spdk_nvmf_parse_nvmf_tgt(void)
|
||||
if (max_queue_depth < 0) {
|
||||
max_queue_depth = SPDK_NVMF_CONFIG_QUEUE_DEPTH_DEFAULT;
|
||||
}
|
||||
max_queue_depth = nvmf_max(max_queue_depth, SPDK_NVMF_CONFIG_QUEUE_DEPTH_MIN);
|
||||
max_queue_depth = nvmf_min(max_queue_depth, SPDK_NVMF_CONFIG_QUEUE_DEPTH_MAX);
|
||||
max_queue_depth = spdk_max(max_queue_depth, SPDK_NVMF_CONFIG_QUEUE_DEPTH_MIN);
|
||||
max_queue_depth = spdk_min(max_queue_depth, SPDK_NVMF_CONFIG_QUEUE_DEPTH_MAX);
|
||||
|
||||
max_queues_per_sess = spdk_conf_section_get_intval(sp, "MaxQueuesPerSession");
|
||||
if (max_queues_per_sess < 0) {
|
||||
max_queues_per_sess = SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_DEFAULT;
|
||||
}
|
||||
max_queues_per_sess = nvmf_max(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MIN);
|
||||
max_queues_per_sess = nvmf_min(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MAX);
|
||||
max_queues_per_sess = spdk_max(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MIN);
|
||||
max_queues_per_sess = spdk_min(max_queues_per_sess, SPDK_NVMF_CONFIG_QUEUES_PER_SESSION_MAX);
|
||||
|
||||
in_capsule_data_size = spdk_conf_section_get_intval(sp, "InCapsuleDataSize");
|
||||
if (in_capsule_data_size < 0) {
|
||||
@ -193,8 +194,8 @@ spdk_nvmf_parse_nvmf_tgt(void)
|
||||
SPDK_ERRLOG("InCapsuleDataSize must be a multiple of 16\n");
|
||||
return -1;
|
||||
}
|
||||
in_capsule_data_size = nvmf_max(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN);
|
||||
in_capsule_data_size = nvmf_min(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX);
|
||||
in_capsule_data_size = spdk_max(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MIN);
|
||||
in_capsule_data_size = spdk_min(in_capsule_data_size, SPDK_NVMF_CONFIG_IN_CAPSULE_DATA_SIZE_MAX);
|
||||
|
||||
max_io_size = spdk_conf_section_get_intval(sp, "MaxIOSize");
|
||||
if (max_io_size < 0) {
|
||||
@ -203,8 +204,8 @@ spdk_nvmf_parse_nvmf_tgt(void)
|
||||
SPDK_ERRLOG("MaxIOSize must be a multiple of 4096\n");
|
||||
return -1;
|
||||
}
|
||||
max_io_size = nvmf_max(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN);
|
||||
max_io_size = nvmf_min(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX);
|
||||
max_io_size = spdk_max(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MIN);
|
||||
max_io_size = spdk_min(max_io_size, SPDK_NVMF_CONFIG_MAX_IO_SIZE_MAX);
|
||||
|
||||
acceptor_lcore = spdk_conf_section_get_intval(sp, "AcceptorCore");
|
||||
if (acceptor_lcore < 0) {
|
||||
|
@ -47,9 +47,6 @@
|
||||
#define MAX_VIRTUAL_NAMESPACE 16
|
||||
#define MAX_SN_LEN 20
|
||||
|
||||
#define nvmf_min(a,b) (((a)<(b))?(a):(b))
|
||||
#define nvmf_max(a,b) (((a)>(b))?(a):(b))
|
||||
|
||||
int spdk_nvmf_tgt_init(uint16_t max_queue_depth, uint16_t max_conn_per_sess,
|
||||
uint32_t in_capsule_data_size, uint32_t max_io_size);
|
||||
|
||||
|
71
include/spdk/util.h
Normal file
71
include/spdk/util.h
Normal file
@ -0,0 +1,71 @@
|
||||
/*-
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (c) Intel Corporation.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
/** \file
|
||||
* General utility functions
|
||||
*/
|
||||
|
||||
#ifndef SPDK_UTIL_H
|
||||
#define SPDK_UTIL_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define spdk_min(a,b) (((a)<(b))?(a):(b))
|
||||
#define spdk_max(a,b) (((a)>(b))?(a):(b))
|
||||
|
||||
static inline uint32_t
spdk_u32log2(uint32_t x)
{
	/* Floor of log base 2 of x.
	 *
	 * __builtin_clz(0) is undefined, and log2(0) has no mathematical
	 * value either, so define the result for x == 0 as 0 by convention
	 * so callers do not need their own guard.
	 */
	return (x == 0) ? 0 : (31u - (uint32_t)__builtin_clz(x));
}
|
||||
|
||||
static inline uint32_t
spdk_align32pow2(uint32_t x)
{
	/* Round x up to the nearest power of 2.
	 *
	 * Guard x == 0 explicitly: x - 1 would wrap to 0xFFFFFFFF, whose
	 * log2 is 31, and 1u << 32 is undefined behavior (shift count
	 * equals the type width). Return 1 (2^0) for that case instead.
	 */
	if (x == 0) {
		return 1;
	}
	return 1u << (1 + spdk_u32log2(x - 1));
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -34,6 +34,7 @@
|
||||
#include "ioat_internal.h"
|
||||
|
||||
#include "spdk/env.h"
|
||||
#include "spdk/util.h"
|
||||
|
||||
#include "spdk_internal/log.h"
|
||||
|
||||
@ -569,8 +570,6 @@ spdk_ioat_detach(struct spdk_ioat_chan *ioat)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define min(a, b) (((a)<(b))?(a):(b))
|
||||
|
||||
#define _2MB_PAGE(ptr) ((ptr) & ~(0x200000 - 1))
|
||||
#define _2MB_OFFSET(ptr) ((ptr) & (0x200000 - 1))
|
||||
|
||||
@ -608,9 +607,9 @@ spdk_ioat_submit_copy(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_c
|
||||
pdst_page = spdk_vtophys((void *)vdst_page);
|
||||
}
|
||||
op_size = remaining;
|
||||
op_size = min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
|
||||
op_size = min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
|
||||
op_size = min(op_size, ioat->max_xfer_size);
|
||||
op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vsrc)));
|
||||
op_size = spdk_min(op_size, (0x200000 - _2MB_OFFSET(vdst)));
|
||||
op_size = spdk_min(op_size, ioat->max_xfer_size);
|
||||
remaining -= op_size;
|
||||
|
||||
last_desc = ioat_prep_copy(ioat,
|
||||
@ -672,7 +671,7 @@ spdk_ioat_submit_fill(struct spdk_ioat_chan *ioat, void *cb_arg, spdk_ioat_req_c
|
||||
|
||||
while (remaining) {
|
||||
op_size = remaining;
|
||||
op_size = min(op_size, ioat->max_xfer_size);
|
||||
op_size = spdk_min(op_size, ioat->max_xfer_size);
|
||||
remaining -= op_size;
|
||||
|
||||
last_desc = ioat_prep_fill(ioat,
|
||||
|
@ -432,7 +432,7 @@ nvme_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
|
||||
cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
|
||||
|
||||
/* Page size is 2 ^ (12 + mps). */
|
||||
cc.bits.mps = nvme_u32log2(PAGE_SIZE) - 12;
|
||||
cc.bits.mps = spdk_u32log2(PAGE_SIZE) - 12;
|
||||
|
||||
switch (ctrlr->opts.arb_mechanism) {
|
||||
case SPDK_NVME_CC_AMS_RR:
|
||||
@ -582,7 +582,7 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
|
||||
ctrlr->max_xfer_size = nvme_transport_ctrlr_get_max_xfer_size(ctrlr);
|
||||
SPDK_TRACELOG(SPDK_TRACE_NVME, "transport max_xfer_size %u\n", ctrlr->max_xfer_size);
|
||||
if (ctrlr->cdata.mdts > 0) {
|
||||
ctrlr->max_xfer_size = nvme_min(ctrlr->max_xfer_size,
|
||||
ctrlr->max_xfer_size = spdk_min(ctrlr->max_xfer_size,
|
||||
ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
|
||||
SPDK_TRACELOG(SPDK_TRACE_NVME, "MDTS max_xfer_size %u\n", ctrlr->max_xfer_size);
|
||||
}
|
||||
@ -631,7 +631,7 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
|
||||
sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
|
||||
cq_allocated = (status.cpl.cdw0 >> 16) + 1;
|
||||
|
||||
ctrlr->opts.num_io_queues = nvme_min(sq_allocated, cq_allocated);
|
||||
ctrlr->opts.num_io_queues = spdk_min(sq_allocated, cq_allocated);
|
||||
|
||||
ctrlr->free_io_qids = spdk_bit_array_create(ctrlr->opts.num_io_queues + 1);
|
||||
if (ctrlr->free_io_qids == NULL) {
|
||||
@ -851,7 +851,7 @@ nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
|
||||
}
|
||||
|
||||
/* aerl is a zero-based value, so we need to add 1 here. */
|
||||
ctrlr->num_aers = nvme_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
|
||||
ctrlr->num_aers = spdk_min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl + 1));
|
||||
|
||||
for (i = 0; i < ctrlr->num_aers; i++) {
|
||||
aer = &ctrlr->aer[i];
|
||||
@ -1312,8 +1312,8 @@ nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_reg
|
||||
|
||||
ctrlr->min_page_size = 1u << (12 + ctrlr->cap.bits.mpsmin);
|
||||
|
||||
ctrlr->opts.io_queue_size = nvme_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
|
||||
ctrlr->opts.io_queue_size = nvme_min(ctrlr->opts.io_queue_size, max_io_queue_size);
|
||||
ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
|
||||
ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, max_io_queue_size);
|
||||
}
|
||||
|
||||
void
|
||||
@ -1628,7 +1628,7 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
|
||||
p = payload;
|
||||
|
||||
while (size_remaining > 0) {
|
||||
transfer = nvme_min(size_remaining, ctrlr->min_page_size);
|
||||
transfer = spdk_min(size_remaining, ctrlr->min_page_size);
|
||||
status.done = false;
|
||||
|
||||
res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
|
||||
|
@ -55,6 +55,7 @@
|
||||
#include "spdk/bit_array.h"
|
||||
#include "spdk/mmio.h"
|
||||
#include "spdk/pci_ids.h"
|
||||
#include "spdk/util.h"
|
||||
#include "spdk/nvme_intel.h"
|
||||
#include "spdk/nvmf_spec.h"
|
||||
|
||||
@ -427,26 +428,8 @@ struct nvme_driver {
|
||||
|
||||
extern struct nvme_driver *g_spdk_nvme_driver;
|
||||
|
||||
#define nvme_min(a,b) (((a)<(b))?(a):(b))
|
||||
|
||||
#define nvme_delay usleep
|
||||
|
||||
static inline uint32_t
nvme_u32log2(uint32_t x)
{
	/* Floor of log base 2 of x.
	 *
	 * __builtin_clz(0) is undefined, so the x == 0 case is pinned to 0
	 * rather than passed through to the builtin.
	 */
	return (x == 0) ? 0 : (31u - (uint32_t)__builtin_clz(x));
}
|
||||
|
||||
static inline uint32_t
nvme_align32pow2(uint32_t x)
{
	/* Round x up to the nearest power of 2.
	 *
	 * Guard x == 0 explicitly: x - 1 would wrap to 0xFFFFFFFF, whose
	 * log2 is 31, and 1u << 32 is undefined behavior (shift count
	 * equals the type width). Return 1 (2^0) for that case instead.
	 */
	if (x == 0) {
		return 1;
	}
	return 1u << (1 + nvme_u32log2(x - 1));
}
|
||||
|
||||
static inline bool
|
||||
nvme_qpair_is_admin_queue(struct spdk_nvme_qpair *qpair)
|
||||
{
|
||||
|
@ -153,7 +153,7 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
|
||||
|
||||
while (remaining_lba_count > 0) {
|
||||
lba_count = sectors_per_max_io - (lba & sector_mask);
|
||||
lba_count = nvme_min(remaining_lba_count, lba_count);
|
||||
lba_count = spdk_min(remaining_lba_count, lba_count);
|
||||
|
||||
child = _nvme_add_child_request(ns, payload, payload_offset, md_offset,
|
||||
lba, lba_count, cb_fn, cb_arg, opc,
|
||||
|
@ -836,7 +836,7 @@ nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair)
|
||||
* Note also that for a queue size of N, we can only have (N-1)
|
||||
* commands outstanding, hence the "-1" here.
|
||||
*/
|
||||
num_trackers = nvme_min(NVME_IO_TRACKERS, pqpair->num_entries - 1);
|
||||
num_trackers = spdk_min(NVME_IO_TRACKERS, pqpair->num_entries - 1);
|
||||
}
|
||||
|
||||
assert(num_trackers != 0);
|
||||
@ -1512,11 +1512,11 @@ nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_
|
||||
nvme_pcie_fail_request_bad_vtophys(qpair, tr);
|
||||
return -1;
|
||||
}
|
||||
nseg = req->payload_size >> nvme_u32log2(PAGE_SIZE);
|
||||
nseg = req->payload_size >> spdk_u32log2(PAGE_SIZE);
|
||||
modulo = req->payload_size & (PAGE_SIZE - 1);
|
||||
unaligned = phys_addr & (PAGE_SIZE - 1);
|
||||
if (modulo || unaligned) {
|
||||
nseg += 1 + ((modulo + unaligned - 1) >> nvme_u32log2(PAGE_SIZE));
|
||||
nseg += 1 + ((modulo + unaligned - 1) >> spdk_u32log2(PAGE_SIZE));
|
||||
}
|
||||
|
||||
if (req->payload.md) {
|
||||
@ -1598,7 +1598,7 @@ nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_
|
||||
return -1;
|
||||
}
|
||||
|
||||
length = nvme_min(remaining_transfer_len, length);
|
||||
length = spdk_min(remaining_transfer_len, length);
|
||||
remaining_transfer_len -= length;
|
||||
|
||||
sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
|
||||
@ -1680,13 +1680,13 @@ nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvm
|
||||
/* All SGe except first must start on a page boundary. */
|
||||
assert((sge_count == 0) || _is_page_aligned(phys_addr));
|
||||
|
||||
data_transferred = nvme_min(remaining_transfer_len, length);
|
||||
data_transferred = spdk_min(remaining_transfer_len, length);
|
||||
|
||||
nseg = data_transferred >> nvme_u32log2(PAGE_SIZE);
|
||||
nseg = data_transferred >> spdk_u32log2(PAGE_SIZE);
|
||||
modulo = data_transferred & (PAGE_SIZE - 1);
|
||||
unaligned = phys_addr & (PAGE_SIZE - 1);
|
||||
if (modulo || unaligned) {
|
||||
nseg += 1 + ((modulo + unaligned - 1) >> nvme_u32log2(PAGE_SIZE));
|
||||
nseg += 1 + ((modulo + unaligned - 1) >> spdk_u32log2(PAGE_SIZE));
|
||||
}
|
||||
|
||||
if (total_nseg == 0) {
|
||||
|
@ -479,7 +479,7 @@ nvme_rdma_copy_mem(struct spdk_nvme_rdma_req *rdma_req, bool copy_from_user)
|
||||
return -1;
|
||||
}
|
||||
|
||||
len = nvme_min(remaining_transfer_len, len);
|
||||
len = spdk_min(remaining_transfer_len, len);
|
||||
remaining_transfer_len -= len;
|
||||
|
||||
if (copy_from_user) {
|
||||
@ -592,7 +592,7 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
|
||||
return ret;
|
||||
}
|
||||
|
||||
param.responder_resources = nvme_min(rqpair->num_entries, attr.max_qp_rd_atom);
|
||||
param.responder_resources = spdk_min(rqpair->num_entries, attr.max_qp_rd_atom);
|
||||
|
||||
ctrlr = rqpair->qpair.ctrlr;
|
||||
if (!ctrlr) {
|
||||
@ -631,7 +631,7 @@ nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
|
||||
SPDK_TRACELOG(SPDK_TRACE_NVME, "Requested queue depth %d. Actually got queue depth %d.\n",
|
||||
rqpair->num_entries, accept_data->crqsize);
|
||||
|
||||
rqpair->num_entries = nvme_min(rqpair->num_entries , accept_data->crqsize);
|
||||
rqpair->num_entries = spdk_min(rqpair->num_entries, accept_data->crqsize);
|
||||
|
||||
rdma_ack_cm_event(event);
|
||||
|
||||
@ -1178,7 +1178,7 @@ nvme_rdma_ctrlr_scan(const struct spdk_nvme_transport_id *discovery_trid,
|
||||
*/
|
||||
buffer_max_entries = (sizeof(buffer) - offsetof(struct spdk_nvmf_discovery_log_page, entries[0])) /
|
||||
sizeof(struct spdk_nvmf_discovery_log_page_entry);
|
||||
numrec = nvme_min(log_page->numrec, buffer_max_entries);
|
||||
numrec = spdk_min(log_page->numrec, buffer_max_entries);
|
||||
if (numrec != log_page->numrec) {
|
||||
SPDK_WARNLOG("Discovery service returned %" PRIu64 " entries,"
|
||||
"but buffer can only hold %" PRIu64 "\n",
|
||||
@ -1382,13 +1382,13 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
|
||||
if (max_completions == 0) {
|
||||
max_completions = rqpair->num_entries;
|
||||
} else {
|
||||
max_completions = nvme_min(max_completions, rqpair->num_entries);
|
||||
max_completions = spdk_min(max_completions, rqpair->num_entries);
|
||||
}
|
||||
|
||||
/* Consume all send completions */
|
||||
reaped = 0;
|
||||
do {
|
||||
batch_size = nvme_min((max_completions - reaped),
|
||||
batch_size = spdk_min((max_completions - reaped),
|
||||
MAX_COMPLETIONS_PER_POLL);
|
||||
rc = ibv_poll_cq(rqpair->cm_id->send_cq, batch_size, wc);
|
||||
if (rc < 0) {
|
||||
@ -1405,7 +1405,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
|
||||
/* Poll for recv completions */
|
||||
reaped = 0;
|
||||
do {
|
||||
batch_size = nvme_min((max_completions - reaped),
|
||||
batch_size = spdk_min((max_completions - reaped),
|
||||
MAX_COMPLETIONS_PER_POLL);
|
||||
rc = ibv_poll_cq(rqpair->cm_id->recv_cq, batch_size, wc);
|
||||
if (rc < 0) {
|
||||
|
@ -42,6 +42,7 @@
|
||||
#include "spdk/nvmf_spec.h"
|
||||
#include "spdk/assert.h"
|
||||
#include "spdk/queue.h"
|
||||
#include "spdk/util.h"
|
||||
|
||||
#define SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE 1
|
||||
|
||||
@ -53,16 +54,6 @@ struct spdk_nvmf_tgt {
|
||||
uint32_t max_io_size;
|
||||
};
|
||||
|
||||
static inline uint32_t
nvmf_u32log2(uint32_t x)
{
	/* Floor of log base 2 of x.
	 *
	 * __builtin_clz(0) is undefined, so the x == 0 case is pinned to 0
	 * rather than passed through to the builtin.
	 */
	return (x == 0) ? 0 : (31u - (uint32_t)__builtin_clz(x));
}
|
||||
|
||||
extern struct spdk_nvmf_tgt g_nvmf_tgt;
|
||||
|
||||
struct spdk_nvmf_listen_addr *spdk_nvmf_listen_addr_create(const char *trname, const char *traddr,
|
||||
|
@ -675,15 +675,15 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
|
||||
SPDK_TRACELOG(SPDK_TRACE_RDMA,
|
||||
"Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
|
||||
addr->attr.max_qp_wr, addr->attr.max_qp_rd_atom);
|
||||
max_queue_depth = nvmf_min(max_queue_depth, addr->attr.max_qp_wr);
|
||||
max_rw_depth = nvmf_min(max_rw_depth, addr->attr.max_qp_rd_atom);
|
||||
max_queue_depth = spdk_min(max_queue_depth, addr->attr.max_qp_wr);
|
||||
max_rw_depth = spdk_min(max_rw_depth, addr->attr.max_qp_rd_atom);
|
||||
|
||||
/* Next check the remote NIC's hardware limitations */
|
||||
SPDK_TRACELOG(SPDK_TRACE_RDMA,
|
||||
"Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
|
||||
rdma_param->initiator_depth, rdma_param->responder_resources);
|
||||
if (rdma_param->initiator_depth > 0) {
|
||||
max_rw_depth = nvmf_min(max_rw_depth, rdma_param->initiator_depth);
|
||||
max_rw_depth = spdk_min(max_rw_depth, rdma_param->initiator_depth);
|
||||
}
|
||||
|
||||
/* Finally check for the host software requested values, which are
|
||||
@ -692,8 +692,8 @@ nvmf_rdma_connect(struct rdma_cm_event *event)
|
||||
rdma_param->private_data_len >= sizeof(struct spdk_nvmf_rdma_request_private_data)) {
|
||||
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Host Receive Queue Size: %d\n", private_data->hrqsize);
|
||||
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Host Send Queue Size: %d\n", private_data->hsqsize);
|
||||
max_queue_depth = nvmf_min(max_queue_depth, private_data->hrqsize);
|
||||
max_queue_depth = nvmf_min(max_queue_depth, private_data->hsqsize);
|
||||
max_queue_depth = spdk_min(max_queue_depth, private_data->hrqsize);
|
||||
max_queue_depth = spdk_min(max_queue_depth, private_data->hsqsize);
|
||||
}
|
||||
|
||||
SPDK_TRACELOG(SPDK_TRACE_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
|
||||
|
@ -99,7 +99,7 @@ nvmf_init_nvme_session_properties(struct spdk_nvmf_session *session)
|
||||
session->vcdata.cntlid = session->cntlid;
|
||||
session->vcdata.kas = 10;
|
||||
session->vcdata.maxcmd = g_nvmf_tgt.max_queue_depth;
|
||||
session->vcdata.mdts = nvmf_u32log2(g_nvmf_tgt.max_io_size / 4096);
|
||||
session->vcdata.mdts = spdk_u32log2(g_nvmf_tgt.max_io_size / 4096);
|
||||
session->vcdata.sgls.keyed_sgl = 1;
|
||||
session->vcdata.sgls.sgl_offset = 1;
|
||||
|
||||
|
@ -395,7 +395,7 @@ spdk_nvmf_get_discovery_log_page(void *buffer, uint64_t offset, uint32_t length)
|
||||
|
||||
/* Copy the valid part of the discovery log page, if any */
|
||||
if (g_discovery_log_page && offset < g_discovery_log_page_size) {
|
||||
copy_len = nvmf_min(g_discovery_log_page_size - offset, length);
|
||||
copy_len = spdk_min(g_discovery_log_page_size - offset, length);
|
||||
zero_len -= copy_len;
|
||||
memcpy(buffer, (char *)g_discovery_log_page + offset, copy_len);
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ identify_ns(struct spdk_nvmf_subsystem *subsystem,
|
||||
nsdata->nuse = bdev->blockcnt;
|
||||
nsdata->nlbaf = 0;
|
||||
nsdata->flbas.format = 0;
|
||||
nsdata->lbaf[0].lbads = nvmf_u32log2(bdev->blocklen);
|
||||
nsdata->lbaf[0].lbads = spdk_u32log2(bdev->blocklen);
|
||||
|
||||
return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
|
||||
}
|
||||
|
@ -43,13 +43,14 @@
|
||||
#include <string.h>
|
||||
|
||||
#include "spdk/likely.h"
|
||||
#include "spdk/util.h"
|
||||
|
||||
typedef uint64_t spdk_bit_array_word;
|
||||
#define SPDK_BIT_ARRAY_WORD_TZCNT(x) (__builtin_ctzll(x))
|
||||
#define SPDK_BIT_ARRAY_WORD_C(x) ((spdk_bit_array_word)(x))
|
||||
#define SPDK_BIT_ARRAY_WORD_BYTES sizeof(spdk_bit_array_word)
|
||||
#define SPDK_BIT_ARRAY_WORD_BITS (SPDK_BIT_ARRAY_WORD_BYTES * 8)
|
||||
#define SPDK_BIT_ARRAY_WORD_INDEX_SHIFT (31u - __builtin_clz(SPDK_BIT_ARRAY_WORD_BITS))
|
||||
#define SPDK_BIT_ARRAY_WORD_INDEX_SHIFT spdk_u32log2(SPDK_BIT_ARRAY_WORD_BITS)
|
||||
#define SPDK_BIT_ARRAY_WORD_INDEX_MASK ((1u << SPDK_BIT_ARRAY_WORD_INDEX_SHIFT) - 1)
|
||||
|
||||
struct spdk_bit_array {
|
||||
|
Loading…
Reference in New Issue
Block a user