lib: accel, bdev, blob, env_dpdk remove spdk_ prefix.
This change removes the spdk_ prefix only from static functions in the libraries listed above.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: Ic6df38dfbeb53f0b1c30d350921f7216acba3170
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2362
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
commit 3456377b45
parent d18e63206a
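The convention applied throughout the diff below is sketched here (an illustrative snippet, not a verbatim excerpt of the tree): file-local static helpers drop the spdk_ prefix, while symbols exported through the public SPDK headers keep it.

/* Before: a static helper carried the public-style prefix. */
static void spdk_accel_sw_register(struct spdk_accel_engine *accel_engine);

/* After: static helpers are prefix-free. */
static void accel_sw_register(struct spdk_accel_engine *accel_engine);

/* Public API functions are unchanged, e.g.: */
int spdk_accel_engine_initialize(void);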
@@ -79,14 +79,14 @@ spdk_accel_hw_engine_register(struct spdk_accel_engine *accel_engine)
/* Registration of sw modules (currently supports only 1) */
static void
-spdk_accel_sw_register(struct spdk_accel_engine *accel_engine)
+accel_sw_register(struct spdk_accel_engine *accel_engine)
{
assert(g_sw_accel_engine == NULL);
g_sw_accel_engine = accel_engine;
}
static void
-spdk_accel_sw_unregister(void)
+accel_sw_unregister(void)
{
g_sw_accel_engine = NULL;
}
@@ -214,7 +214,7 @@ spdk_accel_engine_get_io_channel(void)
}
static void
-spdk_accel_engine_module_initialize(void)
+accel_engine_module_initialize(void)
{
struct spdk_accel_module_if *accel_engine_module;
@@ -227,7 +227,7 @@ int
spdk_accel_engine_initialize(void)
{
SPDK_NOTICELOG("Accel engine initialized to use software engine.\n");
-spdk_accel_engine_module_initialize();
+accel_engine_module_initialize();
/*
* We need a unique identifier for the accel engine framework, so use the
* spdk_accel_module_list address for this purpose.
@@ -239,7 +239,7 @@ spdk_accel_engine_initialize(void)
}
static void
-spdk_accel_engine_module_finish_cb(void)
+accel_engine_module_finish_cb(void)
{
spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
@@ -274,7 +274,7 @@ spdk_accel_engine_module_finish(void)
}
if (!g_accel_engine_module) {
-spdk_accel_engine_module_finish_cb();
+accel_engine_module_finish_cb();
return;
}
@@ -415,7 +415,7 @@ sw_accel_engine_get_ctx_size(void)
static int
sw_accel_engine_init(void)
{
-spdk_accel_sw_register(&sw_accel_engine);
+accel_sw_register(&sw_accel_engine);
spdk_io_device_register(&sw_accel_engine, sw_accel_create_cb, sw_accel_destroy_cb, 0,
"sw_accel_engine");
@@ -426,7 +426,7 @@ static void
sw_accel_engine_fini(void *ctxt)
{
spdk_io_device_unregister(&sw_accel_engine, NULL);
-spdk_accel_sw_unregister();
+accel_sw_unregister();
spdk_accel_engine_module_finish();
}
@@ -52,7 +52,7 @@ static const struct spdk_json_object_decoder rpc_set_bdev_opts_decoders[] = {
};
static void
-spdk_rpc_bdev_set_options(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
+rpc_bdev_set_options(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
struct spdk_rpc_set_bdev_opts rpc_opts;
struct spdk_bdev_opts bdev_opts;
@@ -94,5 +94,5 @@ spdk_rpc_bdev_set_options(struct spdk_jsonrpc_request *request, const struct spd
spdk_json_write_bool(w, true);
spdk_jsonrpc_end_result(request, w);
}
-SPDK_RPC_REGISTER("bdev_set_options", spdk_rpc_bdev_set_options, SPDK_RPC_STARTUP)
+SPDK_RPC_REGISTER("bdev_set_options", rpc_bdev_set_options, SPDK_RPC_STARTUP)
SPDK_RPC_REGISTER_ALIAS_DEPRECATED(bdev_set_options, set_bdev_options)
@@ -117,7 +117,7 @@ spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
}
static void
-spdk_bdev_part_free_cb(void *io_device)
+bdev_part_free_cb(void *io_device)
{
struct spdk_bdev_part *part = io_device;
struct spdk_bdev_part_base *base;
@@ -143,7 +143,7 @@ spdk_bdev_part_free_cb(void *io_device)
int
spdk_bdev_part_free(struct spdk_bdev_part *part)
{
-spdk_io_device_unregister(part, spdk_bdev_part_free_cb);
+spdk_io_device_unregister(part, bdev_part_free_cb);
/* Return 1 to indicate that this is an asynchronous operation that isn't complete
* until spdk_bdev_destruct_done is called */
@@ -163,7 +163,7 @@ spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev
}
static bool
-spdk_bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
+bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
{
struct spdk_bdev_part *part = _part;
@@ -185,7 +185,7 @@ spdk_bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
}
static struct spdk_io_channel *
-spdk_bdev_part_get_io_channel(void *_part)
+bdev_part_get_io_channel(void *_part)
{
struct spdk_bdev_part *part = _part;
@@ -217,8 +217,8 @@ spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
}
static int
-spdk_bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
-uint32_t remapped_offset)
+bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
+uint32_t remapped_offset)
{
struct spdk_bdev *bdev = bdev_io->bdev;
struct spdk_dif_ctx dif_ctx;
@@ -261,7 +261,7 @@ spdk_bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
}
static void
-spdk_bdev_part_complete_read_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+bdev_part_complete_read_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *part_io = cb_arg;
uint32_t offset, remapped_offset;
@@ -271,7 +271,7 @@ spdk_bdev_part_complete_read_io(struct spdk_bdev_io *bdev_io, bool success, void
remapped_offset = part_io->u.bdev.offset_blocks;
if (success) {
-rc = spdk_bdev_part_remap_dif(bdev_io, offset, remapped_offset);
+rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
if (rc != 0) {
success = false;
}
@@ -284,7 +284,7 @@ spdk_bdev_part_complete_read_io(struct spdk_bdev_io *bdev_io, bool success, void
}
static void
-spdk_bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *part_io = cb_arg;
int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
@@ -294,7 +294,7 @@ spdk_bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_
}
static void
-spdk_bdev_part_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+bdev_part_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
struct spdk_bdev_io *part_io = cb_arg;
int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
@@ -323,18 +323,18 @@ spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bde
rc = spdk_bdev_readv_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, remapped_offset,
bdev_io->u.bdev.num_blocks,
-spdk_bdev_part_complete_read_io, bdev_io);
+bdev_part_complete_read_io, bdev_io);
} else {
rc = spdk_bdev_readv_blocks_with_md(base_desc, base_ch,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf, remapped_offset,
bdev_io->u.bdev.num_blocks,
-spdk_bdev_part_complete_read_io, bdev_io);
+bdev_part_complete_read_io, bdev_io);
}
break;
case SPDK_BDEV_IO_TYPE_WRITE:
-rc = spdk_bdev_part_remap_dif(bdev_io, offset, remapped_offset);
+rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
if (rc != 0) {
return SPDK_BDEV_IO_STATUS_FAILED;
}
@@ -343,39 +343,39 @@ spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bde
rc = spdk_bdev_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt, remapped_offset,
bdev_io->u.bdev.num_blocks,
-spdk_bdev_part_complete_io, bdev_io);
+bdev_part_complete_io, bdev_io);
} else {
rc = spdk_bdev_writev_blocks_with_md(base_desc, base_ch,
bdev_io->u.bdev.iovs,
bdev_io->u.bdev.iovcnt,
bdev_io->u.bdev.md_buf, remapped_offset,
bdev_io->u.bdev.num_blocks,
-spdk_bdev_part_complete_io, bdev_io);
+bdev_part_complete_io, bdev_io);
}
break;
case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
-bdev_io->u.bdev.num_blocks, spdk_bdev_part_complete_io,
+bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
bdev_io);
break;
case SPDK_BDEV_IO_TYPE_UNMAP:
rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
-bdev_io->u.bdev.num_blocks, spdk_bdev_part_complete_io,
+bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
bdev_io);
break;
case SPDK_BDEV_IO_TYPE_FLUSH:
rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
-bdev_io->u.bdev.num_blocks, spdk_bdev_part_complete_io,
+bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
bdev_io);
break;
case SPDK_BDEV_IO_TYPE_RESET:
rc = spdk_bdev_reset(base_desc, base_ch,
-spdk_bdev_part_complete_io, bdev_io);
+bdev_part_complete_io, bdev_io);
break;
case SPDK_BDEV_IO_TYPE_ZCOPY:
rc = spdk_bdev_zcopy_start(base_desc, base_ch, remapped_offset,
bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
-spdk_bdev_part_complete_zcopy_io, bdev_io);
+bdev_part_complete_zcopy_io, bdev_io);
break;
default:
SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
@@ -386,7 +386,7 @@ spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bde
}
static int
-spdk_bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
+bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
{
struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
struct spdk_bdev_part_channel *ch = ctx_buf;
@@ -405,7 +405,7 @@ spdk_bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
}
static void
-spdk_bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
+bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
{
struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
struct spdk_bdev_part_channel *ch = ctx_buf;
@@ -432,8 +432,8 @@ struct spdk_bdev_part_base *
SPDK_ERRLOG("Memory allocation failure\n");
return NULL;
}
-fn_table->get_io_channel = spdk_bdev_part_get_io_channel;
-fn_table->io_type_supported = spdk_bdev_part_io_type_supported;
+fn_table->get_io_channel = bdev_part_get_io_channel;
+fn_table->io_type_supported = bdev_part_io_type_supported;
base->bdev = bdev;
base->desc = NULL;
@@ -512,8 +512,8 @@ spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base
base->claimed = true;
}
-spdk_io_device_register(part, spdk_bdev_part_channel_create_cb,
-spdk_bdev_part_channel_destroy_cb,
+spdk_io_device_register(part, bdev_part_channel_create_cb,
+bdev_part_channel_destroy_cb,
base->channel_size,
name);
@@ -50,8 +50,8 @@
#define BLOB_CRC32C_INITIAL 0xffffffffUL
-static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
-static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
+static int bs_register_md_thread(struct spdk_blob_store *bs);
+static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);
@@ -1537,8 +1537,8 @@ struct spdk_blob_persist_ctx {
};
static void
-spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
-uint32_t lba_count)
+bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
+uint32_t lba_count)
{
switch (ctx->blob->clear_method) {
case BLOB_CLEAR_WITH_DEFAULT:
@@ -1671,7 +1671,7 @@ _spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bse
/* If a run of LBAs previously existing, clear them now */
if (lba_count > 0) {
-spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
+bs_batch_clear_dev(ctx, batch, lba, lba_count);
}
/* Start building the next batch */
@@ -1685,7 +1685,7 @@ _spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bse
/* If we ended with a contiguous set of LBAs, clear them now */
if (lba_count > 0) {
-spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
+bs_batch_clear_dev(ctx, batch, lba, lba_count);
}
bs_batch_close(batch);
@@ -3055,7 +3055,7 @@ _spdk_bs_free(struct spdk_blob_store *bs)
{
_spdk_bs_blob_list_free(bs);
-spdk_bs_unregister_md_thread(bs);
+bs_unregister_md_thread(bs);
spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
}
@@ -3145,7 +3145,7 @@ _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_b
spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
sizeof(struct spdk_bs_channel), "blobstore");
-rc = spdk_bs_register_md_thread(bs);
+rc = bs_register_md_thread(bs);
if (rc == -1) {
spdk_io_device_unregister(bs, NULL);
pthread_mutex_destroy(&bs->used_clusters_mutex);
@@ -5100,7 +5100,7 @@ spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
}
static int
-spdk_bs_register_md_thread(struct spdk_blob_store *bs)
+bs_register_md_thread(struct spdk_blob_store *bs)
{
bs->md_channel = spdk_get_io_channel(bs);
if (!bs->md_channel) {
@@ -5112,7 +5112,7 @@ spdk_bs_register_md_thread(struct spdk_blob_store *bs)
}
static int
-spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
+bs_unregister_md_thread(struct spdk_blob_store *bs)
{
spdk_put_io_channel(bs->md_channel);
@@ -80,7 +80,7 @@ bs_call_cpl(struct spdk_bs_cpl *cpl, int bserrno)
}
static void
-spdk_bs_request_set_complete(struct spdk_bs_request_set *set)
+bs_request_set_complete(struct spdk_bs_request_set *set)
{
struct spdk_bs_cpl cpl = set->cpl;
int bserrno = set->bserrno;
@@ -91,7 +91,7 @@ spdk_bs_request_set_complete(struct spdk_bs_request_set *set)
}
static void
-spdk_bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
+bs_sequence_completion(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
struct spdk_bs_request_set *set = cb_arg;
@@ -118,7 +118,7 @@ bs_sequence_start(struct spdk_io_channel *_channel,
set->bserrno = 0;
set->channel = channel;
-set->cb_args.cb_fn = spdk_bs_sequence_completion;
+set->cb_args.cb_fn = bs_sequence_completion;
set->cb_args.cb_arg = set;
set->cb_args.channel = channel->dev_channel;
@@ -253,7 +253,7 @@ bs_sequence_finish(spdk_bs_sequence_t *seq, int bserrno)
if (bserrno != 0) {
seq->bserrno = bserrno;
}
-spdk_bs_request_set_complete((struct spdk_bs_request_set *)seq);
+bs_request_set_complete((struct spdk_bs_request_set *)seq);
}
void
@@ -265,8 +265,8 @@ bs_user_op_sequence_finish(void *cb_arg, int bserrno)
}
static void
-spdk_bs_batch_completion(struct spdk_io_channel *_channel,
-void *cb_arg, int bserrno)
+bs_batch_completion(struct spdk_io_channel *_channel,
+void *cb_arg, int bserrno)
{
struct spdk_bs_request_set *set = cb_arg;
@@ -277,10 +277,10 @@ spdk_bs_batch_completion(struct spdk_io_channel *_channel,
if (set->u.batch.outstanding_ops == 0 && set->u.batch.batch_closed) {
if (set->u.batch.cb_fn) {
-set->cb_args.cb_fn = spdk_bs_sequence_completion;
+set->cb_args.cb_fn = bs_sequence_completion;
set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, bserrno);
} else {
-spdk_bs_request_set_complete(set);
+bs_request_set_complete(set);
}
}
}
@@ -309,7 +309,7 @@ bs_batch_open(struct spdk_io_channel *_channel,
set->u.batch.outstanding_ops = 0;
set->u.batch.batch_closed = 0;
-set->cb_args.cb_fn = spdk_bs_batch_completion;
+set->cb_args.cb_fn = bs_batch_completion;
set->cb_args.cb_arg = set;
set->cb_args.channel = channel->dev_channel;
@@ -396,10 +396,10 @@ bs_batch_close(spdk_bs_batch_t *batch)
if (set->u.batch.outstanding_ops == 0) {
if (set->u.batch.cb_fn) {
-set->cb_args.cb_fn = spdk_bs_sequence_completion;
+set->cb_args.cb_fn = bs_sequence_completion;
set->u.batch.cb_fn((spdk_bs_sequence_t *)set, set->u.batch.cb_arg, set->bserrno);
} else {
-spdk_bs_request_set_complete(set);
+bs_request_set_complete(set);
}
}
}
@@ -414,7 +414,7 @@ bs_sequence_to_batch(spdk_bs_sequence_t *seq, spdk_bs_sequence_cpl cb_fn, void *
set->u.batch.outstanding_ops = 0;
set->u.batch.batch_closed = 0;
-set->cb_args.cb_fn = spdk_bs_batch_completion;
+set->cb_args.cb_fn = bs_batch_completion;
return set;
}
@@ -105,7 +105,7 @@ _sprintf_alloc(const char *format, ...)
}
static void
-spdk_env_unlink_shared_files(void)
+env_unlink_shared_files(void)
{
/* Starting with DPDK 18.05, there are more files with unpredictable paths
* and filenames. The --no-shconf option prevents from creating them, but
@@ -139,7 +139,7 @@ spdk_env_opts_init(struct spdk_env_opts *opts)
}
static void
-spdk_free_args(char **args, int argcount)
+free_args(char **args, int argcount)
{
int i;
@@ -153,20 +153,20 @@ spdk_free_args(char **args, int argcount)
}
static char **
-spdk_push_arg(char *args[], int *argcount, char *arg)
+push_arg(char *args[], int *argcount, char *arg)
{
char **tmp;
if (arg == NULL) {
fprintf(stderr, "%s: NULL arg supplied\n", __func__);
-spdk_free_args(args, *argcount);
+free_args(args, *argcount);
return NULL;
}
tmp = realloc(args, sizeof(char *) * (*argcount + 1));
if (tmp == NULL) {
free(arg);
-spdk_free_args(args, *argcount);
+free_args(args, *argcount);
return NULL;
}
@@ -184,7 +184,7 @@ spdk_push_arg(char *args[], int *argcount, char *arg)
#define VTD_CAP_MGAW_MASK (0x3F << VTD_CAP_MGAW_SHIFT)
static int
-spdk_get_iommu_width(void)
+get_iommu_width(void)
{
DIR *dir;
FILE *file;
@@ -246,7 +246,7 @@ spdk_get_iommu_width(void)
#endif
static int
-spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
+build_eal_cmdline(const struct spdk_env_opts *opts)
{
int argcount = 0;
char **args;
@@ -254,14 +254,14 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
args = NULL;
/* set the program name */
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("%s", opts->name));
+args = push_arg(args, &argcount, _sprintf_alloc("%s", opts->name));
if (args == NULL) {
return -1;
}
/* disable shared configuration files when in single process mode. This allows for cleaner shutdown */
if (opts->shm_id < 0) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("%s", "--no-shconf"));
+args = push_arg(args, &argcount, _sprintf_alloc("%s", "--no-shconf"));
if (args == NULL) {
return -1;
}
@@ -280,9 +280,9 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
l_arg[len - 1] = '\0';
}
}
-args = spdk_push_arg(args, &argcount, l_arg);
+args = push_arg(args, &argcount, l_arg);
} else {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("-c %s", opts->core_mask));
+args = push_arg(args, &argcount, _sprintf_alloc("-c %s", opts->core_mask));
}
if (args == NULL) {
@@ -291,7 +291,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* set the memory channel number */
if (opts->mem_channel > 0) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("-n %d", opts->mem_channel));
+args = push_arg(args, &argcount, _sprintf_alloc("-n %d", opts->mem_channel));
if (args == NULL) {
return -1;
}
@@ -299,7 +299,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* set the memory size */
if (opts->mem_size >= 0) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("-m %d", opts->mem_size));
+args = push_arg(args, &argcount, _sprintf_alloc("-m %d", opts->mem_size));
if (args == NULL) {
return -1;
}
@@ -307,8 +307,8 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* set the master core */
if (opts->master_core > 0) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--master-lcore=%d",
-opts->master_core));
+args = push_arg(args, &argcount, _sprintf_alloc("--master-lcore=%d",
+opts->master_core));
if (args == NULL) {
return -1;
}
@@ -316,7 +316,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* set no pci if enabled */
if (opts->no_pci) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--no-pci"));
+args = push_arg(args, &argcount, _sprintf_alloc("--no-pci"));
if (args == NULL) {
return -1;
}
@@ -324,7 +324,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* create just one hugetlbfs file */
if (opts->hugepage_single_segments) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--single-file-segments"));
+args = push_arg(args, &argcount, _sprintf_alloc("--single-file-segments"));
if (args == NULL) {
return -1;
}
@@ -332,7 +332,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* unlink hugepages after initialization */
if (opts->unlink_hugepage) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--huge-unlink"));
+args = push_arg(args, &argcount, _sprintf_alloc("--huge-unlink"));
if (args == NULL) {
return -1;
}
@@ -340,7 +340,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* use a specific hugetlbfs mount */
if (opts->hugedir) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--huge-dir=%s", opts->hugedir));
+args = push_arg(args, &argcount, _sprintf_alloc("--huge-dir=%s", opts->hugedir));
if (args == NULL) {
return -1;
}
@@ -349,7 +349,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0) && RTE_VERSION < RTE_VERSION_NUM(18, 5, 1, 0)
/* Dynamic memory management is buggy in DPDK 18.05.0. Don't use it. */
if (!opts->env_context || strstr(opts->env_context, "--legacy-mem") == NULL) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--legacy-mem"));
+args = push_arg(args, &argcount, _sprintf_alloc("--legacy-mem"));
if (args == NULL) {
return -1;
}
@@ -364,9 +364,9 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
for (i = 0; i < opts->num_pci_addr; i++) {
spdk_pci_addr_fmt(bdf, 32, &pci_addr[i]);
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("%s=%s",
-(opts->pci_blacklist ? "--pci-blacklist" : "--pci-whitelist"),
-bdf));
+args = push_arg(args, &argcount, _sprintf_alloc("%s=%s",
+(opts->pci_blacklist ? "--pci-blacklist" : "--pci-whitelist"),
+bdf));
if (args == NULL) {
return -1;
}
@@ -378,7 +378,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* Lower default EAL loglevel to RTE_LOG_NOTICE - normal, but significant messages.
* This can be overridden by specifying the same option in opts->env_context
*/
-args = spdk_push_arg(args, &argcount, strdup("--log-level=lib.eal:6"));
+args = push_arg(args, &argcount, strdup("--log-level=lib.eal:6"));
if (args == NULL) {
return -1;
}
@@ -386,7 +386,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
/* Lower default CRYPTO loglevel to RTE_LOG_ERR to avoid a ton of init msgs.
* This can be overridden by specifying the same option in opts->env_context
*/
-args = spdk_push_arg(args, &argcount, strdup("--log-level=lib.cryptodev:5"));
+args = push_arg(args, &argcount, strdup("--log-level=lib.cryptodev:5"));
if (args == NULL) {
return -1;
}
@@ -396,14 +396,14 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
* of other DPDK libs, but none of which we make use right now. If necessary, this can
* be overridden via opts->env_context.
*/
-args = spdk_push_arg(args, &argcount, strdup("--log-level=user1:6"));
+args = push_arg(args, &argcount, strdup("--log-level=user1:6"));
if (args == NULL) {
return -1;
}
#endif
if (opts->env_context) {
-args = spdk_push_arg(args, &argcount, strdup(opts->env_context));
+args = push_arg(args, &argcount, strdup(opts->env_context));
if (args == NULL) {
return -1;
}
@@ -415,7 +415,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
* but DPDK guesses it should be iova-mode=va. Add a check and force
* iova-mode=pa here. */
if (rte_vfio_noiommu_is_enabled()) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
+args = push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
if (args == NULL) {
return -1;
}
@@ -427,8 +427,8 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
* virtual machines) don't have an IOMMU capable of handling the full virtual
* address space and DPDK doesn't currently catch that. Add a check in SPDK
* and force iova-mode=pa here. */
-if (spdk_get_iommu_width() < SPDK_IOMMU_VA_REQUIRED_WIDTH) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
+if (get_iommu_width() < SPDK_IOMMU_VA_REQUIRED_WIDTH) {
+args = push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
if (args == NULL) {
return -1;
}
@@ -436,7 +436,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
#elif defined(__PPC64__)
/* On Linux + PowerPC, DPDK doesn't support VA mode at all. Unfortunately, it doesn't correctly
* auto-detect at the moment, so we'll just force it here. */
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
+args = push_arg(args, &argcount, _sprintf_alloc("--iova-mode=pa"));
if (args == NULL) {
return -1;
}
@@ -449,7 +449,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
*
* Ref: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
*/
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--base-virtaddr=0x200000000000"));
+args = push_arg(args, &argcount, _sprintf_alloc("--base-virtaddr=0x200000000000"));
if (args == NULL) {
return -1;
}
@@ -461,7 +461,7 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
*/
#if RTE_VERSION >= RTE_VERSION_NUM(19, 02, 0, 0)
if (!opts->env_context || strstr(opts->env_context, "--legacy-mem") == NULL) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("%s", "--match-allocations"));
+args = push_arg(args, &argcount, _sprintf_alloc("%s", "--match-allocations"));
if (args == NULL) {
return -1;
}
@@ -469,20 +469,20 @@ spdk_build_eal_cmdline(const struct spdk_env_opts *opts)
#endif
if (opts->shm_id < 0) {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--file-prefix=spdk_pid%d",
-getpid()));
+args = push_arg(args, &argcount, _sprintf_alloc("--file-prefix=spdk_pid%d",
+getpid()));
if (args == NULL) {
return -1;
}
} else {
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--file-prefix=spdk%d",
-opts->shm_id));
+args = push_arg(args, &argcount, _sprintf_alloc("--file-prefix=spdk%d",
+opts->shm_id));
if (args == NULL) {
return -1;
}
/* set the process type */
-args = spdk_push_arg(args, &argcount, _sprintf_alloc("--proc-type=auto"));
+args = push_arg(args, &argcount, _sprintf_alloc("--proc-type=auto"));
if (args == NULL) {
return -1;
}
@@ -521,7 +521,7 @@ spdk_env_dpdk_post_fini(void)
{
pci_fini();
-spdk_free_args(g_eal_cmdline, g_eal_cmdline_argcount);
+free_args(g_eal_cmdline, g_eal_cmdline_argcount);
}
int
@@ -534,7 +534,7 @@ spdk_env_init(const struct spdk_env_opts *opts)
g_external_init = false;
-rc = spdk_build_eal_cmdline(opts);
+rc = build_eal_cmdline(opts);
if (rc < 0) {
fprintf(stderr, "Invalid arguments to initialize DPDK\n");
return -EINVAL;
@@ -583,7 +583,7 @@ spdk_env_init(const struct spdk_env_opts *opts)
* apps will need to open these files. These files are not created for
* "single file segments".
*/
-spdk_env_unlink_shared_files();
+env_unlink_shared_files();
}
legacy_mem = false;
@@ -49,11 +49,11 @@
#include "spdk/env_dpdk.h"
#ifdef __FreeBSD__
-#define SPDK_VFIO_ENABLED 0
+#define VFIO_ENABLED 0
#else
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
-#define SPDK_VFIO_ENABLED 1
+#define VFIO_ENABLED 1
#include <linux/vfio.h>
#include <rte_vfio.h>
@@ -82,7 +82,7 @@ static struct vfio_cfg g_vfio = {
};
#else
-#define SPDK_VFIO_ENABLED 0
+#define VFIO_ENABLED 0
#endif
#endif
@@ -154,7 +154,7 @@ static bool g_legacy_mem;
* and call the new map's notify callback for each virtually contiguous region.
*/
static int
-spdk_mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_action action)
+mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_action action)
{
size_t idx_256tb;
uint64_t idx_1gb;
@@ -304,7 +304,7 @@ spdk_mem_map_alloc(uint64_t default_translation, const struct spdk_mem_map_ops *
if (ops && ops->notify_cb) {
pthread_mutex_lock(&g_spdk_mem_map_mutex);
-rc = spdk_mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_REGISTER);
+rc = mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_REGISTER);
if (rc != 0) {
pthread_mutex_unlock(&g_spdk_mem_map_mutex);
DEBUG_PRINT("Initial mem_map notify failed\n");
@@ -337,7 +337,7 @@ spdk_mem_map_free(struct spdk_mem_map **pmap)
if (map->ops.notify_cb) {
pthread_mutex_lock(&g_spdk_mem_map_mutex);
-spdk_mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_UNREGISTER);
+mem_map_notify_walk(map, SPDK_MEM_MAP_NOTIFY_UNREGISTER);
TAILQ_REMOVE(&g_spdk_mem_maps, map, tailq);
pthread_mutex_unlock(&g_spdk_mem_map_mutex);
}
@@ -556,7 +556,7 @@ spdk_mem_reserve(void *vaddr, size_t len)
}
static struct map_1gb *
-spdk_mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
+mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
{
struct map_1gb *map_1gb;
uint64_t idx_256tb = MAP_256TB_IDX(vfn_2mb);
@@ -619,7 +619,7 @@ spdk_mem_map_set_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t
vfn_2mb = vaddr >> SHIFT_2MB;
while (size) {
-map_1gb = spdk_mem_map_get_map_1gb(map, vfn_2mb);
+map_1gb = mem_map_get_map_1gb(map, vfn_2mb);
if (!map_1gb) {
DEBUG_PRINT("could not get %p map\n", (void *)vaddr);
return -ENOMEM;
@@ -792,7 +792,7 @@ mem_map_init(bool legacy_mem)
bool
spdk_iommu_is_enabled(void)
{
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
return g_vfio.enabled && !g_vfio.noiommu_enabled;
#else
return false;
@@ -811,7 +811,7 @@ static TAILQ_HEAD(, spdk_vtophys_pci_device) g_vtophys_pci_devices =
static struct spdk_mem_map *g_vtophys_map;
static struct spdk_mem_map *g_phys_ref_map;
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
static int
vtophys_iommu_map_dma(uint64_t vaddr, uint64_t iova, uint64_t size)
{
@@ -1032,9 +1032,9 @@ vtophys_get_paddr_pci(uint64_t vaddr)
}
static int
-spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
-enum spdk_mem_map_notify_action action,
-void *vaddr, size_t len)
+vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
+enum spdk_mem_map_notify_action action,
+void *vaddr, size_t len)
{
int rc = 0, pci_phys = 0;
uint64_t paddr;
@@ -1057,7 +1057,7 @@ spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
case SPDK_MEM_MAP_NOTIFY_REGISTER:
if (paddr == SPDK_VTOPHYS_ERROR) {
/* This is not an address that DPDK is managing. */
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
enum rte_iova_mode iova_mode;
#if RTE_VERSION >= RTE_VERSION_NUM(19, 11, 0, 0)
@@ -1119,7 +1119,7 @@ spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
DEBUG_PRINT("invalid paddr 0x%" PRIx64 " - must be 2MB aligned\n", paddr);
return -EINVAL;
}
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
/* If the IOMMU is on, but DPDK is using iova-mode=pa, we want to register this memory
* with the IOMMU using the physical address to match. */
if (spdk_iommu_is_enabled()) {
@@ -1161,7 +1161,7 @@ spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
break;
case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
if (paddr == SPDK_VTOPHYS_ERROR) {
/*
* This is not an address that DPDK is managing. If vfio is enabled,
@@ -1246,10 +1246,10 @@ vtophys_check_contiguous_entries(uint64_t paddr1, uint64_t paddr2)
return (paddr2 - paddr1 == VALUE_2MB);
}
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
static bool
-spdk_vfio_enabled(void)
+vfio_enabled(void)
{
return rte_vfio_is_enabled("vfio_pci");
}
@@ -1276,13 +1276,13 @@ has_iommu_groups(void)
}
static bool
-spdk_vfio_noiommu_enabled(void)
+vfio_noiommu_enabled(void)
{
return rte_vfio_noiommu_is_enabled();
}
static void
-spdk_vtophys_iommu_init(void)
+vtophys_iommu_init(void)
{
char proc_fd_path[PATH_MAX + 1];
char link_path[PATH_MAX + 1];
@@ -1290,11 +1290,11 @@ spdk_vtophys_iommu_init(void)
DIR *dir;
struct dirent *d;
-if (!spdk_vfio_enabled()) {
+if (!vfio_enabled()) {
return;
}
-if (spdk_vfio_noiommu_enabled()) {
+if (vfio_noiommu_enabled()) {
g_vfio.noiommu_enabled = true;
} else if (!has_iommu_groups()) {
return;
@@ -1351,7 +1351,7 @@ vtophys_pci_device_added(struct rte_pci_device *pci_device)
}
pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
struct spdk_vfio_dma_map *dma_map;
int ret;
@@ -1396,7 +1396,7 @@ vtophys_pci_device_removed(struct rte_pci_device *pci_device)
}
pthread_mutex_unlock(&g_vtophys_pci_devices_mutex);
-#if SPDK_VFIO_ENABLED
+#if VFIO_ENABLED
struct spdk_vfio_dma_map *dma_map;
int ret;
@@ -1433,7 +1433,7 @@ int
vtophys_init(void)
{
const struct spdk_mem_map_ops vtophys_map_ops = {
-.notify_cb = spdk_vtophys_notify,
+.notify_cb = vtophys_notify,
.are_contiguous = vtophys_check_contiguous_entries,
};
@@ -1442,8 +1442,8 @@ vtophys_init(void)
.are_contiguous = NULL,
};
-#if SPDK_VFIO_ENABLED
-spdk_vtophys_iommu_init();
+#if VFIO_ENABLED
+vtophys_iommu_init();
#endif
g_phys_ref_map = spdk_mem_map_alloc(0, &phys_ref_map_ops, NULL);
@@ -56,8 +56,8 @@ static TAILQ_HEAD(, spdk_pci_device) g_pci_hotplugged_devices =
static TAILQ_HEAD(, spdk_pci_driver) g_pci_drivers = TAILQ_HEAD_INITIALIZER(g_pci_drivers);
static int
-spdk_map_bar_rte(struct spdk_pci_device *device, uint32_t bar,
-void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+map_bar_rte(struct spdk_pci_device *device, uint32_t bar,
+void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
struct rte_pci_device *dev = device->dev_handle;
@@ -69,13 +69,13 @@ spdk_map_bar_rte(struct spdk_pci_device *device, uint32_t bar,
}
static int
-spdk_unmap_bar_rte(struct spdk_pci_device *device, uint32_t bar, void *addr)
+unmap_bar_rte(struct spdk_pci_device *device, uint32_t bar, void *addr)
{
return 0;
}
static int
-spdk_cfg_read_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
+cfg_read_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
int rc;
@@ -89,7 +89,7 @@ spdk_cfg_read_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32
}
static int
-spdk_cfg_write_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
+cfg_write_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint32_t offset)
{
int rc;
@@ -103,7 +103,7 @@ spdk_cfg_write_rte(struct spdk_pci_device *dev, void *value, uint32_t len, uint3
}
static void
-spdk_detach_rte_cb(void *_dev)
+detach_rte_cb(void *_dev)
{
struct rte_pci_device *rte_dev = _dev;
@@ -121,7 +121,7 @@ spdk_detach_rte_cb(void *_dev)
}
static void
-spdk_detach_rte(struct spdk_pci_device *dev)
+detach_rte(struct spdk_pci_device *dev)
{
struct rte_pci_device *rte_dev = dev->dev_handle;
int i;
@@ -132,11 +132,11 @@ spdk_detach_rte(struct spdk_pci_device *dev)
*/
dev->internal.pending_removal = true;
if (!spdk_process_is_primary() || pthread_equal(g_dpdk_tid, pthread_self())) {
-spdk_detach_rte_cb(rte_dev);
+detach_rte_cb(rte_dev);
return;
}
-rte_eal_alarm_set(1, spdk_detach_rte_cb, rte_dev);
+rte_eal_alarm_set(1, detach_rte_cb, rte_dev);
/* wait up to 2s for the cb to finish executing */
for (i = 2000; i > 0; i--) {
@@ -156,7 +156,7 @@ spdk_detach_rte(struct spdk_pci_device *dev)
* cancel the alarm - if it started executing already, this
* call will block and wait for it to finish.
*/
-rte_eal_alarm_cancel(spdk_detach_rte_cb, rte_dev);
+rte_eal_alarm_cancel(detach_rte_cb, rte_dev);
/* the device could have been finally removed, so just check
* it again.
@@ -180,15 +180,15 @@ pci_driver_register(struct spdk_pci_driver *driver)
#if RTE_VERSION >= RTE_VERSION_NUM(18, 5, 0, 0)
static void
-spdk_pci_device_rte_hotremove_cb(void *dev)
+pci_device_rte_hotremove_cb(void *dev)
{
-spdk_detach_rte((struct spdk_pci_device *)dev);
+detach_rte((struct spdk_pci_device *)dev);
}
static void
-spdk_pci_device_rte_hotremove(const char *device_name,
-enum rte_dev_event_type event,
-void *cb_arg)
+pci_device_rte_hotremove(const char *device_name,
+enum rte_dev_event_type event,
+void *cb_arg)
{
struct spdk_pci_device *dev;
bool can_detach = false;
@@ -221,7 +221,7 @@ spdk_pci_device_rte_hotremove(const char *device_name,
* moved into the eal in the future, the deferred removal could
* be deleted.
*/
-rte_eal_alarm_set(1, spdk_pci_device_rte_hotremove_cb, dev);
+rte_eal_alarm_set(1, pci_device_rte_hotremove_cb, dev);
}
}
#endif
@@ -287,7 +287,7 @@ pci_init(void)
#if RTE_VERSION >= RTE_VERSION_NUM(18, 5, 0, 0)
/* Register a single hotremove callback for all devices. */
if (spdk_process_is_primary()) {
-rte_dev_event_callback_register(NULL, spdk_pci_device_rte_hotremove, NULL);
+rte_dev_event_callback_register(NULL, pci_device_rte_hotremove, NULL);
}
#endif
@@ -313,7 +313,7 @@ pci_fini(void)
#if RTE_VERSION >= RTE_VERSION_NUM(18, 5, 0, 0)
if (spdk_process_is_primary()) {
-rte_dev_event_callback_unregister(NULL, spdk_pci_device_rte_hotremove, NULL);
+rte_dev_event_callback_unregister(NULL, pci_device_rte_hotremove, NULL);
}
#endif
}
@@ -353,11 +353,11 @@ pci_device_init(struct rte_pci_driver *_drv,
dev->socket_id = _dev->device.numa_node;
dev->type = "pci";
-dev->map_bar = spdk_map_bar_rte;
-dev->unmap_bar = spdk_unmap_bar_rte;
-dev->cfg_read = spdk_cfg_read_rte;
-dev->cfg_write = spdk_cfg_write_rte;
-dev->detach = spdk_detach_rte;
+dev->map_bar = map_bar_rte;
+dev->unmap_bar = unmap_bar_rte;
+dev->cfg_read = cfg_read_rte;
+dev->cfg_write = cfg_write_rte;
+dev->detach = detach_rte;
dev->internal.driver = driver;
dev->internal.claim_fd = -1;