misc: Fix spelling mistakes

Found with misspell-fixer.

Signed-off-by: Michal Berger <michal.berger@intel.com>
Change-Id: If062df0189d92e4fb2da3f055fb981909780dc04
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15207
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>

parent acd2cc94e7
commit 3f912cf0e9
@@ -2075,7 +2075,7 @@ A new `spdk_bdev_open_ext` function has been added and `spdk_bdev_open` function
 The new open function introduces requirement to provide callback function that will be called by
 asynchronous event such as bdev removal. `spdk_bdev_open_ext` function takes bdev name as
 an argument instead of bdev structure to avoid a race condition that can happen when the bdev
-is being removed between a call to get its structure based on a name and actually openning it.
+is being removed between a call to get its structure based on a name and actually opening it.
 
 New 'resize' event has been added to notify about change of block count property of block device.
 Event is delivered only if block device was opened with `spdk_bdev_open_ext` function.
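For reference, a minimal sketch of the open-by-name pattern this entry describes, assuming the `spdk/bdev.h` API of this SPDK era; the `Malloc0` bdev name is illustrative only:

```c
#include "spdk/bdev.h"

static void
bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc **desc = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		/* Underlying bdev is going away; release the descriptor. */
		spdk_bdev_close(*desc);
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		/* Block count changed; re-read it via spdk_bdev_get_num_blocks(). */
		break;
	default:
		break;
	}
}

static int
open_by_name(struct spdk_bdev_desc **desc)
{
	/* Opening by name avoids the lookup-then-open removal race. */
	return spdk_bdev_open_ext("Malloc0", true, bdev_event_cb, desc, desc);
}
```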
@@ -3001,7 +3001,7 @@ Net framework initialization and finish is now done asynchronously.
 
 Added `spdk_rpc_is_method_allowed` function for checking whether method is permitted in a given state.
 Added `spdk_rpc_get_state` to check current state of RPC server.
-RPC `wait_subsystem_init` has been added to allow clients to block untill all subsystems are initialized.
+RPC `wait_subsystem_init` has been added to allow clients to block until all subsystems are initialized.
 
 ### json rpc
 
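A short sketch of how these additions compose; the mask-style check mirrors the one `app_json_config_load_subsystem_config_entry` uses later in this change, and the helper name is illustrative:

```c
#include "spdk/rpc.h"
#include <stdbool.h>
#include <stdint.h>

/* Can a method whose allowed-states mask is method_state_mask run now? */
static bool
method_allowed_now(uint32_t method_state_mask)
{
	uint32_t cur_state_mask = spdk_rpc_get_state();

	return (method_state_mask & cur_state_mask) == cur_state_mask;
}
```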
@@ -3299,7 +3299,7 @@ to link only the minimal set of components required.
 ### git pre-commit and pre-push hooks
 
 The pre-commit hook will run `scripts/check_format.sh` and verify there are no
-formating errors before allowing `git commit` to run. The pre-push hook runs
+formatting errors before allowing `git commit` to run. The pre-push hook runs
 `make CONFIG_WERROR=y` with and without `CONFIG_DEBUG=y` using both the gcc and
 clang compiler before allowing `git push` to run. Following each DEBUG build
 `test/unit/unittest.sh` is run and verified. Results are recorded in the
CONFIG
@@ -146,7 +146,7 @@ CONFIG_CRYPTO=n
 # Build spdk shared libraries in addition to the static ones.
 CONFIG_SHARED=n
 
-# Build with VTune suport.
+# Build with VTune support.
 CONFIG_VTUNE=n
 CONFIG_VTUNE_DIR=
 
@@ -91,7 +91,7 @@ Design software to deal with the inability to get a channel.
 There are several dependencies to leverage the Linux idxd driver for driving DSA devices.
 
 1 Linux kernel support: You need to have a Linux kernel with the `idxd` driver
-loaded. Futher, add the following command line options to the kernel boot
+loaded. Further, add the following command line options to the kernel boot
 commands:
 
 ```bash
@@ -170,5 +170,5 @@ The following RPCs would accomplish the copy override:
 }
 ```
 
-To detemine the name of available modules and their supported operations use the
+To determine the name of available modules and their supported operations use the
 RPC `accel_get_module_info`.
@@ -145,7 +145,7 @@ well as on the base devices. The following types of metadata are persisted:
 
 After power failure, FTL needs to rebuild the whole L2P using the address maps (`P2L`) stored within each band/chunk.
 This needs to done, because while individual L2P pages may have been paged out and persisted to the cache device,
-there's no way to tell which, if any, pages were dirty before the power failure occured. The P2L consists of not only
+there's no way to tell which, if any, pages were dirty before the power failure occurred. The P2L consists of not only
 the mapping itself, but also a sequence id (`seq_id`), which describes the relative age of a given logical block
 (multiple writes to the same logical block would produce the same amount of P2L entries, only the last one having the current data).
 
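A conceptual sketch (not the FTL implementation) of why `seq_id` matters here: replaying P2L entries during recovery and keeping only the newest mapping per logical block:

```c
#include <stddef.h>
#include <stdint.h>

struct p2l_entry {
	uint64_t lba;    /* logical block this physical location claims */
	uint64_t addr;   /* physical address of the data */
	uint64_t seq_id; /* relative age; higher means written later */
};

/* Rebuild L2P from P2L: a newer write to the same LBA supersedes older entries. */
static void
l2p_replay(uint64_t *l2p_addr, uint64_t *l2p_seq, const struct p2l_entry *e, size_t cnt)
{
	for (size_t i = 0; i < cnt; i++) {
		if (e[i].seq_id >= l2p_seq[e[i].lba]) {
			l2p_addr[e[i].lba] = e[i].addr;
			l2p_seq[e[i].lba] = e[i].seq_id;
		}
	}
}
```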
@@ -175,7 +175,7 @@ iterate the spdk lists and build iterable representations of the list objects.
 This will result in errors if these are not available which is very possible if
 gdb is used for reasons other than debugging spdk core dumps.
 
-In the example bellow, I attempted to load the macros when the globals are not
+In the example below, I attempted to load the macros when the globals are not
 available causing gdb to fail loading the gdb_macros:
 
 ~~~{.sh}
@@ -4343,7 +4343,7 @@ This method is available only if SPDK was build with Ceph RBD support.
 
 Name | Optional | Type | Description
 ----------------------- | -------- | ----------- | -----------
-name | Required | string | Registerd Rados cluster object name
+name | Required | string | Registered Rados cluster object name
 user_id | Optional | string | Ceph ID (i.e. admin, not client.admin)
 config_param | Optional | string map | Explicit librados configuration
 config_file | Optional | string | File path of libraodos configuration file
@@ -7374,7 +7374,7 @@ io_unit_size | Optional | number | I/O unit size (bytes)
 max_aq_depth | Optional | number | Max number of admin cmds per AQ
 num_shared_buffers | Optional | number | The number of pooled data buffers available to the transport
 buf_cache_size | Optional | number | The number of shared buffers to reserve for each poll group
-num_cqe | Optional | number | The number of CQ entires. Only used when no_srq=true (RDMA only)
+num_cqe | Optional | number | The number of CQ entries. Only used when no_srq=true (RDMA only)
 max_srq_depth | Optional | number | The number of elements in a per-thread shared receive queue (RDMA only)
 no_srq | Optional | boolean | Disable shared receive queue even for devices that support it. (RDMA only)
 c2h_success | Optional | boolean | Disable C2H success optimization (TCP only)
@@ -8782,7 +8782,7 @@ ctrlr | string | Controller name
 cpumask | string | @ref cpu_mask of this controller
 delay_base_us | number | Base (minimum) coalescing time in microseconds (0 if disabled)
 iops_threshold | number | Coalescing activation level
-backend_specific | object | Backend specific informations
+backend_specific | object | Backend specific information
 
 ### Vhost block {#rpc_vhost_get_controllers_blk}
 
@@ -10171,7 +10171,7 @@ Example response:
 
 ### notify_get_notifications {#notify_get_notifications}
 
-Request notifications. Returns array of notifications that happend since the specified id (or first that is available).
+Request notifications. Returns array of notifications that happened since the specified id (or first that is available).
 
 Notice: Notifications are kept in circular buffer with limited size. Older notifications might be inaccessible
 due to being overwritten by new ones.
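A conceptual sketch of the circular-buffer behavior described in the notice; the size and payload are illustrative, not SPDK's actual types:

```c
#include <stdint.h>

#define RING_SIZE 64 /* illustrative capacity */

struct notification {
	uint64_t id; /* monotonically increasing event id */
	/* ... event payload ... */
};

static struct notification g_ring[RING_SIZE];
static uint64_t g_next_id;

static void
ring_push(struct notification ev)
{
	ev.id = g_next_id++;
	/* Reusing the slot makes notification (id - RING_SIZE) inaccessible. */
	g_ring[ev.id % RING_SIZE] = ev;
}
```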
@@ -10679,7 +10679,7 @@ Example response:
 
 Send NVMe command directly to NVMe controller or namespace. Parameters and responses encoded by base64 urlsafe need further processing.
 
-Notice: bdev_nvme_send_cmd requires user to guarentee the correctness of NVMe command itself, and also optional parameters.
+Notice: bdev_nvme_send_cmd requires user to guarantee the correctness of NVMe command itself, and also optional parameters.
 Illegal command contents or mismatching buffer size may result in unpredictable behavior.
 
 #### Parameters
@@ -11042,7 +11042,7 @@ uuid | Optional | string | UUID of new bdev
 oclass | Optional | string | DAOS object class (default SX)
 
 To find more about various object classes please visit [DAOS documentation](https://github.com/daos-stack/daos/blob/master/src/object/README.md).
-Please note, that DAOS bdev module uses the same CLI flag notation as `dmg` and `daos` commmands,
+Please note, that DAOS bdev module uses the same CLI flag notation as `dmg` and `daos` commands,
 for instance, `SX` or `EC_4P2G2` rather than in DAOS header file `OC_SX` or `OC_EC_4P2G2`.
 
 #### Result
@@ -116,7 +116,7 @@ Bringing machine 'default' up with 'virtualbox' provider...
 Use vagrant "destroy" followed by "rm -rf ubuntu18" to destroy all trace of vm.
 ~~~
 
-Check the enviroment.
+Check the environment.
 
 ~~~{.sh}
 user@dev-system:~/spdk/scripts/vagrant$ cd ubuntu18
@@ -75,7 +75,7 @@ docker-compose run traffic-generator-nvme
 docker-compose run traffic-generator-virtio
 ~~~
 
-Enviroment variables to containers can be passed as shown in
+Environment variables to containers can be passed as shown in
 [docs](https://docs.docker.com/compose/environment-variables/).
 For example extra arguments to fio can be passed as so:
 
@@ -74,7 +74,7 @@ hello_sock_usage(void)
 printf(" -T tls_ver TLS version, e.g., -T 12 or -T 13. If omitted, auto-negotiation will take place\n");
 printf(" -k disable KTLS for the given sock implementation (default)\n");
 printf(" -K enable KTLS for the given sock implementation\n");
-printf(" -V print out additional informations\n");
+printf(" -V print out additional information\n");
 printf(" -z disable zero copy send for the given sock implementation\n");
 printf(" -Z enable zero copy send for the given sock implementation\n");
 }
@@ -261,7 +261,7 @@ int spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_i
 spdk_accel_completion_cb cb_fn, void *cb_arg);
 
 /**
-* Return the name of the module assigned to a specfic opcode.
+* Return the name of the module assigned to a specific opcode.
 *
 * \param opcode Accel Framework Opcode enum value. Valid codes can be retrieved using
 * `accel_get_opc_assignments` or `spdk_accel_get_opc_name`.
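A usage sketch for the lookup documented above. Only the doc comment is visible in this hunk; the function and opcode names below (`spdk_accel_get_opc_module_name`, `ACCEL_OPC_COPY`) are assumptions about the accel API of this era:

```c
#include "spdk/accel.h"
#include <stdio.h>

static void
print_copy_module(void)
{
	const char *module_name = NULL;

	/* Assumed signature: returns 0 on success and sets module_name. */
	if (spdk_accel_get_opc_module_name(ACCEL_OPC_COPY, &module_name) == 0) {
		printf("copy is handled by: %s\n", module_name);
	}
}
```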
@@ -392,7 +392,7 @@ typedef int (*spdk_for_each_bdev_fn)(void *ctx, struct spdk_bdev *bdev);
 * \param ctx Context passed to the callback function.
 * \param fn Callback function for each block device.
 *
-* \return 0 if operation is sucessful, or suitable errno value one of the
+* \return 0 if operation is successful, or suitable errno value one of the
 * callback returned otherwise.
 */
 int spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn);
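A minimal usage sketch of the iterator documented above; both signatures appear verbatim in this header:

```c
#include "spdk/bdev.h"
#include <stdio.h>

static int
print_bdev_cb(void *ctx, struct spdk_bdev *bdev)
{
	(*(int *)ctx)++;
	printf("bdev: %s\n", spdk_bdev_get_name(bdev));
	return 0; /* a non-zero return stops the walk and is propagated */
}

static int
count_bdevs(int *count)
{
	*count = 0;
	return spdk_for_each_bdev(count, print_bdev_cb);
}
```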
@@ -405,7 +405,7 @@ int spdk_for_each_bdev(void *ctx, spdk_for_each_bdev_fn fn);
 * callback function for each unclaimed bdev internally.
 *
 * \param ctx Context passed to the callback function.
-* \param fn Callback funciton for each block device without virtual block devices on top.
+* \param fn Callback function for each block device without virtual block devices on top.
 *
 * \return 0 if operation is successful, or suitable errno value one of the
 * callback returned otherwise.
@@ -515,7 +515,7 @@ void spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size);
 * \param bs blobstore.
 * \param opts The structure which contains the option values for the new blob.
 * \param cb_fn Called when the operation is complete.
-* \param cb_arg Argument passed to funcion cb_fn.
+* \param cb_arg Argument passed to function cb_fn.
 */
 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
 spdk_blob_op_with_id_complete cb_fn, void *cb_arg);
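A sketch of creating a blob with explicit options, using the two calls shown in this hunk; the completion prototype (`cb_arg`, `spdk_blob_id`, `bserrno`) and the `num_clusters` option are assumptions about this blobstore era:

```c
#include "spdk/blob.h"

static void
blob_created(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	if (bserrno == 0) {
		/* blobid identifies the newly created blob */
	}
}

static void
create_blob(struct spdk_blob_store *bs)
{
	struct spdk_blob_opts opts;

	spdk_blob_opts_init(&opts, sizeof(opts));
	opts.num_clusters = 4; /* illustrative initial size */
	spdk_bs_create_blob_ext(bs, &opts, blob_created, NULL);
}
```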
@@ -294,7 +294,7 @@ int spdk_dix_inject_error(struct iovec *iovs, int iovcnt, struct iovec *md_iov,
 * This function removes the necessity of data copy in the SPDK application
 * during DIF insertion and strip.
 *
-* When the extended LBA payload is splitted into multiple data segments,
+* When the extended LBA payload is split into multiple data segments,
 * start of each data segment is passed through the DIF context. data_offset
 * and data_len is within a data segment.
 *
@@ -321,7 +321,7 @@ int spdk_dif_set_md_interleave_iovs(struct iovec *iovs, int iovcnt,
 /**
 * Generate and insert DIF into metadata space for newly read data block.
 *
-* When the extended LBA payload is splitted into multiple data segments,
+* When the extended LBA payload is split into multiple data segments,
 * start of each data segment is passed through the DIF context. data_offset
 * and data_len is within a data segment.
 *
@@ -250,7 +250,7 @@ int virtio_dev_reset(struct virtio_dev *vdev, uint64_t req_features);
 * \param max_queues number of queues to allocate. The max number of
 * usable I/O queues is also limited by the host device. `vdev` will be
 * started successfully even if the host supports less queues than requested.
-* \param fixed_queue_num number of queues preceeding the first
+* \param fixed_queue_num number of queues preceding the first
 * request queue. For Virtio-SCSI this is equal to 2, as there are
 * additional event and control queues.
 */
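A one-liner sketch of what `fixed_queue_num` implies for queue indexing (an illustrative helper, not part of the header):

```c
#include <stdint.h>

/* With fixed_queue_num reserved queues at the front (event and control
 * for Virtio-SCSI), request queue i lives at this virtqueue index. */
static uint16_t
virtio_request_vq_index(uint16_t fixed_queue_num, uint16_t i)
{
	return fixed_queue_num + i; /* e.g. 2 + i for Virtio-SCSI */
}
```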
@@ -593,7 +593,7 @@ spdk_accel_initialize(void)
 /* Create our priority global map of opcodes to modules, we populate starting
 * with the software module (guaranteed to be first on the list) and then
 * updating opcodes with HW modules that have been initilaized.
-* NOTE: all opcodes must be suported by software in the event that no HW
+* NOTE: all opcodes must be supported by software in the event that no HW
 * modules are initilaized to support the operation.
 */
 TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
@@ -6039,7 +6039,7 @@ _bdev_abort(struct spdk_bdev_io *parent_io)
 
 /* matched_ios is returned and will be kept by the caller.
 *
-* This funcion will be used for two cases, 1) the same cb_arg is used for
+* This function will be used for two cases, 1) the same cb_arg is used for
 * multiple I/Os, 2) a single large I/O is split into smaller ones.
 * Incrementing split_outstanding directly here may confuse readers especially
 * for the 1st case.
@@ -3864,7 +3864,7 @@ bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
 struct spdk_blob_md_page) * 8));
 /*
 * The length of the mask must be equal to or larger than the total number of clusters. It may be
-* larger than the total nubmer of clusters due to a failure spdk_bs_grow.
+* larger than the total number of clusters due to a failure spdk_bs_grow.
 */
 assert(ctx->mask->length >= ctx->bs->total_clusters);
 if (ctx->mask->length > ctx->bs->total_clusters) {
@@ -566,7 +566,7 @@ event_queue_run_batch(void *arg)
 }
 
 /* Execute the events. There are still some remaining events
-* that must occur on an SPDK thread. To accomodate those, try to
+* that must occur on an SPDK thread. To accommodate those, try to
 * run them on the first thread in the list, if it exists. */
 lw_thread = TAILQ_FIRST(&reactor->threads);
 if (lw_thread) {
@@ -15,7 +15,7 @@
 static int
 init_static(void)
 {
-/* There is no scheduling perfomed by static scheduler,
+/* There is no scheduling performed by static scheduler,
 * do not set the scheduling period. */
 spdk_scheduler_set_period(0);
 return 0;
@@ -468,7 +468,7 @@ spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_chann
 uint64_t lba, uint64_t lba_cnt, spdk_ftl_fn cb_fn, void *cb_arg)
 {
 int rc;
-uint64_t aligment = dev->layout.l2p.lbas_in_page;
+uint64_t alignment = dev->layout.l2p.lbas_in_page;
 
 if (lba_cnt == 0) {
 return -EINVAL;
@@ -486,7 +486,7 @@ spdk_ftl_unmap(struct spdk_ftl_dev *dev, struct ftl_io *io, struct spdk_io_chann
 return -EBUSY;
 }
 
-if (lba % aligment || lba_cnt % aligment) {
+if (lba % alignment || lba_cnt % alignment) {
 if (!io) {
 /* This is management/RPC path, its parameters must be aligned to 1MiB. */
 return -EINVAL;
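The validation rule the two hunks above implement, restated as a self-contained sketch: both the start LBA and the length must be multiples of the L2P page granularity, and a zero-length range is rejected:

```c
#include <stdbool.h>
#include <stdint.h>

static bool
unmap_range_valid(uint64_t lba, uint64_t lba_cnt, uint64_t alignment)
{
	return lba_cnt != 0 && (lba % alignment) == 0 && (lba_cnt % alignment) == 0;
}
```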
@@ -665,7 +665,7 @@ ftl_process_io_queue(struct spdk_ftl_dev *dev)
 
 /*
 * Unmap operation requires generating a sequence id for itself, which it gets based on the open chunk
-* in nv cache. If there are no open chunks (because we're in the middle of state transistion or compaction
+* in nv cache. If there are no open chunks (because we're in the middle of state transition or compaction
 * lagged behind), then we need to wait for the nv cache to resolve the situation - it's fine to just put the
 * unmap and try again later.
 */
@@ -69,7 +69,7 @@ struct ftl_p2l_map_entry {
 * Mapping of physical (actual location on disk) to logical (user's POV) addresses. Used in two main scenarios:
 * - during relocation FTL needs to pin L2P pages (this allows to check which pages to pin) and move still valid blocks
 * (valid map allows for preliminary elimination of invalid physical blocks, but user data could invalidate a location
-* during read/write operation, so actual comparision against L2P needs to be done)
+* during read/write operation, so actual comparison against L2P needs to be done)
 * - After dirty shutdown the state of the L2P is unknown and needs to be rebuilt - it is done by applying all P2L, taking
 * into account ordering of user writes
 */
@@ -121,7 +121,7 @@ struct ftl_l2p_cache {
 struct ftl_mempool *page_sets_pool;
 TAILQ_HEAD(, ftl_l2p_page_set) deferred_page_set_list; /* for deferred page sets */
 
-/* Process unmap in backgorund */
+/* Process unmap in background */
 struct {
 #define FTL_L2P_MAX_LAZY_UNMAP_QD 1
 /* Unmap queue depth */
@@ -38,7 +38,7 @@
 * zero if the number of free chunks is at the threshold, negative if below and positive if above.
 */
 
-/* Interval in miliseconds between write throttle updates. */
+/* Interval in milliseconds between write throttle updates. */
 #define FTL_NV_CACHE_THROTTLE_INTERVAL_MS 20
 /* Throttle modifier proportional gain */
 #define FTL_NV_CACHE_THROTTLE_MODIFIER_KP 20
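A conceptual sketch (not the FTL code) of a proportional throttle update using the two constants above: the error term is zero at the free-chunk threshold, negative below it, positive above it, and the KP gain scales the reaction:

```c
/* Called every FTL_NV_CACHE_THROTTLE_INTERVAL_MS; returns a signed
 * modifier to apply to the current write limit. */
static double
throttle_modifier(double free_chunks, double threshold, double kp)
{
	double err = (free_chunks - threshold) / threshold; /* 0 at threshold */

	return kp * err;
}
```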
@@ -30,7 +30,7 @@ typedef void (*ftl_mngt_fn)(struct spdk_ftl_dev *dev, struct ftl_mngt_process *m
 typedef void (*ftl_mngt_completion)(struct spdk_ftl_dev *dev, void *ctx, int status);
 
 /**
-* The FTL management step descriptior
+* The FTL management step descriptor
 */
 struct ftl_mngt_step_desc {
 /**
@@ -334,7 +334,7 @@ ftl_mngt_persist_md(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
 }
 
 /*
-* Fast clean shutdown path - skips the persistance of most metadata regions and
+* Fast clean shutdown path - skips the persistence of most metadata regions and
 * relies on their shared memory state instead.
 */
 static const struct ftl_mngt_process_desc desc_fast_persist = {
@@ -742,7 +742,7 @@ ftl_mngt_complete_unmap(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
 
 assert(seq_id <= dev->sb->seq_id);
 
-FTL_NOTICELOG(dev, "Uncomplete unmap detected lba: %"PRIu64" num_blocks: %"PRIu64"\n",
+FTL_NOTICELOG(dev, "Incomplete unmap detected lba: %"PRIu64" num_blocks: %"PRIu64"\n",
 start_lba, num_blocks);
 
 ftl_set_unmap_map(dev, start_lba, num_blocks, seq_id);
@@ -331,7 +331,7 @@ void ftl_md_read_entry(struct ftl_md *md, uint64_t start_entry, void *buffer, vo
 void ftl_md_clear(struct ftl_md *md, int pattern, union ftl_md_vss *vss_pattern);
 
 /**
-* @brief Gets the number of blocks that are transfered in a single IO operation
+* @brief Gets the number of blocks that are transferred in a single IO operation
 *
 * @param dev The FTL device
 *
@@ -335,7 +335,7 @@ app_json_config_load_subsystem_config_entry(void *_ctx)
 SPDK_DEBUG_APP_CFG("Subsystem '%.*s': configuration done.\n", ctx->subsystem_name->len,
 (char *)ctx->subsystem_name->start);
 ctx->subsystems_it = spdk_json_next(ctx->subsystems_it);
-/* Invoke later to avoid recurrency */
+/* Invoke later to avoid recurrence */
 spdk_thread_send_msg(ctx->thread, app_json_config_load_subsystem, ctx);
 return;
 }
@@ -356,7 +356,7 @@ app_json_config_load_subsystem_config_entry(void *_ctx)
 cur_state_mask = spdk_rpc_get_state();
 if ((state_mask & cur_state_mask) != cur_state_mask) {
 SPDK_DEBUG_APP_CFG("Method '%s' not allowed -> skipping\n", cfg.method);
-/* Invoke later to avoid recurrency */
+/* Invoke later to avoid recurrence */
 ctx->config_it = spdk_json_next(ctx->config_it);
 spdk_thread_send_msg(ctx->thread, app_json_config_load_subsystem_config_entry, ctx);
 goto out;
@@ -365,7 +365,7 @@ app_json_config_load_subsystem_config_entry(void *_ctx)
 /* Some methods are allowed to be run in both STARTUP and RUNTIME states.
 * We should not call such methods twice, so ignore the second attempt in RUNTIME state */
 SPDK_DEBUG_APP_CFG("Method '%s' has already been run in STARTUP state\n", cfg.method);
-/* Invoke later to avoid recurrency */
+/* Invoke later to avoid recurrence */
 ctx->config_it = spdk_json_next(ctx->config_it);
 spdk_thread_send_msg(ctx->thread, app_json_config_load_subsystem_config_entry, ctx);
 goto out;
@@ -3031,7 +3031,7 @@ iscsi_transfer_in(struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task)
 sequence_end = spdk_min(((i + 1) * conn->sess->MaxBurstLength),
 transfer_len);
 
-/* send data splitted by segment_len */
+/* send data split by segment_len */
 for (; offset < sequence_end; offset += segment_len) {
 len = spdk_min(segment_len, (sequence_end - offset));
 
@@ -792,7 +792,7 @@ opal_discovery0_end(struct spdk_opal_dev *dev, void *payload, uint32_t payload_s
 supported = true;
 break;
 default:
-SPDK_INFOLOG(opal, "Unknow feature code: %d\n", feat_code);
+SPDK_INFOLOG(opal, "Unknown feature code: %d\n", feat_code);
 }
 cpos += feat_hdr->length + sizeof(*feat_hdr);
 }
@@ -2003,7 +2003,7 @@ nvme_rdma_stale_conn_retry(struct nvme_rdma_qpair *rqpair)
 
 rqpair->stale_conn_retry_count++;
 
-SPDK_NOTICELOG("%d times, retry stale connnection to qpair (cntlid:%u, qid:%u).\n",
+SPDK_NOTICELOG("%d times, retry stale connection to qpair (cntlid:%u, qid:%u).\n",
 rqpair->stale_conn_retry_count, qpair->ctrlr->cntlid, qpair->id);
 
 _nvme_rdma_ctrlr_disconnect_qpair(qpair->ctrlr, qpair, nvme_rdma_stale_conn_disconnected);
@@ -449,7 +449,7 @@ pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
 /* Data Digest */
 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] &&
 tqpair->flags.host_ddgst_enable) {
-/* Only suport this limited case for the first step */
+/* Only support this limited case for the first step */
 if ((nvme_qpair_get_state(&tqpair->qpair) >= NVME_QPAIR_CONNECTED) &&
 (tgroup != NULL && tgroup->group.group->accel_fn_table.submit_accel_crc32c) &&
 spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0))) {
@@ -1121,7 +1121,7 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
 /* But if the data digest is enabled, tcp_req cannot be NULL */
 assert(tcp_req != NULL);
 tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
-/* Only suport this limitated case that the request has only one c2h pdu */
+/* Only support this limitated case that the request has only one c2h pdu */
 if ((nvme_qpair_get_state(&tqpair->qpair) >= NVME_QPAIR_CONNECTED) &&
 (tgroup != NULL && tgroup->group.group->accel_fn_table.submit_accel_crc32c) &&
 spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
@@ -41,7 +41,7 @@ nvme_get_next_transport(const struct spdk_nvme_transport *transport)
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. THis means
 * that a lot of admin related transport calls will have to call nvme_get_transport
-* in order to knwo which functions to call.
+* in order to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
@@ -3051,7 +3051,7 @@ nvmf_fc_adm_evnt_hw_port_offline(void *arg)
 
 free(arg);
 
-/* Wait untill all the hwqps are removed from poll groups. */
+/* Wait until all the hwqps are removed from poll groups. */
 return;
 } else {
 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
@@ -78,7 +78,7 @@ enum spdk_nvmf_tcp_req_state {
 /* The request is currently executing at the block device */
 TCP_REQUEST_STATE_EXECUTING = 8,
 
-/* The request is waiting for zcopy buffers to be commited */
+/* The request is waiting for zcopy buffers to be committed */
 TCP_REQUEST_STATE_AWAITING_ZCOPY_COMMIT = 9,
 
 /* The request finished executing at the block device */
@@ -999,7 +999,7 @@ pdu_data_crc32_compute(struct nvme_tcp_pdu *pdu)
 
 /* Data Digest */
 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] && tqpair->host_ddgst_enable) {
-/* Only suport this limitated case for the first step */
+/* Only support this limitated case for the first step */
 if (spdk_likely(!pdu->dif_ctx && (pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT == 0)
 && tqpair->group)) {
 rc = spdk_accel_submit_crc32cv(tqpair->group->accel_channel, &pdu->data_digest_crc32, pdu->data_iov,
@@ -40,7 +40,7 @@ bdev_scsi_set_naa_ieee_extended(const char *name, uint8_t *buf)
 
 /*
 * see spc3r23 7.6.3.6.2,
-* NAA IEEE Extended identifer format
+* NAA IEEE Extended identifier format
 */
 id_a = local_value & 0x0000000fff000000ull;
 id_a = id_a << 24;
@@ -113,7 +113,7 @@ vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqu
 
 do {
 if (vhost_vring_desc_is_wr(desc)) {
-/* To be honest, only pages realy touched should be logged, but
+/* To be honest, only pages really touched should be logged, but
 * doing so would require tracking those changes in each backed.
 * Also backend most likely will touch all/most of those pages so
 * for lets assume we touched all pages passed to as writeable buffers. */
@@ -1456,7 +1456,7 @@ extern_vhost_pre_msg_handler(int vid, void *_msg)
 
 vsession = vhost_session_find_by_vid(vid);
 if (vsession == NULL) {
-SPDK_ERRLOG("Received a message to unitialized session (vid %d).\n", vid);
+SPDK_ERRLOG("Received a message to uninitialized session (vid %d).\n", vid);
 assert(false);
 return RTE_VHOST_MSG_RESULT_ERR;
 }
@@ -1512,7 +1512,7 @@ extern_vhost_post_msg_handler(int vid, void *_msg)
 
 vsession = vhost_session_find_by_vid(vid);
 if (vsession == NULL) {
-SPDK_ERRLOG("Received a message to unitialized session (vid %d).\n", vid);
+SPDK_ERRLOG("Received a message to uninitialized session (vid %d).\n", vid);
 assert(false);
 return RTE_VHOST_MSG_RESULT_ERR;
 }
@@ -1627,7 +1627,7 @@ spdk_vhost_blk_construct(const char *name, const char *cpumask, const char *dev_
 * be started/stopped many times, related to the queues num, as the
 * exact number of queues used for this device is not known at the time.
 * The target has to stop and start the device once got a valid IO queue.
-* When stoping and starting the vhost device, the backend bdev io device
+* When stopping and starting the vhost device, the backend bdev io device
 * will be deleted and created repeatedly.
 * Hold a bdev reference so that in the struct spdk_vhost_blk_dev, so that
 * the io device will not be deleted.
@@ -74,7 +74,7 @@ ifeq ($(TARGET_MACHINE),x86_64)
 ifneq (,$(shell $(CC) --target-help | grep -qe -mavx512f && echo 1))
 # Don't use AVX-512 instructions in SPDK code - it breaks Valgrind for
 # some cases where compiler decides to hyper-optimize a relatively
-# simple operation (like int-to-float converstion) using AVX-512
+# simple operation (like int-to-float conversion) using AVX-512
 COMMON_CFLAGS += -mno-avx512f
 endif
 endif
@@ -117,7 +117,7 @@ uint8_t g_number_of_claimed_volumes = 0;
 
 #define AESNI_MB_NUM_QP 64
 
-/* Common for suported devices. */
+/* Common for supported devices. */
 #define DEFAULT_NUM_XFORMS 2
 #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
 sizeof(struct rte_crypto_sym_op) + \
@@ -241,7 +241,7 @@ create_vbdev_dev(uint8_t index, uint16_t num_lcores)
 }
 
 /* Before going any further, make sure we have enough resources for this
-* device type to function. We need a unique queue pair per core accross each
+* device type to function. We need a unique queue pair per core across each
 * device type to remain lockless....
 */
 if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
@@ -616,7 +616,7 @@ bdev_ftl_finish(void)
 }
 
 static void
-bdev_ftl_create_defered_cb(const struct ftl_bdev_info *info, void *ctx, int status)
+bdev_ftl_create_deferred_cb(const struct ftl_bdev_info *info, void *ctx, int status)
 {
 struct ftl_deferred_init *opts = ctx;
 
@@ -636,8 +636,8 @@ bdev_ftl_examine(struct spdk_bdev *bdev)
 int rc;
 
 LIST_FOREACH(opts, &g_deferred_init, entry) {
-/* spdk_bdev_module_examine_done will be called by bdev_ftl_create_defered_cb */
-rc = bdev_ftl_create_bdev(&opts->conf, bdev_ftl_create_defered_cb, opts);
+/* spdk_bdev_module_examine_done will be called by bdev_ftl_create_deferred_cb */
+rc = bdev_ftl_create_bdev(&opts->conf, bdev_ftl_create_deferred_cb, opts);
 if (rc == -ENODEV) {
 continue;
 }
@@ -645,7 +645,7 @@ bdev_ftl_examine(struct spdk_bdev *bdev)
 LIST_REMOVE(opts, entry);
 
 if (rc) {
-bdev_ftl_create_defered_cb(NULL, opts, rc);
+bdev_ftl_create_deferred_cb(NULL, opts, rc);
 }
 return;
 }
@@ -177,7 +177,7 @@ bdev_iscsi_finish(void)
 
 /* clear out pending connection requests here. We cannot
 * simply set the state to a non SCSI_STATUS_GOOD state as
-* the connection poller wont run anymore
+* the connection poller won't run anymore
 */
 TAILQ_FOREACH_SAFE(req, &g_iscsi_conn_req, link, tmp) {
 _bdev_iscsi_conn_req_free(req);
@@ -198,7 +198,7 @@ class QMPClient():
 :return command exec response or optionally execute result event
 :raise QMPRequestError: on response from QMP server being of error type
 :raise QMPSocketError: on timeout or socket errors
-:raise QMPError: on id missmatch and JSONdecoder errors
+:raise QMPError: on id mismatch and JSONdecoder errors
 '''
 cmd_id = self._get_next_exec_id()
 msg = {'execute': cmd, 'id': cmd_id}
@@ -33,7 +33,7 @@ fio_conf() {
 fi
 
 if [[ -e $fio_extra_conf ]]; then
-# Overriden through cmdline|env
+# Overridden through cmdline|env
 cat "$fio_extra_conf"
 elif [[ ! -t $fio_extra_conf ]]; then
 # Attached to stdin
@@ -2260,7 +2260,7 @@ Format: 'user:u1 secret:s1 muser:mu1 msecret:ms1,user:u2 secret:s2 muser:mu2 mse
 p.add_argument('-b', '--buf-cache-size', help='The number of shared buffers to reserve for each poll group', type=int)
 p.add_argument('-z', '--zcopy', action='store_true', help='''Use zero-copy operations if the
 underlying bdev supports them''')
-p.add_argument('-d', '--num-cqe', help="""The number of CQ entires. Only used when no_srq=true.
+p.add_argument('-d', '--num-cqe', help="""The number of CQ entries. Only used when no_srq=true.
 Relevant only for RDMA transport""", type=int)
 p.add_argument('-s', '--max-srq-depth', help='Max number of outstanding I/O per SRQ. Relevant only for RDMA transport', type=int)
 p.add_argument('-r', '--no-srq', action='store_true', help='Disable per-thread shared receive queue. Relevant only for RDMA transport')
@@ -550,7 +550,7 @@ function stat_test_suite() {
 trap - SIGINT SIGTERM EXIT
 }
 
-# Inital bdev creation and configuration
+# Initial bdev creation and configuration
 #-----------------------------------------------------
 QOS_DEV_1="Malloc_0"
 QOS_DEV_2="Null_1"
@@ -1006,7 +1006,7 @@ function daos_cleanup() {
 function _start_stub() {
 # Disable ASLR for multi-process testing. SPDK does support using DPDK multi-process,
 # but ASLR can still be unreliable in some cases.
-# We will reenable it again after multi-process testing is complete in kill_stub().
+# We will re-enable it again after multi-process testing is complete in kill_stub().
 # Save current setting so it can be restored upon calling kill_stub().
 _randomize_va_space=$(< /proc/sys/kernel/randomize_va_space)
 echo 0 > /proc/sys/kernel/randomize_va_space
@@ -19,7 +19,7 @@ def sort_json_object(o):
 sorted_o[key] = sort_json_object(o[key])
 return sorted_o
 if isinstance(o, list):
-""" Keep list in the same orded but sort each item """
+""" Keep list in the same order but sort each item """
 return [sort_json_object(item) for item in o]
 else:
 return o
@@ -71,7 +71,7 @@ function clean_up() {
 # This assumes every NVMe controller contains single namespace,
 # encompassing Total NVM Capacity and formatted as 512 block size.
 # 512 block size is needed for test/vhost/vhost_boot.sh to
-# succesfully run.
+# successfully run.
 
 tnvmcap=$($NVME_CMD id-ctrl ${nvme_dev} | grep tnvmcap | cut -d: -f2)
 blksize=512
@@ -62,7 +62,7 @@ if [ "$CUSE_SMART_ERRLOG" != "$KERNEL_SMART_ERRLOG" ]; then
 exit 1
 fi
 
-# Data integrity was checked before, now make sure other commads didn't fail
+# Data integrity was checked before, now make sure other commands didn't fail
 ${SMARTCTL_CMD} -i /dev/spdk/nvme0n1
 ${SMARTCTL_CMD} -c /dev/spdk/nvme0
 ${SMARTCTL_CMD} -A /dev/spdk/nvme0
@@ -255,7 +255,7 @@ function create_fio_config() {
 
 total_disks_per_core=$disks_per_core
 # Check how many "stray" disks are unassigned to CPU cores
-# Assign one disk to current CPU core and substract it from the total of
+# Assign one disk to current CPU core and subtract it from the total of
 # unassigned disks
 if [[ "$disks_per_core_mod" -gt "0" ]]; then
 total_disks_per_core=$((disks_per_core + 1))
@@ -153,7 +153,7 @@ dm_mount() {
 dm_mount=$SPDK_TEST_STORAGE/dm_mount
 dm_dummy_test_file=$dm_mount/test_dm
 
-# Each partition is 1G in size, join their halfs
+# Each partition is 1G in size, join their halves
 dmsetup create "$dm_name" <<- DM_TABLE
 0 1048576 linear /dev/$pv0 0
 1048576 1048576 linear /dev/$pv1 0
@@ -121,7 +121,7 @@ delete_device "$devid1"
 NOT rpc_cmd nvmf_get_subsystems nqn.2016-06.io.spdk:cnode1
 [[ $(rpc_cmd nvmf_get_subsystems | jq -r '. | length') -eq 1 ]]
 
-# Finally check that removing a non-existing device is also sucessful
+# Finally check that removing a non-existing device is also successful
 delete_device "$devid0"
 delete_device "$devid1"
 
@@ -147,7 +147,7 @@ detach_volume "$devid0" "$uuid"
 [[ $(rpc_cmd nvmf_get_subsystems nqn.2016-06.io.spdk:cnode0 | jq -r '.[0].namespaces | length') -eq 0 ]]
 [[ $(rpc_cmd nvmf_get_subsystems nqn.2016-06.io.spdk:cnode1 | jq -r '.[0].namespaces | length') -eq 0 ]]
 
-# Detach it again and verify it suceeds
+# Detach it again and verify it succeeds
 detach_volume "$devid0" "$uuid"
 
 cleanup
@@ -204,7 +204,7 @@ NOT rpc_cmd nvmf_get_subsystems nqn.2016-06.io.spdk:vfiouser-1
 [[ $(rpc_cmd nvmf_get_subsystems | jq -r '. | length') -eq 1 ]]
 [[ $(vm_count_nvme ${vm_no}) -eq 0 ]]
 
-# Finally check that removing a non-existing device is also sucessful
+# Finally check that removing a non-existing device is also successful
 delete_device "$device0"
 delete_device "$device1"
 
@@ -131,7 +131,7 @@ delete_device "$devid1"
 NOT rpc_cmd vhost_get_controllers -n sma-1
 [[ $(rpc_cmd vhost_get_controllers | jq -r '. | length') -eq 0 ]]
 
-# Finally check that removing a non-existing device is also sucessful
+# Finally check that removing a non-existing device is also successful
 delete_device "$devid0"
 delete_device "$devid1"
 

@@ -2582,7 +2582,7 @@ bdev_io_mix_split_test(void)
 
 	/* IO crossing the IO boundary requires split.
 	 * The 1st child IO segment size exceeds the max_segment_size,
-	 * So 1st child IO will be splitted to multiple segment entry.
+	 * So 1st child IO will be split to multiple segment entry.
 	 * Then it split to 2 child IOs because of the max_num_segments.
 	 * Total 3 child IOs.
 	 */

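These mixed-split cases combine three independent limits, each of which can force an additional round of splitting. A hedged sketch of how a bdev under test might configure them (fields as declared in struct spdk_bdev in spdk/bdev_module.h; the values here are illustrative, not the ones this test uses):

    #include "spdk/bdev_module.h"

    static void
    configure_split_limits(struct spdk_bdev *bdev)
    {
    	bdev->split_on_optimal_io_boundary = true;
    	bdev->optimal_io_boundary = 32;    /* no child IO may cross a 32-block boundary */
    	bdev->max_segment_size = 512 * 32; /* an oversized iov entry is split into segments */
    	bdev->max_num_segments = 16;       /* segment overflow spills into an extra child IO */
    }
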
@@ -2658,7 +2658,7 @@ bdev_io_mix_split_test(void)
 	/* IO crossing the IO boundary requires split.
 	 * The 1st child IO segment size exceeds the max_segment_size and after
 	 * splitting segment_size, the num_segments exceeds max_num_segments.
-	 * So 1st child IO will be splitted to 2 child IOs.
+	 * So 1st child IO will be split to 2 child IOs.
 	 * Total 3 child IOs.
 	 */
 

@@ -2708,7 +2708,7 @@ bdev_io_mix_split_test(void)
 
 	/* IO crossing the IO boundary requires split.
 	 * 80 block length can split 5 child IOs base on offset and IO boundary.
-	 * Each iov entry needs to be splitted to 2 entries because of max_segment_size
+	 * Each iov entry needs to be split to 2 entries because of max_segment_size
 	 * Total 5 child IOs.
 	 */
 

@@ -2750,7 +2750,7 @@ bdev_io_mix_split_test(void)
 	}
 	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 
-	/* 5th child IO and because of the child iov entry it should be splitted
+	/* 5th child IO and because of the child iov entry it should be split
 	 * in next round.
 	 */
 	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);

@@ -5788,7 +5788,7 @@ test_set_preferred_path(void)
 	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);
 
 	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
-	 * aquired, find_io_path() should return io_path to ctrlr3.
+	 * acquired, find_io_path() should return io_path to ctrlr3.
 	 */
 
 	spdk_put_io_channel(ch);

@@ -1585,7 +1585,7 @@ _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
 	uint8_t *buf;
 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
 
-	/* To be sure that operation is NOT splitted, read one page at the time */
+	/* To be sure that operation is NOT split, read one page at the time */
 	buf = payload;
 	for (i = 0; i < length; i++) {
 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);

@@ -1609,7 +1609,7 @@ _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
 	uint8_t *buf;
 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
 
-	/* To be sure that operation is NOT splitted, write one page at the time */
+	/* To be sure that operation is NOT split, write one page at the time */
 	buf = payload;
 	for (i = 0; i < length; i++) {
 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);

@@ -2561,7 +2561,7 @@ bs_load_after_failed_grow(void)
 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
 	/*
 	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
-	 * blobstore will create 64 md pages by defualt. We set num_md_pages to 128,
+	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
 	 * thus the blobstore could grow to the double size.
 	 */
 	opts.num_md_pages = 128;

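The sizing in that comment is simple arithmetic: a 64 MiB bdev divided into 1 MiB clusters gives 64 clusters, the blobstore reserves one metadata page per cluster by default, and 128 pages therefore leave headroom to double. A hedged sketch of the corresponding options setup (API from spdk/blob.h, using the two-argument form of spdk_bs_opts_init()):

    #include "spdk/blob.h"

    struct spdk_bs_opts opts;

    spdk_bs_opts_init(&opts, sizeof(opts));
    opts.cluster_sz = 1024 * 1024; /* 1 MiB clusters: 64 MiB bdev -> 64 clusters */
    opts.num_md_pages = 128;       /* metadata for up to 128 clusters, i.e. 2x growth */
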
@@ -1240,7 +1240,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
 	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);
 
-	/* Test case: All parameters is conformed to the functon. Expect: PASS */
+	/* Test case: All parameters is conformed to the function. Expect: PASS */
 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
 	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;

@@ -1150,7 +1150,7 @@ override_impl_opts(void)
 	opts.impl_opts = &impl_opts;
 	opts.impl_opts_size = sizeof(impl_opts);
 
-	/* Use send_buf_size to verify that impl_opts get overriden */
+	/* Use send_buf_size to verify that impl_opts get overridden */
 	send_buf_size = impl_opts.send_buf_size;
 	impl_opts.send_buf_size = send_buf_size + 1;
 

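The override being verified attaches a private copy of the implementation options to a single socket, leaving the global defaults untouched. A hedged sketch of that setup (types and calls from spdk/sock.h; "posix" is just one possible impl name):

    #include "spdk/sock.h"

    struct spdk_sock_impl_opts impl_opts;
    size_t len = sizeof(impl_opts);
    struct spdk_sock_opts opts = { .opts_size = sizeof(opts) };

    /* Start from the impl defaults, then change one knob per-socket. */
    spdk_sock_impl_get_opts("posix", &impl_opts, &len);
    impl_opts.send_buf_size += 1;
    opts.impl_opts = &impl_opts;
    opts.impl_opts_size = sizeof(impl_opts);
    /* A socket created with &opts now observes the bumped send_buf_size. */
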
@@ -121,7 +121,7 @@ test_cpuset_parse(void)
 	rc = spdk_cpuset_parse(NULL, "[1]");
 	CU_ASSERT(rc < 0);
 
-	/* Wrong formated core lists */
+	/* Wrong formatted core lists */
 	rc = spdk_cpuset_parse(core_mask, "");
 	CU_ASSERT(rc < 0);
 

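spdk_cpuset_parse() accepts a hexadecimal mask or a bracketed core list; the negative cases above probe everything else. A hedged sketch of both sides (formats per spdk/cpuset.h):

    #include <assert.h>
    #include "spdk/cpuset.h"

    struct spdk_cpuset set;

    /* Well-formed inputs parse cleanly. */
    assert(spdk_cpuset_parse(&set, "0x3") == 0);     /* hex mask: cores 0-1 */
    assert(spdk_cpuset_parse(&set, "[0,2-3]") == 0); /* list: cores 0, 2, 3 */

    /* Malformed inputs must be rejected. */
    assert(spdk_cpuset_parse(&set, "") < 0);   /* empty string */
    assert(spdk_cpuset_parse(&set, "[1") < 0); /* unterminated bracket */
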
@@ -68,7 +68,7 @@ DEFINE_STUB(rte_vhost_get_vring_base_from_inflight, int,
 DEFINE_STUB(rte_vhost_extern_callback_register, int,
 	    (int vid, struct rte_vhost_user_extern_ops const *const ops, void *ctx), 0);
 
-/* rte_vhost_user.c shutdowns vhost_user sessions in a separte pthread */
+/* rte_vhost_user.c shutdowns vhost_user sessions in a separate pthread */
 DECLARE_WRAPPER(pthread_create, int, (pthread_t *thread, const pthread_attr_t *attr,
 				      void *(*start_routine)(void *), void *arg));
 int

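DECLARE_WRAPPER above relies on the linker's symbol wrapping: when the test binary is linked with -Wl,--wrap=pthread_create, every call is rerouted to __wrap_pthread_create, which lets the unit test intercept the separate teardown thread. A hedged sketch of the bare mechanism, without SPDK's mock macros:

    #include <pthread.h>
    #include <stdio.h>

    /* Resolved by the linker when built with -Wl,--wrap=pthread_create. */
    int __real_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    			      void *(*start_routine)(void *), void *arg);

    int
    __wrap_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    		      void *(*start_routine)(void *), void *arg)
    {
    	/* Observe or stub the call here, then fall through to the real libc. */
    	printf("intercepted pthread_create\n");
    	return __real_pthread_create(thread, attr, start_routine, arg);
    }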