lib: fix typos in the lib directory

Change-Id: Idcb60b79d2902bb316facc6f60e0a81e5cf847ed
Signed-off-by: Chen Wang <chenx.wang@intel.com>
Reviewed-on: https://review.gerrithub.io/423372
Reviewed-by: GangCao <gang.cao@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chen Wang 2018-08-24 17:09:27 +08:00 committed by Jim Harris
parent 3e8a7c11da
commit 6fa48bbf62
15 changed files with 24 additions and 24 deletions


@@ -147,7 +147,7 @@ struct spdk_bdev_qos {
 	/** Submitted byte in one timeslice (e.g., 1ms) */
 	uint64_t byte_submitted_this_timeslice;
-	/** Polller that processes queued I/O commands each time slice. */
+	/** Poller that processes queued I/O commands each time slice. */
 	struct spdk_poller *poller;
 };
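For context on the comment corrected above: the QoS logic drains its queued I/O from a periodic poller. Below is a minimal sketch of registering such a timeslice poller through the public spdk_poller_register() API; the example_* names and the 1 ms period are illustrative and not taken from this commit.

#include "spdk/thread.h"

#define EXAMPLE_QOS_TIMESLICE_US 1000	/* 1 ms timeslice, mirroring the comment above */

/* Illustrative poller body; a real QoS poller would resubmit I/O queued
 * during the previous timeslice here. */
static int
example_qos_poll(void *ctx)
{
	return 0;
}

/* Register a poller that fires once per timeslice on the current SPDK thread. */
static struct spdk_poller *
example_start_qos_poller(void *ctx)
{
	return spdk_poller_register(example_qos_poll, ctx, EXAMPLE_QOS_TIMESLICE_US);
}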
@@ -172,7 +172,7 @@ struct spdk_bdev_mgmt_channel {
 /*
  * Per-module (or per-io_device) data. Multiple bdevs built on the same io_device
- * will queue here their IO that awaits retry. It makes it posible to retry sending
+ * will queue here their IO that awaits retry. It makes it possible to retry sending
  * IO to one bdev after IO from other bdev completes.
  */
 struct spdk_bdev_shared_resource {


@@ -232,7 +232,7 @@ vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *b
 }
 /* We'll just call the base bdev and let it answer however if we were more
- * restrictive for some reason (or less) we could get the repsonse back
+ * restrictive for some reason (or less) we could get the response back
  * and modify according to our purposes.
  */
 static bool
@@ -256,7 +256,7 @@ vbdev_passthru_get_io_channel(void *ctx)
 	struct spdk_io_channel *pt_ch = NULL;
 	/* The IO channel code will allocate a channel for us which consists of
-	 * the SPDK cahnnel structure plus the size of our pt_io_channel struct
+	 * the SPDK channel structure plus the size of our pt_io_channel struct
 	 * that we passed in when we registered our IO device. It will then call
 	 * our channel create callback to populate any elements that we need to
 	 * update.
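The comment fixed in this hunk describes the per-channel context contract of SPDK io_devices. A minimal sketch of that pattern against a recent SPDK tree follows; the example_* names are hypothetical, and the trailing name argument of spdk_io_device_register() may not exist in older releases.

#include "spdk/thread.h"

/* Hypothetical per-channel context; its size is passed to spdk_io_device_register(). */
struct example_pt_io_channel {
	struct spdk_io_channel *base_ch;
};

/* Channel create callback: ctx_buf points at the context the framework
 * allocated directly behind the generic struct spdk_io_channel. */
static int
example_pt_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct example_pt_io_channel *pt_ch = ctx_buf;

	pt_ch->base_ch = NULL;	/* a passthru bdev would open its base bdev's channel here */
	return 0;
}

static void
example_pt_ch_destroy_cb(void *io_device, void *ctx_buf)
{
}

/* After this registration, each spdk_get_io_channel(io_device) call yields a channel
 * whose allocation also covers sizeof(struct example_pt_io_channel). */
static void
example_pt_register(void *io_device)
{
	spdk_io_device_register(io_device, example_pt_ch_create_cb, example_pt_ch_destroy_cb,
				sizeof(struct example_pt_io_channel), "example_pt");
}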
@@ -448,7 +448,7 @@ vbdev_passthru_write_json_config(struct spdk_bdev *bdev, struct spdk_json_write_
 	spdk_json_write_object_end(w);
 }
-/* When we regsiter our bdev this is how we specify our entry points. */
+/* When we register our bdev this is how we specify our entry points. */
 static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
 	.destruct = vbdev_passthru_destruct,
 	.submit_request = vbdev_passthru_submit_request,
@@ -507,7 +507,7 @@ vbdev_passthru_register(struct spdk_bdev *bdev)
 	}
 	pt_node->pt_bdev.product_name = "passthru";
-	/* Copy some properties from the underying base bdev. */
+	/* Copy some properties from the underlying base bdev. */
 	pt_node->pt_bdev.write_cache = bdev->write_cache;
 	pt_node->pt_bdev.need_aligned_buffer = bdev->need_aligned_buffer;
 	pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;


@@ -46,7 +46,7 @@ typedef void (*spdk_delete_pmem_complete)(void *cb_arg, int bdeverrno);
 * \param bdev output parameter for bdev when operation is successful.
 * \return 0 on success.
 * -EIO if pool check failed
-* -EINVAL if input paramteres check failed
+* -EINVAL if input parameters check failed
 * -ENOMEM if buffer cannot be allocated
 */
 int spdk_create_pmem_disk(const char *pmem_file, const char *name, struct spdk_bdev **bdev);
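Because the hunk above only touches the \return documentation, here is a hedged usage sketch of that error contract; the include path and the pool_file/"pmem0" arguments are assumptions for illustration, not part of the commit.

#include <stdio.h>

#include "bdev_pmem.h"	/* assumed local header declaring spdk_create_pmem_disk() */

/* Sketch of the documented contract: 0 on success, otherwise -EIO, -EINVAL or -ENOMEM. */
static struct spdk_bdev *
example_create_pmem_bdev(const char *pool_file)
{
	struct spdk_bdev *bdev = NULL;
	int rc;

	rc = spdk_create_pmem_disk(pool_file, "pmem0", &bdev);
	if (rc != 0) {
		fprintf(stderr, "pmem bdev creation failed: %d\n", rc);
		return NULL;
	}

	return bdev;
}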


@@ -557,7 +557,7 @@ raid_bdev_io_submit_fail_process(struct raid_bdev *raid_bdev, struct spdk_bdev_i
 /*
  * brief:
  * raid_bdev_waitq_io_process function is the callback function
- * registerd by raid bdev module to bdev when bdev_io was unavailable.
+ * registered by raid bdev module to bdev when bdev_io was unavailable.
  * params:
 * ctx - pointer to raid_bdev_io
 * returns:
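The callback documented in this hunk is part of the standard -ENOMEM retry pattern for bdev modules. A minimal sketch of that pattern using the public spdk_bdev_queue_io_wait() API is shown below; the example_* structures and names are hypothetical, not the raid module's real ones.

#include "spdk/bdev.h"

/* Hypothetical per-I/O context that embeds the wait-queue entry. */
struct example_raid_io {
	struct spdk_bdev_io_wait_entry waitq_entry;
};

/* Callback the bdev layer invokes once resources are available again;
 * a real module would resubmit the failed I/O here. */
static void
example_waitq_io_process(void *ctx)
{
}

/* Queue an I/O for retry after a submit call returned -ENOMEM. */
static int
example_queue_for_retry(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
			struct example_raid_io *io)
{
	io->waitq_entry.bdev = bdev;
	io->waitq_entry.cb_fn = example_waitq_io_process;
	io->waitq_entry.cb_arg = io;

	return spdk_bdev_queue_io_wait(bdev, ch, &io->waitq_entry);
}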


@@ -52,7 +52,7 @@ int create_vbdev_split(const char *base_bdev_name, unsigned split_count, uint64_
 * Remove all created split bdevs and split config.
 *
 * \param base_bdev_name base bdev name
-* \return 0 on succes or negative errno value.
+* \return 0 on success or negative errno value.
 */
 int spdk_vbdev_split_destruct(const char *base_bdev_name);


@@ -2678,7 +2678,7 @@ _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
 	 * This case needs to be looked at further. Same problem
 	 * exists with applications that rely on explicit blob
 	 * iteration. We should just skip the blob that failed
-	 * to load and coontinue on to the next one.
+	 * to load and continue on to the next one.
 	 */
 	SPDK_ERRLOG("Error in iterating blobs\n");
 	}
@@ -3223,7 +3223,7 @@ _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int
 	/*
 	 * We need to defer calling spdk_bs_call_cpl() until after
-	 * dev destuction, so tuck these away for later use.
+	 * dev destruction, so tuck these away for later use.
 	 */
 	ctx->bs->unload_err = bserrno;
 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
@@ -3760,7 +3760,7 @@ _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserr
 	/*
 	 * We need to defer calling spdk_bs_call_cpl() until after
-	 * dev destuction, so tuck these away for later use.
+	 * dev destruction, so tuck these away for later use.
 	 */
 	ctx->bs->unload_err = bserrno;
 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));


@@ -140,7 +140,7 @@ struct spdk_blob {
 	struct spdk_bs_dev *back_bs_dev;
 	/* TODO: The xattrs are mutable, but we don't want to be
-	 * copying them unecessarily. Figure this out.
+	 * copying them unnecessarily. Figure this out.
 	 */
 	struct spdk_xattr_tailq xattrs;
 	struct spdk_xattr_tailq xattrs_internal;


@@ -389,7 +389,7 @@ spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
 	}
 	}
 	}
-	/* Since PCI paddr can break the 2MiB physical alginment skip this check for that. */
+	/* Since PCI paddr can break the 2MiB physical alignment skip this check for that. */
 	if (!pci_phys && (paddr & MASK_2MB)) {
 		DEBUG_PRINT("invalid paddr 0x%" PRIx64 " - must be 2MB aligned\n", paddr);
 		return -EINVAL;
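The alignment test referenced in this hunk rejects physical addresses that do not sit on a 2 MiB hugepage boundary unless they come from PCI BARs. A standalone sketch of the same mask check follows; EXAMPLE_MASK_2MB is a local stand-in for SPDK's internal MASK_2MB constant.

#include <stdbool.h>
#include <stdint.h>

/* Local stand-in for SPDK's internal MASK_2MB: the low 21 bits of an address. */
#define EXAMPLE_MASK_2MB ((1ULL << 21) - 1)

/* True when paddr falls on a 2 MiB boundary, i.e. it would pass the check above. */
static bool
example_is_2mb_aligned(uint64_t paddr)
{
	return (paddr & EXAMPLE_MASK_2MB) == 0;
}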


@@ -1699,7 +1699,7 @@ spdk_iscsi_op_login_phase_none(struct spdk_iscsi_conn *conn,
 }
 /*
- * The function which is used to initalize the internal response data
+ * The function which is used to initialize the internal response data
 * structure of iscsi login function.
 * return:
 * 0, success;
@@ -3914,7 +3914,7 @@ spdk_iscsi_op_snack(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
 	return rc;
 }
-/* This fucntion is used to refree the pdu when it is acknowledged */
+/* This function is used to refree the pdu when it is acknowledged */
 static void
 spdk_remove_acked_pdu(struct spdk_iscsi_conn *conn,
 		      uint32_t ExpStatSN)


@@ -503,7 +503,7 @@ static const char *target_declarative_params[] = {
 	NULL,
 };
-/* This function is used to contruct the data from the special param (e.g.,
+/* This function is used to construct the data from the special param (e.g.,
 * MaxRecvDataSegmentLength)
 * return:
 * normal: the total len of the data
@@ -598,7 +598,7 @@ spdk_iscsi_special_param_construction(struct spdk_iscsi_conn *conn,
 /**
 * spdk_iscsi_construct_data_from_param:
 * To construct the data which will be returned to the initiator
-* return: length of the negotiated data, -1 inidicates error;
+* return: length of the negotiated data, -1 indicates error;
 */
 static int
 spdk_iscsi_construct_data_from_param(struct iscsi_param *param, char *new_val,


@@ -351,7 +351,7 @@ static int netlink_addr_msg(uint32_t ifc_idx, uint32_t ip_address, uint32_t crea
 	la.nl_pid = getpid();
 	bind(fd, (struct sockaddr *) &la, sizeof(la));
-	/* initalize RTNETLINK request buffer. */
+	/* initialize RTNETLINK request buffer. */
 	bzero(&req, sizeof(req));
 	/* compute the initial length of the service request. */


@@ -344,7 +344,7 @@ nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
 	}
 	case SPDK_NVME_SCT_PATH:
 		/*
-		 * Per NVMe TP 4028 (Path and Transport Error Enhancments), retries should be
+		 * Per NVMe TP 4028 (Path and Transport Error Enhancements), retries should be
 		 * based on the setting of the DNR bit for Internal Path Error
 		 */
 		switch ((int)cpl->status.sc) {
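For the path-error retry rule mentioned in the comment above, a hedged helper illustrating the shape of the DNR test follows; it is not SPDK's actual nvme_completion_is_retry() logic, only an illustration of checking cpl->status.

#include <stdbool.h>

#include "spdk/nvme_spec.h"

/* Illustrative only: for path-related status (SPDK_NVME_SCT_PATH), treat the
 * command as retryable when the Do Not Retry bit is clear. */
static bool
example_path_status_is_retryable(const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sct != SPDK_NVME_SCT_PATH) {
		return false;
	}

	return cpl->status.dnr == 0;
}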


@@ -58,7 +58,7 @@
 #define NVME_RDMA_RW_BUFFER_SIZE 131072
 /*
- NVME RDMA qpair Resouce Defaults
+ NVME RDMA qpair Resource Defaults
 */
 #define NVME_RDMA_DEFAULT_TX_SGE 2
 #define NVME_RDMA_DEFAULT_RX_SGE 1
@@ -1137,7 +1137,7 @@ struct spdk_nvme_ctrlr *nvme_rdma_ctrlr_construct(const struct spdk_nvme_transpo
 	nvme_ctrlr_init_cap(&rctrlr->ctrlr, &cap, &vs);
-	SPDK_DEBUGLOG(SPDK_LOG_NVME, "succesully initialized the nvmf ctrlr\n");
+	SPDK_DEBUGLOG(SPDK_LOG_NVME, "successfully initialized the nvmf ctrlr\n");
 	return &rctrlr->ctrlr;
 }


@@ -866,7 +866,7 @@ spdk_nvmf_ctrlr_set_features_number_of_queues(struct spdk_nvmf_request *req)
 		      req->cmd->nvme_cmd.cdw11);
 	count = spdk_bit_array_count_set(ctrlr->qpair_mask);
-	/* verify that the contoller is ready to process commands */
+	/* verify that the controller is ready to process commands */
 	if (count > 1) {
 		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Queue pairs already active!\n");
 		rsp->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;


@@ -51,7 +51,7 @@
 #include "spdk_internal/log.h"
 /*
- RDMA Connection Resouce Defaults
+ RDMA Connection Resource Defaults
 */
 #define NVMF_DEFAULT_TX_SGE 1
 #define NVMF_DEFAULT_RX_SGE 2