diff --git a/include/spdk/bdev_module.h b/include/spdk/bdev_module.h
index d975cc907..b85ef351b 100644
--- a/include/spdk/bdev_module.h
+++ b/include/spdk/bdev_module.h
@@ -31,7 +31,7 @@ extern "C" {
 /* This parameter is best defined for bdevs that share an underlying bdev,
 * such as multiple lvol bdevs sharing an nvme device, to avoid unnecessarily
 * resetting the underlying bdev and affecting other bdevs that are sharing it. */
-#define BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE 5
+#define SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE 5
 /** Block device module */
 struct spdk_bdev_module {
@@ -464,7 +464,7 @@ struct spdk_bdev {
 * `reset_io_drain_timeout` seconds for outstanding IO that are present
 * on any bdev channel, before sending a reset down to the underlying device.
 * That way we can avoid sending "empty" resets and interrupting work of
- * other lvols that use the same bdev. BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE
+ * other lvols that use the same bdev. SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE
 * is a good choice for the value of this parameter.
 *
 * If this parameter remains equal to zero, the bdev reset will be forcefully
@@ -585,7 +585,8 @@ typedef void (*spdk_bdev_io_get_buf_cb)(struct spdk_io_channel *ch, struct spdk_
 typedef void (*spdk_bdev_io_get_aux_buf_cb)(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, void *aux_buf);
-#define BDEV_IO_NUM_CHILD_IOV 32
+/* Maximum number of IOVs used for I/O splitting */
+#define SPDK_BDEV_IO_NUM_CHILD_IOV 32
 struct spdk_bdev_io {
 /** The block device that this I/O belongs to. */
@@ -601,7 +602,7 @@ struct spdk_bdev_io {
 struct iovec iov;
 /** Array of iovecs used for I/O splitting. */
- struct iovec child_iov[BDEV_IO_NUM_CHILD_IOV];
+ struct iovec child_iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
 union {
 struct {
diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c
index 1a4aa589b..f09e7b79f 100644
--- a/lib/bdev/bdev.c
+++ b/lib/bdev/bdev.c
@@ -2521,8 +2521,8 @@ _bdev_rw_split(void *_bdev_io)
 int rc;
 max_segment_size = max_segment_size ? max_segment_size : UINT32_MAX;
- max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, BDEV_IO_NUM_CHILD_IOV) :
- BDEV_IO_NUM_CHILD_IOV;
+ max_child_iovcnt = max_child_iovcnt ? spdk_min(max_child_iovcnt, SPDK_BDEV_IO_NUM_CHILD_IOV) :
+ SPDK_BDEV_IO_NUM_CHILD_IOV;
 if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE && bdev->split_on_write_unit) {
 io_boundary = bdev->write_unit_size;
@@ -2547,7 +2547,8 @@ _bdev_rw_split(void *_bdev_io)
 }
 child_iovcnt = 0;
- while (remaining > 0 && parent_iovpos < parent_iovcnt && child_iovcnt < BDEV_IO_NUM_CHILD_IOV) {
+ while (remaining > 0 && parent_iovpos < parent_iovcnt &&
+ child_iovcnt < SPDK_BDEV_IO_NUM_CHILD_IOV) {
 to_next_boundary = _to_next_boundary(current_offset, io_boundary);
 to_next_boundary = spdk_min(remaining, to_next_boundary);
 to_next_boundary_bytes = to_next_boundary * blocklen;
@@ -2560,7 +2561,7 @@ _bdev_rw_split(void *_bdev_io)
 (current_offset - parent_offset) * spdk_bdev_get_md_size(bdev);
 }
- child_iovsize = spdk_min(BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
+ child_iovsize = spdk_min(SPDK_BDEV_IO_NUM_CHILD_IOV - child_iovcnt, max_child_iovcnt);
 while (to_next_boundary_bytes > 0 && parent_iovpos < parent_iovcnt && iovcnt < child_iovsize) {
 parent_iov = &bdev_io->u.bdev.iovs[parent_iovpos];
@@ -2590,12 +2591,12 @@ _bdev_rw_split(void *_bdev_io)
 * then adjust to_next_boundary before starting the
 * child I/O.
 */
- assert(child_iovcnt == BDEV_IO_NUM_CHILD_IOV ||
+ assert(child_iovcnt == SPDK_BDEV_IO_NUM_CHILD_IOV ||
 iovcnt == child_iovsize);
 to_last_block_bytes = to_next_boundary_bytes % blocklen;
 if (to_last_block_bytes != 0) {
 uint32_t child_iovpos = child_iovcnt - 1;
- /* don't decrease child_iovcnt when it equals to BDEV_IO_NUM_CHILD_IOV
+ /* don't decrease child_iovcnt when it equals to SPDK_BDEV_IO_NUM_CHILD_IOV
 * so the loop will naturally end
 */
diff --git a/lib/vhost/vhost_blk.c b/lib/vhost/vhost_blk.c
index d5ec677b1..ac1003032 100644
--- a/lib/vhost/vhost_blk.c
+++ b/lib/vhost/vhost_blk.c
@@ -1512,7 +1512,7 @@ vhost_blk_get_config(struct spdk_vhost_dev *vdev, uint8_t *config,
 blkcnt = spdk_bdev_get_num_blocks(bdev);
 if (spdk_bdev_get_buf_align(bdev) > 1) {
 blkcfg.size_max = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
- blkcfg.seg_max = spdk_min(SPDK_VHOST_IOVS_MAX - 2 - 1, BDEV_IO_NUM_CHILD_IOV - 2 - 1);
+ blkcfg.seg_max = spdk_min(SPDK_VHOST_IOVS_MAX - 2 - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 2 - 1);
 } else {
 blkcfg.size_max = 131072;
 /* -2 for REQ and RESP and -1 for region boundary splitting */
diff --git a/module/bdev/lvol/vbdev_lvol.c b/module/bdev/lvol/vbdev_lvol.c
index 545f00218..ce57896d3 100644
--- a/module/bdev/lvol/vbdev_lvol.c
+++ b/module/bdev/lvol/vbdev_lvol.c
@@ -1046,7 +1046,7 @@ _create_lvol_disk(struct spdk_lvol *lvol, bool destroy)
 * bdev module.
 * Setting this parameter is mainly to avoid "empty" resets to a shared
 * bdev that may be used by multiple lvols. */
- bdev->reset_io_drain_timeout = BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
+ bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
 rc = spdk_bdev_register(bdev);
 if (rc) {
diff --git a/module/vfu_device/vfu_virtio_blk.c b/module/vfu_device/vfu_virtio_blk.c
index 302d784da..d96248da2 100644
--- a/module/vfu_device/vfu_virtio_blk.c
+++ b/module/vfu_device/vfu_virtio_blk.c
@@ -344,7 +344,7 @@ virtio_blk_update_config(struct virtio_blk_config *blk_cfg, struct spdk_bdev *bd
 if (spdk_bdev_get_buf_align(bdev) > 1) {
 blk_cfg->size_max = SPDK_BDEV_LARGE_BUF_MAX_SIZE;
- blk_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, BDEV_IO_NUM_CHILD_IOV - 2 - 1);
+ blk_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 2 - 1);
 } else {
 blk_cfg->size_max = 131072;
 /* -2 for REQ and RESP and -1 for region boundary splitting */
diff --git a/module/vfu_device/vfu_virtio_scsi.c b/module/vfu_device/vfu_virtio_scsi.c
index 35d8d1d10..727bf3d34 100644
--- a/module/vfu_device/vfu_virtio_scsi.c
+++ b/module/vfu_device/vfu_virtio_scsi.c
@@ -517,7 +517,7 @@ virtio_scsi_update_config(struct virtio_scsi_endpoint *scsi_endpoint)
 scsi_cfg->num_queues = scsi_endpoint->virtio.num_queues;
 /* -2 for REQ and RESP and -1 for region boundary splitting */
- scsi_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, BDEV_IO_NUM_CHILD_IOV - 2 - 1);
+ scsi_cfg->seg_max = spdk_min(VIRTIO_DEV_MAX_IOVS - 2 - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 2 - 1);
 /* we can set `max_sectors` and `cmd_per_lun` based on bdevs */
 scsi_cfg->max_sectors = 131072;
 scsi_cfg->cmd_per_lun = scsi_endpoint->virtio.qsize;
diff --git a/test/unit/lib/bdev/bdev.c/bdev_ut.c b/test/unit/lib/bdev/bdev.c/bdev_ut.c
index c546d81da..a52bca41e 100644
--- a/test/unit/lib/bdev/bdev.c/bdev_ut.c
+++ b/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -89,7 +89,7 @@ struct ut_expected_io {
 uint64_t src_offset;
 uint64_t length;
 int iovcnt;
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
 void *md_buf;
 struct spdk_bdev_ext_io_opts *ext_io_opts;
 bool copy_opts;
@@ -1222,7 +1222,7 @@ bdev_io_spans_split_test(void)
 {
 struct spdk_bdev bdev;
 struct spdk_bdev_io bdev_io;
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
 memset(&bdev, 0, sizeof(bdev));
 bdev_io.u.bdev.iovs = iov;
@@ -1306,7 +1306,7 @@ bdev_io_boundary_split_test(void)
 struct spdk_bdev_desc *desc = NULL;
 struct spdk_io_channel *io_ch;
 struct spdk_bdev_opts bdev_opts = {};
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
 struct ut_expected_io *expected_io;
 void *md_buf = (void *)0xFF000000;
 uint64_t i;
@@ -1421,32 +1421,32 @@ bdev_io_boundary_split_test(void)
 /* Test multi vector command that needs to be split by strip and then needs to be
 * split further due to the capacity of child iovs.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
- BDEV_IO_NUM_CHILD_IOV);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
+ SPDK_BDEV_IO_NUM_CHILD_IOV);
 expected_io->md_buf = md_buf;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
 }
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
- BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
- expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
+ SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
+ expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 ut_expected_io_set_iov(expected_io, i,
- (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
+ (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
 }
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
- 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
+ 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -1465,54 +1465,54 @@ bdev_io_boundary_split_test(void)
 */
 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
- * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
+ * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 256;
 }
- iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;
 /* Add an extra iovec to trigger split */
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
- bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
- BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
+ SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
 expected_io->md_buf = md_buf;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
 }
- for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 256);
 }
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
 1, 1);
- expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
+ expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
 ut_expected_io_set_iov(expected_io, 0,
- (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
+ (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
 1, 1);
- expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
+ expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
 ut_expected_io_set_iov(expected_io, 0,
- (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
- 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
+ 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -1529,50 +1529,50 @@ bdev_io_boundary_split_test(void)
 * split further due to the capacity of child iovs, the child request offset should
 * be rewind to last aligned offset and go success without error.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
- iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
- bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
 g_io_status = 0;
- /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
+ /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
- BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
+ SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
 expected_io->md_buf = md_buf;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
 }
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
+ /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
 1, 2);
- expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
+ expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
 ut_expected_io_set_iov(expected_io, 0,
- (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
+ (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
 ut_expected_io_set_iov(expected_io, 1,
- (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
+ (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
 1, 1);
- expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
+ expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
 ut_expected_io_set_iov(expected_io, 0,
- (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
+ (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
- 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
+ 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -1850,27 +1850,27 @@ bdev_io_boundary_split_test(void)
 * The multi vector command is as same as the above that needs to be split by strip
 * and then needs to be split further due to the capacity of child iovs.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
- iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
- iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
+ iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
- bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
 g_io_done = false;
 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
- BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -1898,42 +1898,42 @@ bdev_io_boundary_split_test(void)
 g_io_done = false;
 /* Init all parent IOVs to 0x212 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 0x212;
 }
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
- BDEV_IO_NUM_CHILD_IOV - 1);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
+ SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
 /* expect 0-29 to be 1:1 with the parent iov */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
 }
 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment
 * where 0x1e is the amount we overshot the 16K boundary
 */
- ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
- (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
+ ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
+ (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
 * shortened that take it to the next boundary and then a final one to get us to
 * 0x4200 bytes for the IO.
 */
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
- BDEV_IO_NUM_CHILD_IOV, 2);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
+ SPDK_BDEV_IO_NUM_CHILD_IOV, 2);
 /* position 30 picked up the remaining bytes to the next boundary */
 ut_expected_io_set_iov(expected_io, 0,
- (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
+ (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
 /* position 31 picked the the rest of the transfer to get us to 0x4200 */
 ut_expected_io_set_iov(expected_io, 1,
- (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
+ (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
- BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -1960,7 +1960,7 @@ bdev_io_max_size_and_segment_split_test(void)
 struct spdk_bdev_desc *desc = NULL;
 struct spdk_io_channel *io_ch;
 struct spdk_bdev_opts bdev_opts = {};
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
 struct ut_expected_io *expected_io;
 uint64_t i;
 int rc;
@@ -2118,7 +2118,7 @@ bdev_io_max_size_and_segment_split_test(void)
 bdev->max_num_segments = 1;
 g_io_done = false;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512 * 2;
 }
@@ -2126,7 +2126,7 @@ bdev_io_max_size_and_segment_split_test(void)
 /* Each input iov.size is split into 2 iovs,
 * half of the input iov can fill all child iov entries of a single IO.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@@ -2137,7 +2137,7 @@ bdev_io_max_size_and_segment_split_test(void)
 }
 /* The remaining iov is split in the second round */
- for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
@@ -2147,17 +2147,17 @@ bdev_io_max_size_and_segment_split_test(void)
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 }
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
- BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
- CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
- stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
+ stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
 CU_ASSERT(g_io_done == false);
- CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
- stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
+ stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
 CU_ASSERT(g_io_done == true);
 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
@@ -2206,37 +2206,37 @@ bdev_io_max_size_and_segment_split_test(void)
 * of child iovs, so it needs to wait until the first batch completed.
 */
 bdev->max_segment_size = 512;
- bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
+ bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512 * 2;
 }
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
- BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
- /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
+ /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
 }
- /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */
+ /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 /* Child iov entries exceed the max num of parent IO so split it in next round */
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
- BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -2255,34 +2255,34 @@ bdev_io_max_size_and_segment_split_test(void)
 * cannot be put into this IO, but wait until the next time.
 */
 bdev->max_segment_size = 512;
- bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
+ bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 128;
 }
- /* First child iovcnt is't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2.
+ /* First child iovcnt is't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2.
 * Because the left 2 iov is not enough for a blocklen.
 */
 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
- BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
 }
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 /* The second child io waits until the end of the first child io before executing.
 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
- * BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2
+ * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
 */
- expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
 1, 4);
 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
@@ -2290,8 +2290,8 @@ bdev_io_max_size_and_segment_split_test(void)
 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
- BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -2312,12 +2312,12 @@ bdev_io_max_size_and_segment_split_test(void)
 bdev->max_num_segments = 3;
 g_io_done = false;
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512 + 256;
 }
- for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
+ for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512 + 128;
 }
@@ -2429,7 +2429,7 @@ bdev_io_max_size_and_segment_split_test(void)
 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
 50, io_done, NULL);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
@@ -2521,7 +2521,7 @@ bdev_io_mix_split_test(void)
 struct spdk_bdev_desc *desc = NULL;
 struct spdk_io_channel *io_ch;
 struct spdk_bdev_opts bdev_opts = {};
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
 struct ut_expected_io *expected_io;
 uint64_t i;
 int rc;
@@ -2698,7 +2698,7 @@ bdev_io_mix_split_test(void)
 * optimal_io_boundary < max_segment_size * max_num_segments
 */
 bdev->max_segment_size = 3 * 512;
- bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
+ bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
 for (i = 0; i < 20; i++) {
@@ -2927,7 +2927,7 @@ bdev_io_write_unit_split_test(void)
 struct spdk_bdev_desc *desc = NULL;
 struct spdk_io_channel *io_ch;
 struct spdk_bdev_opts bdev_opts = {};
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 4];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4];
 struct ut_expected_io *expected_io;
 uint64_t i;
 int rc;
@@ -4975,7 +4975,7 @@ bdev_io_abort(void)
 struct spdk_bdev_channel *channel;
 struct spdk_bdev_mgmt_channel *mgmt_ch;
 struct spdk_bdev_opts bdev_opts = {};
- struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
 uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
 int rc;
@@ -5089,15 +5089,15 @@ bdev_io_abort(void)
 * child I/O was submitted. The parent I/O should complete with failure without
 * submitting the second child I/O.
 */
- for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
 iov[i].iov_base = (void *)((i + 1) * 0x10000);
 iov[i].iov_len = 512;
 }
- bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 g_io_done = false;
- rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
- BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
 CU_ASSERT(rc == 0);
 CU_ASSERT(g_io_done == false);
diff --git a/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
index d85895383..a6ceff878 100644
--- a/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
+++ b/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -567,7 +567,7 @@ aborted_reset_no_outstanding_io(void)
 io_ch[0] = spdk_bdev_get_io_channel(g_desc);
 bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
 bdev[0] = bdev_ch[0]->bdev;
- bdev[0]->reset_io_drain_timeout = BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
+ bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
 CU_ASSERT(io_ch[0] != NULL);
 spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
 poll_threads();
@@ -581,7 +581,7 @@ aborted_reset_no_outstanding_io(void)
 io_ch[1] = spdk_bdev_get_io_channel(g_desc);
 bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
 bdev[1] = bdev_ch[1]->bdev;
- bdev[1]->reset_io_drain_timeout = BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
+ bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
 CU_ASSERT(io_ch[1] != NULL);
 spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
 poll_threads();
@@ -777,7 +777,7 @@ reset_completions(void)
 /* Test case 2) no outstanding IO are present. Reset should perform one iteration over
 * channels and then be skipped.
 */
- bdev->reset_io_drain_timeout = BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
+ bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
 status_reset = SPDK_BDEV_IO_STATUS_PENDING;
 rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
diff --git a/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c b/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
index d7254a710..7bdc84f19 100644
--- a/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
+++ b/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
@@ -470,7 +470,7 @@ test_nvmf_bdev_ctrlr_identify_ns(void)
 bdev.dif_type = SPDK_DIF_TYPE1;
 bdev.blocklen = 4096;
 bdev.md_interleave = 0;
- bdev.optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ bdev.optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
 bdev.dif_is_head_of_md = true;
 nvmf_bdev_ctrlr_identify_ns(&ns, &nsdata, false);
@@ -483,7 +483,7 @@ test_nvmf_bdev_ctrlr_identify_ns(void)
 CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
 CU_ASSERT(nsdata.lbaf[0].ms == 512);
 CU_ASSERT(nsdata.dps.pit == SPDK_NVME_FMT_NVM_PROTECTION_DISABLE);
- CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
+ CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
 CU_ASSERT(nsdata.nmic.can_share == 1);
 CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);
 CU_ASSERT(nsdata.nsrescap.rescap.write_exclusive == 1);
@@ -509,7 +509,7 @@ test_nvmf_bdev_ctrlr_identify_ns(void)
 CU_ASSERT(nsdata.flbas.format == 0);
 CU_ASSERT(nsdata.nacwu == 0);
 CU_ASSERT(nsdata.lbaf[0].lbads == spdk_u32log2(4096));
- CU_ASSERT(nsdata.noiob == BDEV_IO_NUM_CHILD_IOV);
+ CU_ASSERT(nsdata.noiob == SPDK_BDEV_IO_NUM_CHILD_IOV);
 CU_ASSERT(nsdata.nmic.can_share == 1);
 CU_ASSERT(nsdata.lbaf[0].ms == 0);
 CU_ASSERT(nsdata.nsrescap.rescap.persist == 1);