bdev/raid: remove randomness from test_multi_raid_with_io()

There is no additional value in randomizing read vs write, the target
raid, or the channel. Still perform both a read and a write, but fix the
other values.

Change-Id: I5a4f023731119230d3eb49ae28421819144b90bc
Signed-off-by: paul luse <paul.e.luse@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454509
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
paul luse 2019-05-14 17:04:16 -04:00 committed by Jim Harris
parent c009c2eb4a
commit 32e69f2968
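
The reworked loop issues one fixed I/O per raid instead of drawing the I/O type, length, and target from rand(). Below is a minimal standalone sketch of that pattern only; g_max_raids, g_strip_size, and the io_type enum here are stand-in values rather than the unit test's globals or SPDK's bdev I/O types, and the real test additionally submits each I/O through raid_bdev_submit_request() and checks it with verify_io(), which this sketch does not do.

/*
 * Standalone sketch of the deterministic per-raid I/O pattern
 * (stand-in values; not part of the SPDK test itself).
 */
#include <inttypes.h>
#include <stdio.h>

enum io_type { IO_TYPE_READ, IO_TYPE_WRITE };	/* stand-in for SPDK_BDEV_IO_TYPE_* */

static const uint32_t g_max_raids = 2;		/* stand-in value */
static const uint64_t g_strip_size = 64;	/* stand-in value, in blocks */

int main(void)
{
	uint64_t lba = 0;	/* fixed, matching "uint64_t lba = 0;" in the diff */
	uint32_t i;

	for (i = 0; i < g_max_raids; i++) {
		/* io_len and iotype no longer come from rand(): the length is
		 * always one strip and the type is picked by the raid index,
		 * so every run exercises exactly the same path. */
		uint64_t io_len = g_strip_size;
		enum io_type iotype = i ? IO_TYPE_WRITE : IO_TYPE_READ;

		printf("raid %u: %s, lba=%" PRIu64 ", len=%" PRIu64 "\n", i,
		       iotype == IO_TYPE_WRITE ? "write" : "read", lba, io_len);
	}

	return 0;
}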


@@ -2139,7 +2139,7 @@ test_multi_raid_no_io(void)
 	reset_globals();
 }
 
-/* Create multiple raids, fire IOs randomly on various raids */
+/* Create multiple raids, fire IOs on raids */
 static void
 test_multi_raid_with_io(void)
 {
@@ -2154,11 +2154,8 @@ test_multi_raid_with_io(void)
 	struct raid_bdev_io_channel *ch_ctx;
 	struct spdk_bdev_io *bdev_io;
 	uint64_t io_len;
-	uint64_t lba;
-	struct spdk_io_channel *ch_random;
-	struct raid_bdev_io_channel *ch_ctx_random;
+	uint64_t lba = 0;
 	int16_t iotype;
-	uint32_t raid_random;
 
 	set_globals();
 	construct_req = calloc(g_max_raids, sizeof(struct rpc_construct_raid_bdev));
@@ -2196,28 +2193,30 @@ test_multi_raid_with_io(void)
 		}
 	}
 
-	lba = 0;
+	/* This will perform a write on the first raid and a read on the second. It can be
+	 * expanded in the future to perform r/w on each raid device in the event that
+	 * multiple raid levels are supported.
+	 */
+	for (i = 0; i < g_max_raids; i++) {
 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
-	io_len = (rand() % g_strip_size) + 1;
-	iotype = (rand() % 2) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
+	io_len = g_strip_size;
+	iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
 	memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
 	g_io_output_index = 0;
-	raid_random = rand() % g_max_raids;
-	ch_random = &ch[raid_random];
-	ch_ctx_random = spdk_io_channel_get_ctx(ch_random);
 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
-		if (strcmp(pbdev->bdev.name, construct_req[raid_random].name) == 0) {
+		if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
 			break;
 		}
 	}
 	bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, iotype);
 	CU_ASSERT(pbdev != NULL);
-	raid_bdev_submit_request(ch_random, bdev_io);
-	verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
+	raid_bdev_submit_request(ch, bdev_io);
+	verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
 		  g_child_io_status_flag);
 	bdev_io_cleanup(bdev_io);
 	free(bdev_io);
+	}
 
 	for (i = 0; i < g_max_raids; i++) {
 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {