bdevperf: Move target management functions close to the caller

_end_target() is an exception; it is moved to just above bdevperf_complete().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I2197a3b7ceb36ab29f0b69e31f3babd4e996f193
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/635
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Authored by Shuhei Matsumoto, 2020-02-06 23:31:45 -05:00; committed by Tomasz Zawadzki
parent 9226dcce43
commit 3673217aa3
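
For context: in C, a static function must be defined or declared before it is used, so keeping definitions just above their callers avoids separate forward declarations and keeps related logic together. A minimal stand-alone sketch of that constraint (illustrative only, not SPDK code; all names are hypothetical):

#include <stdio.h>

/* Hypothetical helper: because its definition appears above the caller,
 * no separate prototype is needed. */
static void
end_helper(void)
{
        printf("draining\n");
}

static void
complete_callback(void)
{
        end_helper();   /* the definition sits just above, so this compiles without a forward declaration */
}

int
main(void)
{
        complete_callback();
        return 0;
}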

@@ -222,190 +222,6 @@ verify_data(void *wr_buf, int wr_buf_len, void *rd_buf, int rd_buf_len, int bloc
        return true;
}

static void
bdevperf_free_target(struct io_target *target)
{
        struct bdevperf_task *task, *tmp;

        TAILQ_FOREACH_SAFE(task, &target->task_list, link, tmp) {
                TAILQ_REMOVE(&target->task_list, task, link);
                spdk_free(task->buf);
                spdk_free(task->md_buf);
                free(task);
        }

        free(target->name);
        free(target);
}

static void
bdevperf_free_targets(void)
{
        struct io_target_group *group, *tmp_group;
        struct io_target *target, *tmp_target;

        TAILQ_FOREACH_SAFE(group, &g_bdevperf.groups, link, tmp_group) {
                TAILQ_FOREACH_SAFE(target, &group->targets, link, tmp_target) {
                        TAILQ_REMOVE(&group->targets, target, link);
                        bdevperf_free_target(target);
                }
        }
}

static void
_end_target(struct io_target *target)
{
        spdk_poller_unregister(&target->run_timer);
        if (g_reset) {
                spdk_poller_unregister(&target->reset_timer);
        }

        target->is_draining = true;
}

static void
_target_gone(void *ctx)
{
        struct io_target *target = ctx;

        _end_target(target);
}

static void
bdevperf_target_gone(void *arg)
{
        struct io_target *target = arg;
        struct io_target_group *group = target->group;

        spdk_thread_send_msg(spdk_io_channel_get_thread(spdk_io_channel_from_ctx(group)),
                             _target_gone, target);
}

static int
bdevperf_construct_target(struct spdk_bdev *bdev)
{
        struct io_target_group *group;
        struct io_target *target;
        int block_size, data_block_size;
        int rc;

        if (g_unmap && !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
                printf("Skipping %s because it does not support unmap\n", spdk_bdev_get_name(bdev));
                return 0;
        }

        target = malloc(sizeof(struct io_target));
        if (!target) {
                fprintf(stderr, "Unable to allocate memory for new target.\n");
                /* Return immediately because all mallocs will presumably fail after this */
                return -ENOMEM;
        }

        target->name = strdup(spdk_bdev_get_name(bdev));
        if (!target->name) {
                fprintf(stderr, "Unable to allocate memory for target name.\n");
                free(target);
                /* Return immediately because all mallocs will presumably fail after this */
                return -ENOMEM;
        }

        rc = spdk_bdev_open(bdev, true, bdevperf_target_gone, target, &target->bdev_desc);
        if (rc != 0) {
                SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
                free(target->name);
                free(target);
                return 0;
        }

        target->bdev = bdev;
        target->io_completed = 0;
        target->current_queue_depth = 0;
        target->offset_in_ios = 0;

        block_size = spdk_bdev_get_block_size(bdev);
        data_block_size = spdk_bdev_get_data_block_size(bdev);
        target->io_size_blocks = g_io_size / data_block_size;
        if ((g_io_size % data_block_size) != 0) {
                SPDK_ERRLOG("IO size (%d) is not multiples of data block size of bdev %s (%"PRIu32")\n",
                            g_io_size, spdk_bdev_get_name(bdev), data_block_size);
                spdk_bdev_close(target->bdev_desc);
                free(target->name);
                free(target);
                return 0;
        }

        target->buf_size = target->io_size_blocks * block_size;

        target->dif_check_flags = 0;
        if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
                target->dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
        }
        if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
                target->dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
        }

        target->size_in_ios = spdk_bdev_get_num_blocks(bdev) / target->io_size_blocks;

        target->is_draining = false;
        target->run_timer = NULL;
        target->reset_timer = NULL;

        TAILQ_INIT(&target->task_list);

        /* Mapping each created target to target group */
        if (g_next_tg == NULL) {
                g_next_tg = TAILQ_FIRST(&g_bdevperf.groups);
                assert(g_next_tg != NULL);
        }
        group = g_next_tg;
        g_next_tg = TAILQ_NEXT(g_next_tg, link);
        target->group = group;
        TAILQ_INSERT_TAIL(&group->targets, target, link);

        g_target_count++;

        return 0;
}

static void
bdevperf_construct_targets(void)
{
        struct spdk_bdev *bdev;
        int rc;
        uint8_t core_idx, core_count_for_each_bdev;

        if (g_every_core_for_each_bdev == false) {
                core_count_for_each_bdev = 1;
        } else {
                core_count_for_each_bdev = spdk_env_get_core_count();
        }

        if (g_target_bdev_name != NULL) {
                bdev = spdk_bdev_get_by_name(g_target_bdev_name);
                if (!bdev) {
                        fprintf(stderr, "Unable to find bdev '%s'\n", g_target_bdev_name);
                        return;
                }

                for (core_idx = 0; core_idx < core_count_for_each_bdev; core_idx++) {
                        rc = bdevperf_construct_target(bdev);
                        if (rc != 0) {
                                return;
                        }
                }
        } else {
                bdev = spdk_bdev_first_leaf();
                while (bdev != NULL) {
                        for (core_idx = 0; core_idx < core_count_for_each_bdev; core_idx++) {
                                rc = bdevperf_construct_target(bdev);
                                if (rc != 0) {
                                        return;
                                }
                        }

                        bdev = spdk_bdev_next_leaf(bdev);
                }
        }
}

static void
_bdevperf_fini_thread_done(struct spdk_io_channel_iter *i, int status)
{
@@ -437,6 +253,36 @@ bdevperf_fini(void)
                             _bdevperf_fini_thread_done);
}

static void
bdevperf_free_target(struct io_target *target)
{
        struct bdevperf_task *task, *tmp;

        TAILQ_FOREACH_SAFE(task, &target->task_list, link, tmp) {
                TAILQ_REMOVE(&target->task_list, task, link);
                spdk_free(task->buf);
                spdk_free(task->md_buf);
                free(task);
        }

        free(target->name);
        free(target);
}

static void
bdevperf_free_targets(void)
{
        struct io_target_group *group, *tmp_group;
        struct io_target *target, *tmp_target;

        TAILQ_FOREACH_SAFE(group, &g_bdevperf.groups, link, tmp_group) {
                TAILQ_FOREACH_SAFE(target, &group->targets, link, tmp_target) {
                        TAILQ_REMOVE(&group->targets, target, link);
                        bdevperf_free_target(target);
                }
        }
}

static void
bdevperf_test_done(void)
{
@@ -488,6 +334,17 @@ bdevperf_queue_io_wait_with_cb(struct bdevperf_task *task, spdk_bdev_io_wait_cb
        spdk_bdev_queue_io_wait(target->bdev, target->ch, &task->bdev_io_wait);
}

static void
_end_target(struct io_target *target)
{
        spdk_poller_unregister(&target->run_timer);
        if (g_reset) {
                spdk_poller_unregister(&target->reset_timer);
        }

        target->is_draining = true;
}

static void
bdevperf_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
@@ -1122,6 +979,149 @@ bdevperf_test(void)
        return 0;
}

static void
_target_gone(void *ctx)
{
        struct io_target *target = ctx;

        _end_target(target);
}

static void
bdevperf_target_gone(void *arg)
{
        struct io_target *target = arg;
        struct io_target_group *group = target->group;

        spdk_thread_send_msg(spdk_io_channel_get_thread(spdk_io_channel_from_ctx(group)),
                             _target_gone, target);
}

static int
bdevperf_construct_target(struct spdk_bdev *bdev)
{
        struct io_target_group *group;
        struct io_target *target;
        int block_size, data_block_size;
        int rc;

        if (g_unmap && !spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) {
                printf("Skipping %s because it does not support unmap\n", spdk_bdev_get_name(bdev));
                return 0;
        }

        target = malloc(sizeof(struct io_target));
        if (!target) {
                fprintf(stderr, "Unable to allocate memory for new target.\n");
                /* Return immediately because all mallocs will presumably fail after this */
                return -ENOMEM;
        }

        target->name = strdup(spdk_bdev_get_name(bdev));
        if (!target->name) {
                fprintf(stderr, "Unable to allocate memory for target name.\n");
                free(target);
                /* Return immediately because all mallocs will presumably fail after this */
                return -ENOMEM;
        }

        rc = spdk_bdev_open(bdev, true, bdevperf_target_gone, target, &target->bdev_desc);
        if (rc != 0) {
                SPDK_ERRLOG("Could not open leaf bdev %s, error=%d\n", spdk_bdev_get_name(bdev), rc);
                free(target->name);
                free(target);
                return 0;
        }

        target->bdev = bdev;
        target->io_completed = 0;
        target->current_queue_depth = 0;
        target->offset_in_ios = 0;

        block_size = spdk_bdev_get_block_size(bdev);
        data_block_size = spdk_bdev_get_data_block_size(bdev);
        target->io_size_blocks = g_io_size / data_block_size;
        if ((g_io_size % data_block_size) != 0) {
                SPDK_ERRLOG("IO size (%d) is not multiples of data block size of bdev %s (%"PRIu32")\n",
                            g_io_size, spdk_bdev_get_name(bdev), data_block_size);
                spdk_bdev_close(target->bdev_desc);
                free(target->name);
                free(target);
                return 0;
        }

        target->buf_size = target->io_size_blocks * block_size;

        target->dif_check_flags = 0;
        if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_REFTAG)) {
                target->dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
        }
        if (spdk_bdev_is_dif_check_enabled(bdev, SPDK_DIF_CHECK_TYPE_GUARD)) {
                target->dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
        }

        target->size_in_ios = spdk_bdev_get_num_blocks(bdev) / target->io_size_blocks;

        target->is_draining = false;
        target->run_timer = NULL;
        target->reset_timer = NULL;

        TAILQ_INIT(&target->task_list);

        /* Mapping each created target to target group */
        if (g_next_tg == NULL) {
                g_next_tg = TAILQ_FIRST(&g_bdevperf.groups);
                assert(g_next_tg != NULL);
        }
        group = g_next_tg;
        g_next_tg = TAILQ_NEXT(g_next_tg, link);
        target->group = group;
        TAILQ_INSERT_TAIL(&group->targets, target, link);

        g_target_count++;

        return 0;
}

static void
bdevperf_construct_targets(void)
{
        struct spdk_bdev *bdev;
        int rc;
        uint8_t core_idx, core_count_for_each_bdev;

        if (g_every_core_for_each_bdev == false) {
                core_count_for_each_bdev = 1;
        } else {
                core_count_for_each_bdev = spdk_env_get_core_count();
        }

        if (g_target_bdev_name != NULL) {
                bdev = spdk_bdev_get_by_name(g_target_bdev_name);
                if (!bdev) {
                        fprintf(stderr, "Unable to find bdev '%s'\n", g_target_bdev_name);
                        return;
                }

                for (core_idx = 0; core_idx < core_count_for_each_bdev; core_idx++) {
                        rc = bdevperf_construct_target(bdev);
                        if (rc != 0) {
                                return;
                        }
                }
        } else {
                bdev = spdk_bdev_first_leaf();
                while (bdev != NULL) {
                        for (core_idx = 0; core_idx < core_count_for_each_bdev; core_idx++) {
                                rc = bdevperf_construct_target(bdev);
                                if (rc != 0) {
                                        return;
                                }
                        }

                        bdev = spdk_bdev_next_leaf(bdev);
                }
        }
}

static int
io_target_group_create(void *io_device, void *ctx_buf)
{
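
Aside: the target-to-group mapping in bdevperf_construct_target() above is a plain round-robin over the group list, restarting from the head once the tail is reached. A stand-alone sketch of the same rotation (illustrative only, not SPDK code; all names are hypothetical):

#include <assert.h>
#include <stdio.h>
#include <sys/queue.h>

struct group {
        int id;
        TAILQ_ENTRY(group) link;
};

TAILQ_HEAD(group_list, group);

static struct group_list g_groups = TAILQ_HEAD_INITIALIZER(g_groups);
static struct group *g_next_group;

/* Return the next group in round-robin order, wrapping back to the list head. */
static struct group *
next_group_round_robin(void)
{
        struct group *group;

        if (g_next_group == NULL) {
                g_next_group = TAILQ_FIRST(&g_groups);
                assert(g_next_group != NULL);
        }
        group = g_next_group;
        g_next_group = TAILQ_NEXT(g_next_group, link);
        return group;
}

int
main(void)
{
        struct group groups[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        for (int i = 0; i < 3; i++) {
                TAILQ_INSERT_TAIL(&g_groups, &groups[i], link);
        }
        /* Seven targets spread over three groups: 0 1 2 0 1 2 0 */
        for (int i = 0; i < 7; i++) {
                printf("target %d -> group %d\n", i, next_group_round_robin()->id);
        }
        return 0;
}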