/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "thread/thread_internal.h"

#include "bs_scheduler.c"


#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
#define DEV_BUFFER_BLOCKLEN (4096)
#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
uint8_t *g_dev_buffer;
uint64_t g_dev_write_bytes;
uint64_t g_dev_read_bytes;
bool g_dev_writev_ext_called;
bool g_dev_readv_ext_called;
struct spdk_blob_ext_io_opts g_blob_ext_io_opts;

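/*
 * Note (not part of the original bs_dev API): g_dev_buffer is only declared
 * here. The test harness is assumed to allocate DEV_BUFFER_SIZE bytes for it
 * (e.g. calloc(1, DEV_BUFFER_SIZE)) before exercising the device returned by
 * init_dev(), and to free it once the tests finish.
 */
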
struct spdk_power_failure_counters {
	uint64_t general_counter;
	uint64_t read_counter;
	uint64_t write_counter;
	uint64_t unmap_counter;
	uint64_t write_zero_counter;
	uint64_t flush_counter;
};

static struct spdk_power_failure_counters g_power_failure_counters = {};

struct spdk_power_failure_thresholds {
	uint64_t general_threshold;
	uint64_t read_threshold;
	uint64_t write_threshold;
	uint64_t unmap_threshold;
	uint64_t write_zero_threshold;
	uint64_t flush_threshold;
};

static struct spdk_power_failure_thresholds g_power_failure_thresholds = {};

static uint64_t g_power_failure_rc;

void dev_reset_power_failure_event(void);
void dev_reset_power_failure_counters(void);
void dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds);

void
dev_reset_power_failure_event(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	memset(&g_power_failure_thresholds, 0, sizeof(g_power_failure_thresholds));
	g_power_failure_rc = 0;
}

void
dev_reset_power_failure_counters(void)
{
	memset(&g_power_failure_counters, 0, sizeof(g_power_failure_counters));
	g_power_failure_rc = 0;
}

/**
 * Set the power failure event. A power failure will occur after a given
 * number of IO operations. It may occur after a number of one particular
 * operation type (read, write, unmap, write zero or flush) or after a given
 * number of IO operations of any kind (general_threshold). A value of 0
 * means that the threshold is disabled. Any other value is the number of the
 * operation starting from which the power failure event will happen.
 */
void
dev_set_power_failure_thresholds(struct spdk_power_failure_thresholds thresholds)
{
	g_power_failure_thresholds = thresholds;
}
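
/*
 * Example (a hypothetical test sequence, not taken from this file): fail the
 * third and every subsequent IO with -EIO, then clear the event afterwards.
 *
 *	struct spdk_power_failure_thresholds thresholds = {};
 *
 *	thresholds.general_threshold = 3;
 *	dev_set_power_failure_thresholds(thresholds);
 *	// ...issue blobstore IO; ops 1 and 2 succeed, op 3 onwards completes with -EIO...
 *	dev_reset_power_failure_event();
 */
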
/* Define here for UT only. */
struct spdk_io_channel g_io_channel;

static struct spdk_io_channel *
dev_create_channel(struct spdk_bs_dev *dev)
{
	return &g_io_channel;
}

static void
dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
{
}

static void
dev_destroy(struct spdk_bs_dev *dev)
{
	free(dev);
}

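/*
 * Completion plumbing: the dev_* IO callbacks below never invoke the
 * blobstore callback inline. They bounce it through dev_complete() and
 * _bs_send_msg() so the completion arrives from a message context rather
 * than from the caller's stack, mimicking an asynchronous backing device.
 */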
static void
dev_complete_cb(void *arg)
{
	struct spdk_bs_dev_cb_args *cb_args = arg;

	cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, g_power_failure_rc);
}

static void
dev_complete(void *arg)
{
	_bs_send_msg(dev_complete_cb, arg, NULL);
}

static void
dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	 uint64_t lba, uint32_t lba_count,
	 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		if (length > 0) {
			memcpy(payload, &g_dev_buffer[offset], length);
			g_dev_read_bytes += length;
		}
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);

		memcpy(&g_dev_buffer[offset], payload, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		length -= iov[i].iov_len;
	}

	CU_ASSERT(length == 0);
}

static void
dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct iovec *iov, int iovcnt,
	  uint64_t lba, uint32_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.read_threshold != 0) {
		g_power_failure_counters.read_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.read_threshold == 0 ||
	     g_power_failure_counters.read_counter < g_power_failure_thresholds.read_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_read_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_readv_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	      struct iovec *iov, int iovcnt,
	      uint64_t lba, uint32_t lba_count,
	      struct spdk_bs_dev_cb_args *cb_args,
	      struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_readv_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_readv(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	   struct iovec *iov, int iovcnt,
	   uint64_t lba, uint32_t lba_count,
	   struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;
	int i;

	if (g_power_failure_thresholds.write_threshold != 0) {
		g_power_failure_counters.write_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_threshold == 0 ||
	     g_power_failure_counters.write_counter < g_power_failure_thresholds.write_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		__check_iov(iov, iovcnt, length);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
			offset += iov[i].iov_len;
		}

		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_writev_ext(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	       struct iovec *iov, int iovcnt,
	       uint64_t lba, uint32_t lba_count,
	       struct spdk_bs_dev_cb_args *cb_args,
	       struct spdk_blob_ext_io_opts *io_opts)
{
	g_dev_writev_ext_called = true;
	g_blob_ext_io_opts = *io_opts;
	dev_writev(dev, channel, iov, iovcnt, lba, lba_count, cb_args);
}

static void
dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	if (g_power_failure_thresholds.flush_threshold != 0) {
		g_power_failure_counters.flush_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.flush_threshold != 0 &&
	     g_power_failure_counters.flush_counter >= g_power_failure_thresholds.flush_threshold) ||
	    (g_power_failure_thresholds.general_threshold != 0 &&
	     g_power_failure_counters.general_counter >= g_power_failure_thresholds.general_threshold)) {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
	  uint64_t lba, uint64_t lba_count,
	  struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.unmap_threshold != 0) {
		g_power_failure_counters.unmap_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.unmap_threshold == 0 ||
	     g_power_failure_counters.unmap_counter < g_power_failure_thresholds.unmap_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static void
dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
		 uint64_t lba, uint64_t lba_count,
		 struct spdk_bs_dev_cb_args *cb_args)
{
	uint64_t offset, length;

	if (g_power_failure_thresholds.write_zero_threshold != 0) {
		g_power_failure_counters.write_zero_counter++;
	}

	if (g_power_failure_thresholds.general_threshold != 0) {
		g_power_failure_counters.general_counter++;
	}

	if ((g_power_failure_thresholds.write_zero_threshold == 0 ||
	     g_power_failure_counters.write_zero_counter < g_power_failure_thresholds.write_zero_threshold) &&
	    (g_power_failure_thresholds.general_threshold == 0 ||
	     g_power_failure_counters.general_counter < g_power_failure_thresholds.general_threshold)) {
		offset = lba * dev->blocklen;
		length = lba_count * dev->blocklen;
		SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
		memset(&g_dev_buffer[offset], 0, length);
		g_dev_write_bytes += length;
	} else {
		g_power_failure_rc = -EIO;
	}

	spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
}

static struct spdk_bs_dev *
init_dev(void)
{
	struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));

	SPDK_CU_ASSERT_FATAL(dev != NULL);

	dev->create_channel = dev_create_channel;
	dev->destroy_channel = dev_destroy_channel;
	dev->destroy = dev_destroy;
	dev->read = dev_read;
	dev->write = dev_write;
	dev->readv = dev_readv;
	dev->writev = dev_writev;
	dev->readv_ext = dev_readv_ext;
	dev->writev_ext = dev_writev_ext;
	dev->flush = dev_flush;
	dev->unmap = dev_unmap;
	dev->write_zeroes = dev_write_zeroes;
	dev->blockcnt = DEV_BUFFER_BLOCKCNT;
	dev->blocklen = DEV_BUFFER_BLOCKLEN;

	return dev;
}
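
/*
 * Sketch of typical consumption by a unit test (assumed usage, not part of
 * this file; bs_init_done() and run_ut_scheduler() are hypothetical test
 * helpers):
 *
 *	struct spdk_bs_dev *dev;
 *
 *	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
 *	dev = init_dev();
 *	spdk_bs_init(dev, NULL, bs_init_done, NULL);
 *	run_ut_scheduler();	// drain queued messages so completions run
 *	...
 *	free(g_dev_buffer);
 */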