nvmf: Allow asynchronous nvmf transport destroy.
As part of FC transport destroy, FC LLD (Low Level Driver) needs to do its cleanup, which cannot be completed synchronously. So allow transport destroy to be asynchronous. FC transport code to use this functionality will be pushed shortly. Signed-off-by: Naresh Gottumukkala <raju.gottumukkala@broadcom.com> Change-Id: I104cf7d131e18199abdcf0651df261fe41d666ef Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5180 Community-CI: Broadcom CI Community-CI: Mellanox Build Bot Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com> Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com> Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
d4ad1f9cc1
commit
0d98a94901
@ -4,8 +4,8 @@
|
|||||||
|
|
||||||
### nvmf
|
### nvmf
|
||||||
|
|
||||||
The function `qpair_fini` in the transport interface now accepts a cb_fn and
|
The functions `destroy` and `qpair_fini` in the transport interface now accept a
|
||||||
cb_arg to call upon completion, and its execution can be asynchronous.
|
cb_fn and cb_arg to call upon completion, and their execution can be asynchronous.
|
||||||
|
|
||||||
The SPDK nvmf target now supports async event notification for discovery log changes.
|
The SPDK nvmf target now supports async event notification for discovery log changes.
|
||||||
This allows the initiator to create a persistent connection to the discovery controller and
|
This allows the initiator to create a persistent connection to the discovery controller and
|
||||||
|
@ -926,14 +926,19 @@ spdk_nvmf_transport_opts_init(const char *transport_name,
|
|||||||
struct spdk_nvmf_transport *spdk_nvmf_transport_create(const char *transport_name,
|
struct spdk_nvmf_transport *spdk_nvmf_transport_create(const char *transport_name,
|
||||||
struct spdk_nvmf_transport_opts *opts);
|
struct spdk_nvmf_transport_opts *opts);
|
||||||
|
|
||||||
|
typedef void (*spdk_nvmf_transport_destroy_done_cb)(void *cb_arg);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Destroy a protocol transport
|
* Destroy a protocol transport
|
||||||
*
|
*
|
||||||
* \param transport The transport to destroy
|
* \param transport The transport to destroy
|
||||||
|
* \param cb_fn A callback that will be called once the transport is destroyed
|
||||||
|
* \param cb_arg A context argument passed to cb_fn.
|
||||||
*
|
*
|
||||||
* \return 0 on success, -1 on failure.
|
* \return 0 on success, -1 on failure.
|
||||||
*/
|
*/
|
||||||
int spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport);
|
int spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get an existing transport from the target
|
* Get an existing transport from the target
|
||||||
|
@ -235,7 +235,8 @@ struct spdk_nvmf_transport_ops {
|
|||||||
/**
|
/**
|
||||||
* Destroy the transport
|
* Destroy the transport
|
||||||
*/
|
*/
|
||||||
int (*destroy)(struct spdk_nvmf_transport *transport);
|
int (*destroy)(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Instruct the transport to accept new connections at the address
|
* Instruct the transport to accept new connections at the address
|
||||||
|
@ -1861,7 +1861,8 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
|
nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
|
||||||
{
|
{
|
||||||
if (transport) {
|
if (transport) {
|
||||||
struct spdk_nvmf_fc_transport *ftransport;
|
struct spdk_nvmf_fc_transport *ftransport;
|
||||||
@ -1884,6 +1885,9 @@ nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
|
|||||||
nvmf_fc_port_cleanup();
|
nvmf_fc_port_cleanup();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (cb_fn) {
|
||||||
|
cb_fn(cb_arg);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -313,13 +313,33 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_target_opts *opts)
|
|||||||
return tgt;
|
return tgt;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
_nvmf_tgt_destroy_next_transport(void *ctx)
|
||||||
|
{
|
||||||
|
struct spdk_nvmf_tgt *tgt = ctx;
|
||||||
|
struct spdk_nvmf_transport *transport;
|
||||||
|
|
||||||
|
if (!TAILQ_EMPTY(&tgt->transports)) {
|
||||||
|
transport = TAILQ_FIRST(&tgt->transports);
|
||||||
|
TAILQ_REMOVE(&tgt->transports, transport, link);
|
||||||
|
spdk_nvmf_transport_destroy(transport, _nvmf_tgt_destroy_next_transport, tgt);
|
||||||
|
} else {
|
||||||
|
spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn = tgt->destroy_cb_fn;
|
||||||
|
void *destroy_cb_arg = tgt->destroy_cb_arg;
|
||||||
|
|
||||||
|
pthread_mutex_destroy(&tgt->mutex);
|
||||||
|
free(tgt);
|
||||||
|
|
||||||
|
if (destroy_cb_fn) {
|
||||||
|
destroy_cb_fn(destroy_cb_arg, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
nvmf_tgt_destroy_cb(void *io_device)
|
nvmf_tgt_destroy_cb(void *io_device)
|
||||||
{
|
{
|
||||||
struct spdk_nvmf_tgt *tgt = io_device;
|
struct spdk_nvmf_tgt *tgt = io_device;
|
||||||
struct spdk_nvmf_transport *transport, *transport_tmp;
|
|
||||||
spdk_nvmf_tgt_destroy_done_fn *destroy_cb_fn;
|
|
||||||
void *destroy_cb_arg;
|
|
||||||
uint32_t i;
|
uint32_t i;
|
||||||
|
|
||||||
if (tgt->subsystems) {
|
if (tgt->subsystems) {
|
||||||
@ -332,20 +352,7 @@ nvmf_tgt_destroy_cb(void *io_device)
|
|||||||
free(tgt->subsystems);
|
free(tgt->subsystems);
|
||||||
}
|
}
|
||||||
|
|
||||||
TAILQ_FOREACH_SAFE(transport, &tgt->transports, link, transport_tmp) {
|
_nvmf_tgt_destroy_next_transport(tgt);
|
||||||
TAILQ_REMOVE(&tgt->transports, transport, link);
|
|
||||||
spdk_nvmf_transport_destroy(transport);
|
|
||||||
}
|
|
||||||
|
|
||||||
destroy_cb_fn = tgt->destroy_cb_fn;
|
|
||||||
destroy_cb_arg = tgt->destroy_cb_arg;
|
|
||||||
|
|
||||||
pthread_mutex_destroy(&tgt->mutex);
|
|
||||||
free(tgt);
|
|
||||||
|
|
||||||
if (destroy_cb_fn) {
|
|
||||||
destroy_cb_fn(destroy_cb_arg, 0);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
@ -2289,7 +2289,8 @@ const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
|
|||||||
.are_contiguous = nvmf_rdma_check_contiguous_entries
|
.are_contiguous = nvmf_rdma_check_contiguous_entries
|
||||||
};
|
};
|
||||||
|
|
||||||
static int nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
|
static int nvmf_rdma_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg);
|
||||||
|
|
||||||
static struct spdk_nvmf_transport *
|
static struct spdk_nvmf_transport *
|
||||||
nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
||||||
@ -2346,7 +2347,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
SPDK_COUNTOF(rdma_transport_opts_decoder),
|
SPDK_COUNTOF(rdma_transport_opts_decoder),
|
||||||
&rtransport->rdma_opts)) {
|
&rtransport->rdma_opts)) {
|
||||||
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
|
SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2384,7 +2385,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
SPDK_ERRLOG("The number of shared data buffers (%d) is less than"
|
SPDK_ERRLOG("The number of shared data buffers (%d) is less than"
|
||||||
"the minimum number required to guarantee that forward progress can be made (%d)\n",
|
"the minimum number required to guarantee that forward progress can be made (%d)\n",
|
||||||
opts->num_shared_buffers, (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
|
opts->num_shared_buffers, (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2394,21 +2395,21 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
"per-poll group caches for each thread. (%" PRIu32 ")"
|
"per-poll group caches for each thread. (%" PRIu32 ")"
|
||||||
"supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
|
"supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
|
||||||
SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
|
SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
sge_count = opts->max_io_size / opts->io_unit_size;
|
sge_count = opts->max_io_size / opts->io_unit_size;
|
||||||
if (sge_count > NVMF_DEFAULT_TX_SGE) {
|
if (sge_count > NVMF_DEFAULT_TX_SGE) {
|
||||||
SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
|
SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
rtransport->event_channel = rdma_create_event_channel();
|
rtransport->event_channel = rdma_create_event_channel();
|
||||||
if (rtransport->event_channel == NULL) {
|
if (rtransport->event_channel == NULL) {
|
||||||
SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
|
SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2416,7 +2417,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
|
if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
|
||||||
SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n",
|
SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n",
|
||||||
rtransport->event_channel->fd, spdk_strerror(errno));
|
rtransport->event_channel->fd, spdk_strerror(errno));
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2427,14 +2428,14 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
SPDK_ENV_SOCKET_ID_ANY);
|
SPDK_ENV_SOCKET_ID_ANY);
|
||||||
if (!rtransport->data_wr_pool) {
|
if (!rtransport->data_wr_pool) {
|
||||||
SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
|
SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
contexts = rdma_get_devices(NULL);
|
contexts = rdma_get_devices(NULL);
|
||||||
if (contexts == NULL) {
|
if (contexts == NULL) {
|
||||||
SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
|
SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2529,7 +2530,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (rc < 0) {
|
if (rc < 0) {
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2541,7 +2542,7 @@ nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
|
rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
|
||||||
if (rtransport->poll_fds == NULL) {
|
if (rtransport->poll_fds == NULL) {
|
||||||
SPDK_ERRLOG("poll_fds allocation failed\n");
|
SPDK_ERRLOG("poll_fds allocation failed\n");
|
||||||
nvmf_rdma_destroy(&rtransport->transport);
|
nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2570,7 +2571,8 @@ nvmf_rdma_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_writ
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
|
nvmf_rdma_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
|
||||||
{
|
{
|
||||||
struct spdk_nvmf_rdma_transport *rtransport;
|
struct spdk_nvmf_rdma_transport *rtransport;
|
||||||
struct spdk_nvmf_rdma_port *port, *port_tmp;
|
struct spdk_nvmf_rdma_port *port, *port_tmp;
|
||||||
@ -2619,6 +2621,9 @@ nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
|
|||||||
pthread_mutex_destroy(&rtransport->lock);
|
pthread_mutex_destroy(&rtransport->lock);
|
||||||
free(rtransport);
|
free(rtransport);
|
||||||
|
|
||||||
|
if (cb_fn) {
|
||||||
|
cb_fn(cb_arg);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -484,7 +484,8 @@ nvmf_tcp_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
|
nvmf_tcp_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
|
||||||
{
|
{
|
||||||
struct spdk_nvmf_tcp_transport *ttransport;
|
struct spdk_nvmf_tcp_transport *ttransport;
|
||||||
|
|
||||||
@ -493,6 +494,10 @@ nvmf_tcp_destroy(struct spdk_nvmf_transport *transport)
|
|||||||
|
|
||||||
pthread_mutex_destroy(&ttransport->lock);
|
pthread_mutex_destroy(&ttransport->lock);
|
||||||
free(ttransport);
|
free(ttransport);
|
||||||
|
|
||||||
|
if (cb_fn) {
|
||||||
|
cb_fn(cb_arg);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -579,7 +584,7 @@ nvmf_tcp_create(struct spdk_nvmf_transport_opts *opts)
|
|||||||
"per-poll group caches for each thread. (%" PRIu32 ")"
|
"per-poll group caches for each thread. (%" PRIu32 ")"
|
||||||
"supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
|
"supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
|
||||||
SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
|
SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
|
||||||
nvmf_tcp_destroy(&ttransport->transport);
|
nvmf_tcp_destroy(&ttransport->transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -141,7 +141,7 @@ spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transpor
|
|||||||
transport_name, "data");
|
transport_name, "data");
|
||||||
if (chars_written < 0) {
|
if (chars_written < 0) {
|
||||||
SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
|
SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
|
||||||
ops->destroy(transport);
|
ops->destroy(transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,7 +153,7 @@ spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transpor
|
|||||||
|
|
||||||
if (!transport->data_buf_pool) {
|
if (!transport->data_buf_pool) {
|
||||||
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
|
SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
|
||||||
ops->destroy(transport);
|
ops->destroy(transport, NULL, NULL);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -173,7 +173,8 @@ spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
|
|||||||
}
|
}
|
||||||
|
|
||||||
int
|
int
|
||||||
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
|
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
|
||||||
|
spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
|
||||||
{
|
{
|
||||||
if (transport->data_buf_pool != NULL) {
|
if (transport->data_buf_pool != NULL) {
|
||||||
if (spdk_mempool_count(transport->data_buf_pool) !=
|
if (spdk_mempool_count(transport->data_buf_pool) !=
|
||||||
@ -186,7 +187,7 @@ spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
|
|||||||
|
|
||||||
spdk_mempool_free(transport->data_buf_pool);
|
spdk_mempool_free(transport->data_buf_pool);
|
||||||
|
|
||||||
return transport->ops->destroy(transport);
|
return transport->ops->destroy(transport, cb_fn, cb_arg);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct spdk_nvmf_listener *
|
struct spdk_nvmf_listener *
|
||||||
|
@ -457,7 +457,7 @@ test_nvmf_tcp_destroy(void)
|
|||||||
CU_ASSERT_PTR_NOT_NULL(transport);
|
CU_ASSERT_PTR_NOT_NULL(transport);
|
||||||
transport->opts = opts;
|
transport->opts = opts;
|
||||||
/* destroy transport */
|
/* destroy transport */
|
||||||
CU_ASSERT(nvmf_tcp_destroy(transport) == 0);
|
CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);
|
||||||
|
|
||||||
spdk_thread_exit(thread);
|
spdk_thread_exit(thread);
|
||||||
while (!spdk_thread_is_exited(thread)) {
|
while (!spdk_thread_is_exited(thread)) {
|
||||||
@ -501,7 +501,7 @@ test_nvmf_tcp_poll_group_create(void)
|
|||||||
}
|
}
|
||||||
group->transport = transport;
|
group->transport = transport;
|
||||||
nvmf_tcp_poll_group_destroy(group);
|
nvmf_tcp_poll_group_destroy(group);
|
||||||
nvmf_tcp_destroy(transport);
|
nvmf_tcp_destroy(transport, NULL, NULL);
|
||||||
|
|
||||||
spdk_thread_exit(thread);
|
spdk_thread_exit(thread);
|
||||||
while (!spdk_thread_is_exited(thread)) {
|
while (!spdk_thread_is_exited(thread)) {
|
||||||
|
Loading…
Reference in New Issue
Block a user