nbd: apply interrupt

If interrupt mode is set, the related poller functions are registered
as interrupt handlers instead of pollers. With this change, interrupt_tgt
can run I/O against a Linux nbd device.

Change-Id: I39ecf1efa10be76419fb0d25713ea457a5a53b37
Signed-off-by: Liu Xiaodong <xiaodong.liu@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4274
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Authored by Liu Xiaodong 2020-09-16 16:33:19 -04:00, committed by Tomasz Zawadzki
parent ad8c05dc7c
commit 54a7d73ad1
2 changed files with 35 additions and 5 deletions
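
The pattern the commit applies is small: the same poll callback is registered either as a classic SPDK poller (polled mode) or as an fd-backed interrupt source (interrupt mode). Below is a minimal sketch of that registration split, assuming only the SPDK thread API calls that appear in the diff (SPDK_POLLER_REGISTER, SPDK_INTERRUPT_REGISTER, spdk_interrupt_mode_is_enabled); the names my_dev, my_poll, and my_dev_start are illustrative and not part of this change.

#include "spdk/thread.h"

struct my_dev {
	int fd;                         /* illustrative: socket/eventfd that becomes readable when work arrives */
	struct spdk_poller *poller;     /* used in polled mode */
	struct spdk_interrupt *intr;    /* used in interrupt mode */
};

/* One callback serves both modes: it runs on every reactor iteration as a
 * poller, or only when fd is ready when registered as an interrupt source. */
static int
my_poll(void *arg)
{
	struct my_dev *dev = arg;

	/* ... drain dev->fd and process I/O ... */
	(void)dev;
	return SPDK_POLLER_BUSY;
}

static void
my_dev_start(struct my_dev *dev)
{
	if (spdk_interrupt_mode_is_enabled()) {
		dev->intr = SPDK_INTERRUPT_REGISTER(dev->fd, my_poll, dev);
	} else {
		dev->poller = SPDK_POLLER_REGISTER(my_poll, dev, 0);
	}
}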

@@ -47,6 +47,8 @@ SPDK_LIB_LIST += bdev notify accel vmd sock
 SPDK_LIB_LIST += $(EVENT_BDEV_SUBSYSTEM) # event_bdev depends on some other event modules, but they dont support edriven yet
 # Aio bdev library
 SPDK_LIB_LIST += bdev_aio
+# NBD libraries
+SPDK_LIB_LIST += nbd event_nbd
 ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV))
 SPDK_LIB_LIST += env_dpdk_rpc

@@ -101,6 +101,7 @@ struct spdk_nbd_disk {
 	int			kernel_sp_fd;
 	int			spdk_sp_fd;
 	struct spdk_poller	*nbd_poller;
+	struct spdk_interrupt	*intr;
 	uint32_t		buf_align;
 	struct nbd_io		*io_in_recv;
@@ -353,6 +354,14 @@ _nbd_stop(struct spdk_nbd_disk *nbd)
 		spdk_bdev_close(nbd->bdev_desc);
 	}
+	if (nbd->nbd_poller) {
+		spdk_poller_unregister(&nbd->nbd_poller);
+	}
+	if (nbd->intr) {
+		spdk_interrupt_unregister(&nbd->intr);
+	}
 	if (nbd->spdk_sp_fd >= 0) {
 		close(nbd->spdk_sp_fd);
 	}
@@ -374,10 +383,6 @@ _nbd_stop(struct spdk_nbd_disk *nbd)
 		free(nbd->nbd_path);
 	}
-	if (nbd->nbd_poller) {
-		spdk_poller_unregister(&nbd->nbd_poller);
-	}
 	nbd_disk_unregister(nbd);
 	free(nbd);
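
The two _nbd_stop() hunks above pair with the start path: unregister whichever handle is active, then close the descriptor. A matching teardown for the sketch further up could look like this (my_dev remains an illustrative name, not nbd or SPDK code):

#include <unistd.h>

static void
my_dev_stop(struct my_dev *dev)
{
	/* Drop the callback registration first so nothing can fire on a
	 * descriptor that is about to be closed. */
	if (dev->poller) {
		spdk_poller_unregister(&dev->poller);
	}
	if (dev->intr) {
		spdk_interrupt_unregister(&dev->intr);
	}
	if (dev->fd >= 0) {
		close(dev->fd);
		dev->fd = -1;
	}
}
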
@@ -449,6 +454,14 @@ nbd_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
 	}
 	memcpy(&io->resp.handle, &io->req.handle, sizeof(io->resp.handle));
+	/* When there begins to have executed_io, enable socket writable notice in order to
+	 * get it processed in nbd_io_xmit
+	 */
+	if (nbd->intr && TAILQ_EMPTY(&nbd->executed_io_list)) {
+		spdk_interrupt_set_event_types(nbd->intr, SPDK_INTERRUPT_EVENT_IN | SPDK_INTERRUPT_EVENT_OUT);
+	}
 	TAILQ_INSERT_TAIL(&nbd->executed_io_list, io, tailq);
 	if (bdev_io != NULL) {
@@ -523,6 +536,12 @@ nbd_submit_bdev_io(struct spdk_nbd_disk *nbd, struct nbd_io *io)
 	case NBD_CMD_DISC:
 		nbd_put_io(nbd, io);
 		nbd->state = NBD_DISK_STATE_SOFTDISC;
+		/* when there begins to have executed_io to send, enable socket writable notice */
+		if (nbd->intr && TAILQ_EMPTY(&nbd->executed_io_list)) {
+			spdk_interrupt_set_event_types(nbd->intr, SPDK_INTERRUPT_EVENT_IN | SPDK_INTERRUPT_EVENT_OUT);
+		}
 		break;
 	default:
 		rc = -1;
@@ -780,6 +799,11 @@ nbd_io_xmit(struct spdk_nbd_disk *nbd)
 		ret += rc;
 	}
+	/* When there begins to have no executed_io, disable socket writable notice */
+	if (nbd->intr) {
+		spdk_interrupt_set_event_types(nbd->intr, SPDK_INTERRUPT_EVENT_IN);
+	}
 	/*
 	 * For soft disconnection, nbd server can close connection after all
 	 * outstanding request are transmitted.
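
The writable-notice toggling above is the usual level-triggered readiness pattern: ask for EVENT_OUT only while output is queued (nbd_io_done and the NBD_CMD_DISC path), and fall back to EVENT_IN once nbd_io_xmit has drained executed_io_list, so the handler is not woken by a permanently writable socket. Expressed with plain epoll instead of the SPDK wrapper, the idea is roughly the following (illustrative helper, not part of this commit):

#include <stdbool.h>
#include <sys/epoll.h>

/* Toggle interest in EPOLLOUT for fd on epfd depending on whether output is
 * pending, analogous to spdk_interrupt_set_event_types() switching between
 * EVENT_IN and EVENT_IN | EVENT_OUT in the hunks above. */
static int
set_writable_notice(int epfd, int fd, void *ctx, bool output_pending)
{
	struct epoll_event ev = {
		.events = EPOLLIN | (output_pending ? EPOLLOUT : 0),
		.data.ptr = ctx,
	};

	return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
}
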
@@ -937,7 +961,11 @@ nbd_start_complete(struct spdk_nbd_start_ctx *ctx)
 		goto err;
 	}
-	ctx->nbd->nbd_poller = SPDK_POLLER_REGISTER(nbd_poll, ctx->nbd, 0);
+	if (spdk_interrupt_mode_is_enabled()) {
+		ctx->nbd->intr = SPDK_INTERRUPT_REGISTER(ctx->nbd->spdk_sp_fd, nbd_poll, ctx->nbd);
+	} else {
+		ctx->nbd->nbd_poller = SPDK_POLLER_REGISTER(nbd_poll, ctx->nbd, 0);
+	}
 	if (ctx->cb_fn) {
 		ctx->cb_fn(ctx->cb_arg, ctx->nbd, 0);