nvme: implement epoll in the tcp transport.
Change-Id: I6672361baca4969f23259c19b73ed9dbe2f436bd
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/885
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 2d18104489
commit 5d0718528d
@@ -1995,7 +1995,7 @@ struct spdk_nvme_poll_group *spdk_nvme_poll_group_create(void *ctx);
  * \param qpair The qpair to add to the poll group.
  *
  * return 0 on success, -EINVAL if the qpair is not in the disabled state, -ENODEV if the transport
- * doesn't exist, or -ENOMEM on memory allocation failures.
+ * doesn't exist, -ENOMEM on memory allocation failures, or -EPROTO on a protocol (transport) specific failure.
  */
 int spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair);
 
@@ -2005,7 +2005,7 @@ int spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair);
  * \param group The group from which to remove the qpair.
  * \param qpair The qpair to remove from the poll group.
  *
- * return 0 on success, -ENOENT if the qpair is not found in the group.
+ * return 0 on success, -ENOENT if the qpair is not found in the group, or -EPROTO on a protocol (transport) specific failure.
  */
 int spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, struct spdk_nvme_qpair *qpair);
 
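The return-value changes above are the caller-visible part of this commit: once a transport can fail to register a qpair with its poll group (for TCP, adding the qpair's socket to the group's epoll set), spdk_nvme_poll_group_add() and spdk_nvme_poll_group_remove() may report -EPROTO. Below is a minimal sketch of how an application might handle that; attach_qpair() is a hypothetical helper, not part of this change.

#include <errno.h>
#include <stdio.h>
#include "spdk/nvme.h"

/* Hypothetical helper: add a qpair to a poll group and decode the errors
 * documented in the header comments above. */
static int
attach_qpair(struct spdk_nvme_poll_group *pg, struct spdk_nvme_qpair *qpair)
{
	int rc = spdk_nvme_poll_group_add(pg, qpair);

	switch (rc) {
	case 0:
		return 0;
	case -EPROTO:
		/* New with this commit: a protocol (transport) specific failure. */
		fprintf(stderr, "transport could not take ownership of the qpair\n");
		return rc;
	default:
		/* -EINVAL, -ENODEV or -ENOMEM per the documentation above. */
		fprintf(stderr, "spdk_nvme_poll_group_add() failed: %d\n", rc);
		return rc;
	}
}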
@@ -66,6 +66,9 @@ struct nvme_tcp_ctrlr {
 
 struct nvme_tcp_poll_group {
 	struct spdk_nvme_transport_poll_group group;
+	struct spdk_sock_group *sock_group;
+	uint32_t completions_per_qpair;
+	int64_t num_completions;
 };
 
 /* NVMe TCP qpair extensions for spdk_nvme_qpair */
@@ -129,6 +132,12 @@ nvme_tcp_qpair(struct spdk_nvme_qpair *qpair)
 	return SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair);
 }
 
+static inline struct nvme_tcp_poll_group *
+nvme_tcp_poll_group(struct spdk_nvme_transport_poll_group *group)
+{
+	return SPDK_CONTAINEROF(group, struct nvme_tcp_poll_group, group);
+}
+
 static inline struct nvme_tcp_ctrlr *
 nvme_tcp_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
 {
@@ -1443,6 +1452,22 @@ fail:
 	return -ENXIO;
 }
 
+static void
+nvme_tcp_qpair_sock_cb(void *ctx, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+	struct spdk_nvme_qpair *qpair = ctx;
+	struct nvme_tcp_poll_group *pgroup = nvme_tcp_poll_group(qpair->poll_group);
+	int32_t num_completions;
+
+	num_completions = spdk_nvme_qpair_process_completions(qpair, pgroup->completions_per_qpair);
+
+	if (pgroup->num_completions >= 0 && num_completions >= 0) {
+		pgroup->num_completions += num_completions;
+	} else {
+		pgroup->num_completions = -ENXIO;
+	}
+}
+
 static int
 nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 {
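nvme_tcp_qpair_sock_cb() is the core of the epoll integration: rather than the poll group unconditionally polling every connected qpair, each qpair's socket is registered with an spdk_sock_group, and spdk_sock_group_poll() invokes this callback only for sockets that reported activity (epoll on Linux with the POSIX sock module). The callback drains completions for that one qpair, honoring the per-qpair budget stashed in the group, and accumulates the count (or latches -ENXIO on failure). For reference, here is a bare-bones sketch of the sock-group pattern itself, independent of NVMe; on_ready() and poll_one_sock() are illustrative names, not SPDK APIs:

#include "spdk/sock.h"

/* Illustrative callback: invoked only for sockets the group reports as ready. */
static void
on_ready(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
{
	(void)cb_arg;
	(void)group;
	/* Read from sock here, e.g. with spdk_sock_recv(). */
	(void)sock;
}

/* Register an already-connected sock with a group and poll it a few times. */
static void
poll_one_sock(struct spdk_sock *sock)
{
	struct spdk_sock_group *group = spdk_sock_group_create(NULL);
	int i;

	if (group == NULL) {
		return;
	}
	if (spdk_sock_group_add_sock(group, sock, on_ready, NULL) == 0) {
		for (i = 0; i < 100; i++) {
			/* Checks readiness (epoll_wait on Linux) and fires on_ready
			 * for each socket with pending events. */
			spdk_sock_group_poll(group);
		}
		spdk_sock_group_remove_sock(group, sock);
	}
	spdk_sock_group_close(&group);
}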
@@ -1720,18 +1745,39 @@ nvme_tcp_poll_group_create(void)
 		return NULL;
 	}
 
+	group->sock_group = spdk_sock_group_create(group);
+	if (group->sock_group == NULL) {
+		free(group);
+		SPDK_ERRLOG("Unable to allocate sock group.\n");
+		return NULL;
+	}
+
 	return &group->group;
 }
 
 static int
 nvme_tcp_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
 {
+	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
+	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
+
+	if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
+		return -EPROTO;
+	}
 	return 0;
 }
 
 static int
 nvme_tcp_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
 {
+	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(qpair->poll_group);
+	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
+
+	if (tqpair->sock && group->sock_group) {
+		if (spdk_sock_group_remove_sock(group->sock_group, tqpair->sock)) {
+			return -EPROTO;
+		}
+	}
 	return 0;
 }
 
@@ -1739,6 +1785,16 @@ static int
 nvme_tcp_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
 			struct spdk_nvme_qpair *qpair)
 {
+	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
+	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
+
+	/* disconnected qpairs won't have a sock to add. */
+	if (nvme_qpair_get_state(qpair) >= NVME_QPAIR_CONNECTED) {
+		if (spdk_sock_group_add_sock(group->sock_group, tqpair->sock, nvme_tcp_qpair_sock_cb, qpair)) {
+			return -EPROTO;
+		}
+	}
+
 	return 0;
 }
 
@@ -1746,6 +1802,10 @@ static int
 nvme_tcp_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
 		struct spdk_nvme_qpair *qpair)
 {
+	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
+		return nvme_poll_group_disconnect_qpair(qpair);
+	}
+
 	return 0;
 }
 
@@ -1753,33 +1813,37 @@ static int64_t
 nvme_tcp_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
 {
+	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
 	struct spdk_nvme_qpair *qpair, *tmp_qpair;
-	int32_t local_completions = 0;
-	int64_t total_completions = 0;
+
+	group->completions_per_qpair = completions_per_qpair;
+	group->num_completions = 0;
+
+	spdk_sock_group_poll(group->sock_group);
 
 	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
 		disconnected_qpair_cb(qpair, tgroup->group->ctx);
 	}
 
-	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
-		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
-		if (local_completions < 0) {
-			disconnected_qpair_cb(qpair, tgroup->group->ctx);
-			local_completions = 0;
-		}
-		total_completions += local_completions;
-	}
-
-	return total_completions;
+	return group->num_completions;
 }
 
 static int
 nvme_tcp_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
 {
+	int rc;
+	struct nvme_tcp_poll_group *group = nvme_tcp_poll_group(tgroup);
+
 	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
 		return -EBUSY;
 	}
 
+	rc = spdk_sock_group_close(&group->sock_group);
+	if (rc != 0) {
+		SPDK_ERRLOG("Failed to close the sock group for a tcp poll group.\n");
+		assert(false);
+	}
+
 	free(tgroup);
 
 	return 0;
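Taken together, the TCP transport's process_completions path now stores the per-qpair budget, runs one spdk_sock_group_poll() pass (which drives nvme_tcp_qpair_sock_cb() for every ready qpair), handles disconnected qpairs, and returns the total accumulated by the callbacks, or -ENXIO if any qpair failed. The generic entry point applications call is unchanged; the sketch below shows a plausible polling loop that ends up in this code for TCP qpairs. on_disconnected() and poll_group_loop() are illustrative, not part of the commit.

#include <inttypes.h>
#include <stdio.h>
#include "spdk/nvme.h"

/* Illustrative: invoked by the library for qpairs found disconnected while polling. */
static void
on_disconnected(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
	(void)poll_group_ctx;
	spdk_nvme_ctrlr_free_io_qpair(qpair);
}

/* Illustrative reactor loop; completions_per_qpair of 0 lets each qpair
 * process whatever is currently available. */
static void
poll_group_loop(struct spdk_nvme_poll_group *pg)
{
	int64_t rc;

	for (;;) {
		rc = spdk_nvme_poll_group_process_completions(pg, 0, on_disconnected);
		if (rc < 0) {
			fprintf(stderr, "poll group error: %" PRId64 "\n", rc);
			break;
		}
	}
}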
@@ -114,3 +114,4 @@ DEFINE_STUB(nvme_fabric_qpair_connect, int, (struct spdk_nvme_qpair *qpair, uint
 	    0);
 DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
 		struct spdk_nvme_qpair *qpair));
+DEFINE_STUB(nvme_poll_group_disconnect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
@@ -48,6 +48,9 @@ DEFINE_STUB(nvme_qpair_submit_request,
 DEFINE_STUB(spdk_sock_set_priority,
 	    int, (struct spdk_sock *sock, int priority), 0);
 
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+		struct spdk_nvme_qpair *qpair), 0);
+
 static void
 test_nvme_tcp_pdu_set_data_buf(void)
 {