/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/util.h"

#include "spdk_internal/mock.h"

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "sock/uring/uring.c"
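
/* Stub the sock-layer and liburing symbols that uring.c references, so the
 * flush logic below can run without real sockets or a kernel io_uring. */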
DEFINE_STUB(spdk_sock_map_insert, int, (struct spdk_sock_map *map, int placement_id,
		struct spdk_sock_group_impl *group), 0);
DEFINE_STUB_V(spdk_sock_map_release, (struct spdk_sock_map *map, int placement_id));
DEFINE_STUB(spdk_sock_map_lookup, int, (struct spdk_sock_map *map, int placement_id,
		struct spdk_sock_group_impl **group, struct spdk_sock_group_impl *hint), 0);
DEFINE_STUB(spdk_sock_map_find_free, int, (struct spdk_sock_map *map), -1);
DEFINE_STUB_V(spdk_sock_map_cleanup, (struct spdk_sock_map *map));

DEFINE_STUB_V(spdk_net_impl_register, (struct spdk_net_impl *impl, int priority));
DEFINE_STUB(spdk_sock_close, int, (struct spdk_sock **s), 0);
DEFINE_STUB(__io_uring_get_cqe, int, (struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
		unsigned submit,
		unsigned wait_nr, sigset_t *sigmask), 0);
DEFINE_STUB(io_uring_submit, int, (struct io_uring *ring), 0);
DEFINE_STUB(io_uring_queue_init, int, (unsigned entries, struct io_uring *ring, unsigned flags), 0);
DEFINE_STUB_V(io_uring_queue_exit, (struct io_uring *ring));
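
/* Completion callback used by all requests below: records that the request
 * finished and checks that the completion status passed to it is 0. */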
static void
_req_cb(void *cb_arg, int len)
{
	*(bool *)cb_arg = true;
	CU_ASSERT(len == 0);
}
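
/* Client-side flush path: _sock_flush_client() writes queued requests with
 * sendmsg(), so the mocked sendmsg return value stands in for the number of
 * bytes the kernel accepted. */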
static void
flush_client(void)
{
	struct spdk_uring_sock_group_impl group = {};
	struct spdk_uring_sock usock = {};
	struct spdk_sock *sock = &usock.base;
	struct spdk_sock_request *req1, *req2;
	bool cb_arg1, cb_arg2;
	int rc;

	/* Set up data structures */
	TAILQ_INIT(&sock->queued_reqs);
	TAILQ_INIT(&sock->pending_reqs);
	sock->group_impl = &group.base;
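
	/* req1 carries 3 iovecs of 64 bytes each, 192 bytes in total. The
	 * iov_base values are arbitrary non-NULL pointers; the mocked sendmsg
	 * never dereferences them. */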
	req1 = calloc(1, sizeof(struct spdk_sock_request) + 3 * sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(req1 != NULL);
	SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
	SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
	SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
	SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
	SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_base = (void *)300;
	SPDK_SOCK_REQUEST_IOV(req1, 2)->iov_len = 64;
	req1->iovcnt = 3;
	req1->cb_fn = _req_cb;
	req1->cb_arg = &cb_arg1;
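
	/* req2 carries 2 iovecs of 32 bytes each, 64 bytes in total. */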
	req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(req2 != NULL);
	SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
	SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
	SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
	SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
	req2->iovcnt = 2;
	req2->cb_fn = _req_cb;
	req2->cb_arg = &cb_arg2;

	/* Simple test - a request with a 3 element iovec
	 * that gets submitted in a single sendmsg. */
	spdk_sock_request_queue(sock, req1);
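	/* sendmsg reports all 192 bytes (3 * 64) written in a single call. */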
	MOCK_SET(sendmsg, 192);
	cb_arg1 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	/* Two requests, where both can fully send. */
	spdk_sock_request_queue(sock, req1);
	spdk_sock_request_queue(sock, req2);
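	/* 256 bytes covers req1 (192) plus req2 (64), so both complete. */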
	MOCK_SET(sendmsg, 256);
	cb_arg1 = false;
	cb_arg2 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(cb_arg2 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	/* Two requests, but only the first one can fully send. */
	spdk_sock_request_queue(sock, req1);
	spdk_sock_request_queue(sock, req2);
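	/* 192 bytes completes req1 exactly, leaving req2 queued untouched. */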
	MOCK_SET(sendmsg, 192);
	cb_arg1 = false;
	cb_arg2 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(cb_arg2 == false);
	CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req2);
	TAILQ_REMOVE(&sock->queued_reqs, req2, internal.link);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	/* One request. Partial send. */
	spdk_sock_request_queue(sock, req1);
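	/* Only 10 of the 192 bytes are written, so req1 stays queued. */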
	MOCK_SET(sendmsg, 10);
	cb_arg1 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == false);
	CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);

	/* Do a second flush that partially sends again. */
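	/* 52 more bytes brings the running total to 62 of 192. */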
	MOCK_SET(sendmsg, 52);
	cb_arg1 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == false);
	CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);

	/* Flush the rest of the data */
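	/* The remaining 130 bytes (192 - 62) complete the request. */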
	MOCK_SET(sendmsg, 130);
	cb_arg1 = false;
	rc = _sock_flush_client(sock);
	CU_ASSERT(rc == 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	free(req1);
	free(req2);
}
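
/* Server-side flush path: queued requests are gathered into the write task's
 * iovec array with spdk_sock_prep_reqs() and then retired by
 * sock_complete_write_reqs() with the byte count a completion would carry. */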
static void
flush_server(void)
{
	struct spdk_uring_sock_group_impl group = {};
	struct spdk_uring_sock usock = {};
	struct spdk_sock *sock = &usock.base;
	struct spdk_sock_request *req1, *req2;
	bool cb_arg1, cb_arg2;
	int rc;

	/* Set up data structures */
	TAILQ_INIT(&sock->queued_reqs);
	TAILQ_INIT(&sock->pending_reqs);
	sock->group_impl = &group.base;
	usock.write_task.sock = &usock;
	usock.group = &group;
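
	/* req1 carries 2 iovecs of 64 bytes each, 128 bytes in total. */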
	req1 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(req1 != NULL);
	SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_base = (void *)100;
	SPDK_SOCK_REQUEST_IOV(req1, 0)->iov_len = 64;
	SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_base = (void *)200;
	SPDK_SOCK_REQUEST_IOV(req1, 1)->iov_len = 64;
	req1->iovcnt = 2;
	req1->cb_fn = _req_cb;
	req1->cb_arg = &cb_arg1;
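
	/* req2 carries 2 iovecs of 32 bytes each, 64 bytes in total. */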
	req2 = calloc(1, sizeof(struct spdk_sock_request) + 2 * sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(req2 != NULL);
	SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_base = (void *)100;
	SPDK_SOCK_REQUEST_IOV(req2, 0)->iov_len = 32;
	SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_base = (void *)200;
	SPDK_SOCK_REQUEST_IOV(req2, 1)->iov_len = 32;
	req2->iovcnt = 2;
	req2->cb_fn = _req_cb;
	req2->cb_arg = &cb_arg2;

	/* We should not call _sock_flush() directly here, since it would end
	 * up calling the liburing functions. */

	/* Simple test - a request with a 2 element iovec
	 * that is fully completed. */
	spdk_sock_request_queue(sock, req1);
	cb_arg1 = false;
	rc = spdk_sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL, NULL);
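	/* req1's two iovecs were gathered; completing all 128 bytes (2 * 64)
	 * finishes it. */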
	CU_ASSERT(rc == 2);
	sock_complete_write_reqs(sock, 128, 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	/* Two requests, where both can be fully completed. */
	spdk_sock_request_queue(sock, req1);
	spdk_sock_request_queue(sock, req2);
	cb_arg1 = false;
	cb_arg2 = false;
	rc = spdk_sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL, NULL);
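	/* Both requests were gathered (4 iovecs); 192 bytes (128 + 64)
	 * completes them both. */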
	CU_ASSERT(rc == 4);
	sock_complete_write_reqs(sock, 192, 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(cb_arg2 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	/* One request that is partially sent. */
	spdk_sock_request_queue(sock, req1);
	cb_arg1 = false;
	rc = spdk_sock_prep_reqs(sock, usock.write_task.iovs, 0, NULL, NULL);
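	/* Two iovecs again; completing only 92 of the 128 bytes leaves req1
	 * queued. */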
	CU_ASSERT(rc == 2);
	sock_complete_write_reqs(sock, 92, 0);
	CU_ASSERT(rc == 2);
	CU_ASSERT(cb_arg1 == false);
	CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);

	/* Complete a second partial send and check the result. */
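	/* 10 more bytes brings the running total to 102 of 128. */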
	sock_complete_write_reqs(sock, 10, 0);
	CU_ASSERT(cb_arg1 == false);
	CU_ASSERT(TAILQ_FIRST(&sock->queued_reqs) == req1);

	/* Data is finally sent. */
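	/* The remaining 26 bytes (128 - 102) complete the request. */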
	sock_complete_write_reqs(sock, 26, 0);
	CU_ASSERT(cb_arg1 == true);
	CU_ASSERT(TAILQ_EMPTY(&sock->queued_reqs));

	free(req1);
	free(req2);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("uring", NULL, NULL);

	CU_ADD_TEST(suite, flush_client);
	CU_ADD_TEST(suite, flush_server);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}