nvme/tcp: increase timeout for async icreq response

The icreq response timeout was arbitrarily picked as 2 seconds in
commit 0e3dbd. But for extremely high connection count use cases,
such as nvme-perf with several cores and a high connection count
per core, this 2-second window can be exceeded.

So increase this to 10 seconds, but only for qpairs
that are being connected asynchronously.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I906ca9e6561b778613c80b739a20bd72c807216c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17619
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
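
For context, the conversion the patch performs is simple tick arithmetic: spdk_get_ticks_hz() reports timestamp-counter ticks per second, so multiplying it by a timeout in seconds and adding the current spdk_get_ticks() value yields an absolute deadline. Below is a minimal standalone sketch of that calculation; the two tick helpers are stubbed with clock_gettime() so it builds outside SPDK, and everything except the ICREQ_TIMEOUT_* values and the sync/async distinction is illustrative.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for spdk_get_ticks()/spdk_get_ticks_hz(); a real caller would
 * use the SPDK env library. Here one tick == one nanosecond. */
uint64_t get_ticks_hz(void) { return 1000000000ULL; }
uint64_t get_ticks(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

#define ICREQ_TIMEOUT_SYNC 2   /* in seconds */
#define ICREQ_TIMEOUT_ASYNC 10 /* in seconds */

/* Pick the per-qpair icreq window and convert it to an absolute tick value,
 * mirroring what nvme_tcp_qpair_icreq_send() does in the patch below. */
static uint64_t icreq_deadline_tsc(bool async)
{
	uint32_t timeout_in_sec = async ? ICREQ_TIMEOUT_ASYNC : ICREQ_TIMEOUT_SYNC;

	return get_ticks() + (uint64_t)timeout_in_sec * get_ticks_hz();
}

int main(void)
{
	printf("sync deadline:  %" PRIu64 "\n", icreq_deadline_tsc(false));
	printf("async deadline: %" PRIu64 "\n", icreq_deadline_tsc(true));
	return 0;
}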
Author: Jim Harris, 2023-04-17 23:57:54 +00:00 (committed by Konrad Sztyber)
Parent: 46cfc0484f
Commit: 672710c8fc


@@ -25,7 +25,12 @@
 #include "spdk_internal/trace_defs.h"
 #define NVME_TCP_RW_BUFFER_SIZE 131072
-#define NVME_TCP_TIME_OUT_IN_SECONDS 2
+/* For async connect workloads, allow more time since we are more likely
+ * to be processing lots ICREQs at once.
+ */
+#define ICREQ_TIMEOUT_SYNC 2 /* in seconds */
+#define ICREQ_TIMEOUT_ASYNC 10 /* in seconds */
 #define NVME_TCP_HPDA_DEFAULT 0
 #define NVME_TCP_MAX_R2T_DEFAULT 1
@@ -1847,6 +1852,7 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 {
 	struct spdk_nvme_tcp_ic_req *ic_req;
 	struct nvme_tcp_pdu *pdu;
+	uint32_t timeout_in_sec;
 	pdu = tqpair->send_pdu;
 	memset(tqpair->send_pdu, 0, sizeof(*tqpair->send_pdu));
@@ -1863,7 +1869,8 @@ nvme_tcp_qpair_icreq_send(struct nvme_tcp_qpair *tqpair)
 	nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
-	tqpair->icreq_timeout_tsc = spdk_get_ticks() + (NVME_TCP_TIME_OUT_IN_SECONDS * spdk_get_ticks_hz());
+	timeout_in_sec = tqpair->qpair.async ? ICREQ_TIMEOUT_ASYNC : ICREQ_TIMEOUT_SYNC;
+	tqpair->icreq_timeout_tsc = spdk_get_ticks() + (timeout_in_sec * spdk_get_ticks_hz());
 	return 0;
 }
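
The deadline computed here is only half of the mechanism; the connect path has to notice when it expires. That check sits outside this hunk, so the following is only a hedged sketch of its general shape, reusing the stubbed get_ticks() from the sketch above (the function name and return codes are assumptions; only the icreq_timeout_tsc deadline and the tick comparison come from the patch):

#include <errno.h>
#include <stdint.h>

/* Hypothetical stand-in declared in the earlier sketch. */
extern uint64_t get_ticks(void);

/* Sketch: while waiting for the ICRESP, compare the current tick count
 * against the stored deadline and give up once it has passed. */
int poll_icresp_deadline(uint64_t icreq_timeout_tsc)
{
	if (get_ticks() > icreq_timeout_tsc) {
		/* No ICRESP within the 2s (sync) or 10s (async) window. */
		return -ETIMEDOUT;
	}

	return -EAGAIN; /* still within the window; keep polling */
}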