nvmf/tcp: Add a maximum PDU loop count

Previously, we would keep handling PDUs until there was no more incoming
data from the network, as long as the loop could continue. This is not
fair when a polling group handles multiple connections, since one busy
connection can monopolize the poller.

This change caps the number of NVMe/TCP PDUs we handle per connection in
each call, which also improves performance. After some tuning, 32 turned
out to be a good loop count; for comparison, our iSCSI target uses 16.
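
For illustration, here is a minimal sketch of the bounded receive loop this
patch introduces. It is not the actual SPDK code: struct qpair,
qpair_consume_one_pdu() and MAX_PDU_LOOP_COUNT are made-up stand-ins.

#include <stdbool.h>

#define MAX_PDU_LOOP_COUNT 32	/* cap on PDUs handled per poll of one connection */

struct qpair {
	int pending_pdus;	/* stand-in for "complete PDUs still readable on the socket" */
};

/* Consume one PDU; return true if one was consumed, false if the socket is drained. */
static bool
qpair_consume_one_pdu(struct qpair *q)
{
	if (q->pending_pdus == 0) {
		return false;
	}
	q->pending_pdus--;
	return true;
}

static int
qpair_sock_process(struct qpair *q)
{
	unsigned int pdu_count = 0;

	/*
	 * Without the pdu_count cap, a busy connection could keep this loop
	 * spinning until its socket drains, starving the other connections
	 * that share the same polling group.
	 */
	while (pdu_count < MAX_PDU_LOOP_COUNT && qpair_consume_one_pdu(q)) {
		pdu_count++;
	}

	return 0;
}

In the real patch (see the diff below), current_pdu_num is incremented as PDU
headers and payloads finish, and the existing do/while condition on the
receive state is simply extended with the new cap.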

The following shows some performance data:

Configuration:
1. Command used on the initiator side:
./examples/nvme/perf/perf -r 'trtype:TCP adrfam:IPv4 traddr:192.168.4.11 trsvcid:4420'
-q 128 -o 4096 -w randrw -M 50 -t 10

2. Target side: export 4 malloc bdevs in the same subsystem.

Result:

Before patch:

Starting thread on core 0
========================================================
                                                                                                           Latency(us)
Device Information                                                    :       IOPS      MiB/s    Average        min        max
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   51554.20     201.38    2483.07     462.31    4158.45
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   51533.00     201.30    2484.12     508.06    4464.07
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   51630.20     201.68    2479.30     481.19    4120.83
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   51700.70     201.96    2475.85     442.61    4018.67
========================================================
Total                                                                 :  206418.10     806.32    2480.58     442.61    4464.07

After patch:
Starting thread on core 0
========================================================
                                                                                                           Latency(us)
Device Information                                                    :       IOPS      MiB/s    Average        min        max
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   57445.30     224.40    2228.46     450.03    4231.23
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   57529.50     224.72    2225.17     676.07    4251.76
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   57524.80     224.71    2225.29     627.08    4193.28
TCP  (addr:192.168.4.11 subnqn:nqn.2016-06.io.spdk:cnode1) from core 0:   57476.50     224.52    2227.17     663.14    4205.12
========================================================
Total                                                                 :  229976.10     898.34    2226.52     450.03    4251.76

Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Change-Id: I86b7af1b669169eee2225de2d28c2cc313e7d905
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459572
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>

@@ -1836,13 +1836,15 @@ err:
 	spdk_nvmf_tcp_send_c2h_term_req(tqpair, pdu, fes, error_offset);
 }
+#define MAX_NVME_TCP_PDU_LOOP_COUNT 32
 static int
 spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 {
 	int rc = 0;
 	struct nvme_tcp_pdu *pdu;
 	enum nvme_tcp_pdu_recv_state prev_state;
-	uint32_t data_len;
+	uint32_t data_len, current_pdu_num = 0;
 	uint8_t psh_len, pdo, hlen;
 	int8_t padding_len;
@@ -1920,6 +1922,9 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 			/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
 			spdk_nvmf_tcp_pdu_psh_handle(tqpair);
+			if (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY) {
+				current_pdu_num++;
+			}
 			break;
 		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD:
 			pdu = &tqpair->pdu_in_progress;
@@ -1949,6 +1954,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 			/* All of this PDU has now been read from the socket. */
 			spdk_nvmf_tcp_pdu_payload_handle(tqpair);
+			current_pdu_num++;
 			break;
 		case NVME_TCP_PDU_RECV_STATE_ERROR:
 			pdu = &tqpair->pdu_in_progress;
@@ -1963,7 +1969,7 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
 			SPDK_ERRLOG("code should not come to here");
 			break;
 		}
-	} while (tqpair->recv_state != prev_state);
+	} while ((tqpair->recv_state != prev_state) && (current_pdu_num < MAX_NVME_TCP_PDU_LOOP_COUNT));
 	return rc;
 }