lib/iscsi: Make max outstanding R2Ts per connection configurable
After the recent refactoring, outstanding R2Ts per connection are no longer held in a fixed-size array, so there is no technical reason left to keep the maximum hard-coded. Some use cases issue large write I/O intensively (e.g. 128KB); let users with such workloads tune the maximum outstanding R2Ts per connection themselves as part of their performance tuning. The maximum outstanding R2Ts per task is defined for both the iSCSI target and the NVMe-TCP target, but the maximum outstanding R2Ts per connection is specific to the iSCSI target. The next patch will add the corresponding iSCSI option.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I4f6fd3c750a9a0a99bcf23064fe43a3389829aa9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3776
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent fb229e1eb2
commit 5af42000c1
@@ -2735,7 +2735,7 @@ add_transfer_task(struct spdk_iscsi_conn *conn, struct spdk_iscsi_task *task)
 	 * and start sending R2T for it after some of the tasks using R2T/data
 	 * out buffers complete.
 	 */
-	if (conn->pending_r2t >= DEFAULT_MAXR2T) {
+	if (conn->pending_r2t >= g_iscsi.MaxR2TPerConnection) {
 		TAILQ_INSERT_TAIL(&conn->queued_r2t_tasks, task, link);
 		return 0;
 	}
@@ -2785,7 +2785,7 @@ start_queued_transfer_tasks(struct spdk_iscsi_conn *conn)
 	struct spdk_iscsi_task *task, *tmp;
 
 	TAILQ_FOREACH_SAFE(task, &conn->queued_r2t_tasks, link, tmp) {
-		if (conn->pending_r2t < DEFAULT_MAXR2T) {
+		if (conn->pending_r2t < g_iscsi.MaxR2TPerConnection) {
 			TAILQ_REMOVE(&conn->queued_r2t_tasks, task, link);
 			add_transfer_task(conn, task);
 		} else {
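
Taken together, these two hunks form a simple per-connection credit scheme: an R2T is issued immediately while pending_r2t is below the limit, tasks beyond that are parked on queued_r2t_tasks, and the queue is drained as outstanding transfers complete. Below is a minimal, self-contained sketch of that pattern; struct conn, struct task, and the completion hook are simplified stand-ins for the SPDK structures, and the drain loop uses TAILQ_FIRST instead of the TAILQ_FOREACH_SAFE walk in the real code, so this illustrates the idea rather than the actual implementation.

#include <stdio.h>
#include <sys/queue.h>

/* Simplified stand-ins for spdk_iscsi_task / spdk_iscsi_conn. */
struct task {
	int id;
	TAILQ_ENTRY(task) link;
};

struct conn {
	unsigned pending_r2t;                 /* R2Ts currently outstanding */
	TAILQ_HEAD(, task) queued_r2t_tasks;  /* tasks waiting for an R2T slot */
};

/* Stand-in for g_iscsi.MaxR2TPerConnection. */
static unsigned g_max_r2t_per_connection = 4;

/* Mirrors the decision in add_transfer_task(): issue an R2T if the
 * per-connection budget allows it, otherwise queue the task. */
static void
add_transfer_task_sketch(struct conn *conn, struct task *task)
{
	if (conn->pending_r2t >= g_max_r2t_per_connection) {
		TAILQ_INSERT_TAIL(&conn->queued_r2t_tasks, task, link);
		return;
	}
	conn->pending_r2t++;
	printf("task %d: R2T issued\n", task->id);
}

/* Mirrors start_queued_transfer_tasks(): hand queued tasks back to the
 * submit path while credits are available. */
static void
start_queued_transfer_tasks_sketch(struct conn *conn)
{
	struct task *task;

	while (!TAILQ_EMPTY(&conn->queued_r2t_tasks) &&
	       conn->pending_r2t < g_max_r2t_per_connection) {
		task = TAILQ_FIRST(&conn->queued_r2t_tasks);
		TAILQ_REMOVE(&conn->queued_r2t_tasks, task, link);
		add_transfer_task_sketch(conn, task);
	}
}

/* Called when a data transfer finishes: release one credit and retry the
 * queue. */
static void
transfer_complete_sketch(struct conn *conn)
{
	conn->pending_r2t--;
	start_queued_transfer_tasks_sketch(conn);
}

int
main(void)
{
	struct conn conn = { .pending_r2t = 0 };
	struct task tasks[6];

	TAILQ_INIT(&conn.queued_r2t_tasks);
	for (int i = 0; i < 6; i++) {
		tasks[i].id = i;
		add_transfer_task_sketch(&conn, &tasks[i]);  /* tasks 4 and 5 are queued */
	}
	transfer_complete_sketch(&conn);                     /* frees a slot; task 4 starts */
	return 0;
}

Making the limit a runtime global (g_iscsi.MaxR2TPerConnection) rather than the compile-time DEFAULT_MAXR2T is what lets users raise it without rebuilding.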
@@ -353,6 +353,7 @@ struct spdk_iscsi_globals {
 	uint32_t ErrorRecoveryLevel;
 	bool AllowDuplicateIsid;
 	uint32_t MaxLargeDataInPerConnection;
+	uint32_t MaxR2TPerConnection;
 
 	struct spdk_mempool *pdu_pool;
 	struct spdk_mempool *pdu_immediate_data_pool;
@@ -141,7 +141,7 @@ mobj_ctor(struct spdk_mempool *mp, __attribute__((unused)) void *arg,
 
 #define NUM_PDU_PER_CONNECTION(iscsi)	(2 * (iscsi->MaxQueueDepth + \
 					       iscsi->MaxLargeDataInPerConnection + \
-					       2 * DEFAULT_MAXR2T + 8))
+					       2 * iscsi->MaxR2TPerConnection + 8))
 #define PDU_POOL_SIZE(iscsi)	(iscsi->MaxConnections * NUM_PDU_PER_CONNECTION(iscsi))
 #define IMMEDIATE_DATA_POOL_SIZE(iscsi)	(iscsi->MaxConnections * 128)
 #define DATA_OUT_POOL_SIZE(iscsi)	(iscsi->MaxConnections * MAX_DATA_OUT_PER_CONNECTION)
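
Since the limit now feeds NUM_PDU_PER_CONNECTION, raising it also grows the PDU pool. A back-of-the-envelope check of the macro follows; the values plugged in (MaxQueueDepth 64, MaxLargeDataInPerConnection 64, MaxConnections 128, MaxR2TPerConnection 4, i.e. DEFAULT_MAXR2T) are assumptions for illustration, not authoritative SPDK defaults.

#include <stdio.h>

int
main(void)
{
	unsigned max_queue_depth = 64;    /* assumed default MaxQueueDepth */
	unsigned max_large_datain = 64;   /* assumed default MaxLargeDataInPerConnection */
	unsigned max_r2t = 4;             /* DEFAULT_MAXR2T per the unit test comment */
	unsigned max_connections = 128;   /* assumed default MaxConnections */

	/* Same arithmetic as NUM_PDU_PER_CONNECTION / PDU_POOL_SIZE above. */
	unsigned pdu_per_conn = 2 * (max_queue_depth + max_large_datain + 2 * max_r2t + 8);
	printf("NUM_PDU_PER_CONNECTION = %u\n", pdu_per_conn);            /* 288 */
	printf("PDU_POOL_SIZE = %u\n", max_connections * pdu_per_conn);   /* 36864 */

	/* Each extra R2T credit costs four more PDUs per connection (the
	 * macro's outer 2 times the inner 2). */
	max_r2t = 8;
	pdu_per_conn = 2 * (max_queue_depth + max_large_datain + 2 * max_r2t + 8);
	printf("with MaxR2TPerConnection = 8: %u per connection\n", pdu_per_conn);   /* 304 */
	return 0;
}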
@@ -381,6 +381,9 @@ iscsi_log_globals(void)
 
 	SPDK_DEBUGLOG(SPDK_LOG_ISCSI, "MaxLargeDataInPerConnection %d\n",
 		      g_iscsi.MaxLargeDataInPerConnection);
+
+	SPDK_DEBUGLOG(SPDK_LOG_ISCSI, "MaxR2TPerConnection %d\n",
+		      g_iscsi.MaxR2TPerConnection);
 }
 
 static void
@@ -780,6 +783,7 @@ iscsi_set_global_params(struct spdk_iscsi_opts *opts)
 	g_iscsi.mutual_chap = opts->mutual_chap;
 	g_iscsi.chap_group = opts->chap_group;
 	g_iscsi.MaxLargeDataInPerConnection = opts->MaxLargeDataInPerConnection;
+	g_iscsi.MaxR2TPerConnection = DEFAULT_MAXR2T;
 
 	iscsi_log_globals();
 }
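
Note that at this point the global is simply pinned to DEFAULT_MAXR2T; the commit message says the next patch adds the corresponding iSCSI option, so presumably the value will then be carried through spdk_iscsi_opts the way MaxLargeDataInPerConnection is here. The sketch below shows that expected wiring with simplified stand-in structs; the MaxR2TPerConnection member on the opts side is a hypothetical name inferred from the existing naming pattern, not something this patch defines.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_MAXR2T_SKETCH 4   /* mirrors DEFAULT_MAXR2T (4) */

/* Simplified stand-ins; the real spdk_iscsi_opts / spdk_iscsi_globals
 * carry many more fields. */
struct opts_sketch {
	uint32_t MaxLargeDataInPerConnection;
	uint32_t MaxR2TPerConnection;   /* hypothetical option, expected from the next patch */
};

struct globals_sketch {
	uint32_t MaxLargeDataInPerConnection;
	uint32_t MaxR2TPerConnection;
};

static struct globals_sketch g_globals;

static void
set_global_params_sketch(const struct opts_sketch *opts)
{
	g_globals.MaxLargeDataInPerConnection = opts->MaxLargeDataInPerConnection;
	/* With the option in place, the user-supplied value would replace the
	 * hard-coded DEFAULT_MAXR2T assignment made by this patch. */
	g_globals.MaxR2TPerConnection = opts->MaxR2TPerConnection;
}

int
main(void)
{
	struct opts_sketch opts = {
		.MaxLargeDataInPerConnection = 64,
		.MaxR2TPerConnection = DEFAULT_MAXR2T_SKETCH,
	};

	set_global_params_sketch(&opts);
	printf("MaxR2TPerConnection = %u\n", (unsigned)g_globals.MaxR2TPerConnection);
	return 0;
}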
@@ -280,6 +280,8 @@ maxburstlength_test(void)
 	struct spdk_iscsi_pdu *response_pdu;
 	int rc;
 
+	g_iscsi.MaxR2TPerConnection = DEFAULT_MAXR2T;
+
 	req_pdu = iscsi_get_pdu(&conn);
 	data_out_pdu = iscsi_get_pdu(&conn);
@@ -654,6 +656,8 @@ add_transfer_task_test(void)
 	int rc, count = 0;
 	uint32_t buffer_offset, desired_xfer_len;
 
+	g_iscsi.MaxR2TPerConnection = DEFAULT_MAXR2T;
+
 	sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;	/* 1M */
 	sess.MaxOutstandingR2T = DEFAULT_MAXR2T;	/* 4 */