lib/idxd: Further simplify WQ configuration code

As we now only support a single WQ, there's no need for a table of
them and no need to assert that the stride from WQ to WQ is the
same as the WQ struct size.

Signed-off-by: paul luse <paul.e.luse@intel.com>
Change-Id: I205f36aae22070f532653726dd75249bbafbe3ef
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12081
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
This commit is contained in:
paul luse 2022-03-29 09:15:32 -07:00 committed by Ben Walker
parent d086d56415
commit b9d44da07d
3 changed files with 23 additions and 31 deletions

View File

@ -589,10 +589,6 @@ union idxd_wqcfg {
}; };
SPDK_STATIC_ASSERT(sizeof(union idxd_wqcfg) == 32, "size mismatch"); SPDK_STATIC_ASSERT(sizeof(union idxd_wqcfg) == 32, "size mismatch");
struct idxd_wqtbl {
union idxd_wqcfg wq[1];
};
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -232,14 +232,10 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
struct spdk_idxd_device *idxd = &user_idxd->idxd; struct spdk_idxd_device *idxd = &user_idxd->idxd;
union idxd_wqcap_register wqcap; union idxd_wqcap_register wqcap;
union idxd_offsets_register table_offsets; union idxd_offsets_register table_offsets;
struct idxd_wqtbl *wqtbl; union idxd_wqcfg *wqcfg;
union idxd_wqcfg wqcfg;
wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw); wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);
/* If this fires, something in the hardware spec has changed. */
assert(sizeof(wqtbl->wq[0]) == 1 << (WQCFG_SHIFT + wqcap.wqcfg_size));
SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size); SPDK_DEBUGLOG(idxd, "Total ring slots available 0x%x\n", wqcap.total_wq_size);
idxd->total_wq_size = wqcap.total_wq_size; idxd->total_wq_size = wqcap.total_wq_size;
@ -251,22 +247,22 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]); table_offsets.raw[0] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[0]);
table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]); table_offsets.raw[1] = spdk_mmio_read_8(&user_idxd->registers->offsets.raw[1]);
wqtbl = (struct idxd_wqtbl *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg * wqcfg = (union idxd_wqcfg *)((uint8_t *)user_idxd->registers + (table_offsets.wqcfg *
IDXD_TABLE_OFFSET_MULT)); IDXD_TABLE_OFFSET_MULT));
for (i = 0 ; i < SPDK_COUNTOF(wqtbl->wq[0].raw); i++) { for (i = 0 ; i < SPDK_COUNTOF(wqcfg->raw); i++) {
wqcfg.raw[i] = spdk_mmio_read_4(&wqtbl->wq[0].raw[i]); wqcfg->raw[i] = spdk_mmio_read_4(&wqcfg->raw[i]);
} }
wqcfg.wq_size = wqcap.total_wq_size; wqcfg->wq_size = wqcap.total_wq_size;
wqcfg.mode = WQ_MODE_DEDICATED; wqcfg->mode = WQ_MODE_DEDICATED;
wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH; wqcfg->max_batch_shift = LOG2_WQ_MAX_BATCH;
wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER; wqcfg->max_xfer_shift = LOG2_WQ_MAX_XFER;
wqcfg.wq_state = WQ_ENABLED; wqcfg->wq_state = WQ_ENABLED;
wqcfg.priority = WQ_PRIORITY_1; wqcfg->priority = WQ_PRIORITY_1;
for (i = 0; i < SPDK_COUNTOF(wqtbl->wq[0].raw); i++) { for (i = 0; i < SPDK_COUNTOF(wqcfg->raw); i++) {
spdk_mmio_write_4(&wqtbl->wq[0].raw[i], wqcfg.raw[i]); spdk_mmio_write_4(&wqcfg->raw[i], wqcfg->raw[i]);
} }
return 0; return 0;

View File

@ -107,7 +107,7 @@ test_idxd_wq_config(void)
struct spdk_user_idxd_device user_idxd = {}; struct spdk_user_idxd_device user_idxd = {};
uint32_t wq_size, i, j; uint32_t wq_size, i, j;
int rc; int rc;
struct idxd_wqtbl *wqtbl; union idxd_wqcfg *wqcfg;
user_idxd.registers = calloc(1, FAKE_REG_SIZE); user_idxd.registers = calloc(1, FAKE_REG_SIZE);
SPDK_CU_ASSERT_FATAL(user_idxd.registers != NULL); SPDK_CU_ASSERT_FATAL(user_idxd.registers != NULL);
@ -119,21 +119,21 @@ test_idxd_wq_config(void)
user_idxd.registers->offsets.wqcfg = WQ_CFG_OFFSET; user_idxd.registers->offsets.wqcfg = WQ_CFG_OFFSET;
wq_size = user_idxd.registers->wqcap.total_wq_size; wq_size = user_idxd.registers->wqcap.total_wq_size;
wqtbl = (struct idxd_wqtbl *)((uint8_t *)user_idxd.registers + wqcfg = (union idxd_wqcfg *)((uint8_t *)user_idxd.registers +
(user_idxd.registers->offsets.wqcfg * IDXD_TABLE_OFFSET_MULT)); (user_idxd.registers->offsets.wqcfg * IDXD_TABLE_OFFSET_MULT));
rc = idxd_wq_config(&user_idxd); rc = idxd_wq_config(&user_idxd);
CU_ASSERT(rc == 0); CU_ASSERT(rc == 0);
CU_ASSERT(wqtbl->wq[0].wq_size == wq_size); CU_ASSERT(wqcfg->wq_size == wq_size);
CU_ASSERT(wqtbl->wq[0].mode == WQ_MODE_DEDICATED); CU_ASSERT(wqcfg->mode == WQ_MODE_DEDICATED);
CU_ASSERT(wqtbl->wq[0].max_batch_shift == LOG2_WQ_MAX_BATCH); CU_ASSERT(wqcfg->max_batch_shift == LOG2_WQ_MAX_BATCH);
CU_ASSERT(wqtbl->wq[0].max_xfer_shift == LOG2_WQ_MAX_XFER); CU_ASSERT(wqcfg->max_xfer_shift == LOG2_WQ_MAX_XFER);
CU_ASSERT(wqtbl->wq[0].wq_state == WQ_ENABLED); CU_ASSERT(wqcfg->wq_state == WQ_ENABLED);
CU_ASSERT(wqtbl->wq[0].priority == WQ_PRIORITY_1); CU_ASSERT(wqcfg->priority == WQ_PRIORITY_1);
for (i = 1; i < user_idxd.registers->wqcap.num_wqs; i++) { for (i = 1; i < user_idxd.registers->wqcap.num_wqs; i++) {
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) { for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
CU_ASSERT(spdk_mmio_read_4(&wqtbl->wq[i].raw[j]) == 0); CU_ASSERT(spdk_mmio_read_4(&wqcfg->raw[j]) == 0);
} }
} }