idxd: Remove idxd_wqcfg from idxd_wq

It turns out that this can stay on the stack.

Change-Id: I961366307dae5ec7413a86271cd1dfb370b8f9f3
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11488
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
This commit is contained in:
Ben Walker 2022-02-03 14:29:40 -07:00 committed by Tomasz Zawadzki
parent 3b9c7ade6c
commit 225cf4b6ed
4 changed files with 21 additions and 30 deletions

View File

@@ -152,7 +152,6 @@ SPDK_STATIC_ASSERT(sizeof(struct idxd_ops) == 96, "size mismatch");
struct idxd_wq {
struct spdk_idxd_device *idxd;
struct idxd_group *group;
union idxd_wqcfg wqcfg;
};
struct spdk_idxd_impl {

View File

@@ -273,12 +273,6 @@ kernel_idxd_wq_config(struct spdk_kernel_idxd_device *kernel_idxd)
for (i = 0; i < g_kernel_dev_cfg.total_wqs; i++) {
queue = &idxd->queues[i];
queue->wqcfg.wq_size = kernel_idxd->wq_ctx[i].wq_size;
queue->wqcfg.mode = WQ_MODE_DEDICATED;
queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
queue->wqcfg.wq_state = WQ_ENABLED;
queue->wqcfg.priority = WQ_PRIORITY_1;
/* Not part of the config struct */
queue->idxd = idxd;

View File

@@ -257,6 +257,7 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
union idxd_wqcap_register wqcap;
union idxd_offsets_register table_offsets;
struct idxd_wqtbl *wqtbl;
union idxd_wqcfg wqcfg;
wqcap.raw = spdk_mmio_read_8(&user_idxd->registers->wqcap.raw);
@@ -289,15 +290,15 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
* didn't touch when we write the cfg register out below.
*/
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
queue->wqcfg.raw[j] = spdk_mmio_read_4(&wqtbl->wq[0].raw[j]);
wqcfg.raw[j] = spdk_mmio_read_4(&wqtbl->wq[0].raw[j]);
}
queue->wqcfg.wq_size = wq_size;
queue->wqcfg.mode = WQ_MODE_DEDICATED;
queue->wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
queue->wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
queue->wqcfg.wq_state = WQ_ENABLED;
queue->wqcfg.priority = WQ_PRIORITY_1;
wqcfg.wq_size = wq_size;
wqcfg.mode = WQ_MODE_DEDICATED;
wqcfg.max_batch_shift = LOG2_WQ_MAX_BATCH;
wqcfg.max_xfer_shift = LOG2_WQ_MAX_XFER;
wqcfg.wq_state = WQ_ENABLED;
wqcfg.priority = WQ_PRIORITY_1;
/* Not part of the config struct */
queue->idxd = idxd;
@@ -307,7 +308,7 @@ idxd_wq_config(struct spdk_user_idxd_device *user_idxd)
* Now write the work queue config to the device for configured queues
*/
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
spdk_mmio_write_4(&wqtbl->wq[0].raw[j], queue->wqcfg.raw[j]);
spdk_mmio_write_4(&wqtbl->wq[0].raw[j], wqcfg.raw[j]);
}
return 0;

View File

@@ -106,11 +106,9 @@ test_idxd_wq_config(void)
{
struct spdk_user_idxd_device user_idxd = {};
struct spdk_idxd_device *idxd = &user_idxd.idxd;
union idxd_wqcfg wqcfg = {};
uint32_t expected[8] = {0x40, 0, 0x11, 0xbe, 0, 0, 0x40000000, 0};
uint32_t wq_size, i, j;
uint32_t wqcap_size = 32;
int rc;
struct idxd_wqtbl *wqtbl;
user_idxd.registers = calloc(1, FAKE_REG_SIZE);
SPDK_CU_ASSERT_FATAL(user_idxd.registers != NULL);
@@ -125,24 +123,23 @@ test_idxd_wq_config(void)
user_idxd.registers->offsets.wqcfg = WQ_CFG_OFFSET;
wq_size = user_idxd.registers->wqcap.total_wq_size;
wqtbl = (struct idxd_wqtbl *)((uint8_t *)user_idxd.registers +
(user_idxd.registers->offsets.wqcfg * IDXD_TABLE_OFFSET_MULT));
rc = idxd_wq_config(&user_idxd);
CU_ASSERT(rc == 0);
CU_ASSERT(idxd->queues->wqcfg.wq_size == wq_size);
CU_ASSERT(idxd->queues->wqcfg.mode == WQ_MODE_DEDICATED);
CU_ASSERT(idxd->queues->wqcfg.max_batch_shift == LOG2_WQ_MAX_BATCH);
CU_ASSERT(idxd->queues->wqcfg.max_xfer_shift == LOG2_WQ_MAX_XFER);
CU_ASSERT(idxd->queues->wqcfg.wq_state == WQ_ENABLED);
CU_ASSERT(idxd->queues->wqcfg.priority == WQ_PRIORITY_1);
CU_ASSERT(wqtbl->wq[0].wq_size == wq_size);
CU_ASSERT(wqtbl->wq[0].mode == WQ_MODE_DEDICATED);
CU_ASSERT(wqtbl->wq[0].max_batch_shift == LOG2_WQ_MAX_BATCH);
CU_ASSERT(wqtbl->wq[0].max_xfer_shift == LOG2_WQ_MAX_XFER);
CU_ASSERT(wqtbl->wq[0].wq_state == WQ_ENABLED);
CU_ASSERT(wqtbl->wq[0].priority == WQ_PRIORITY_1);
CU_ASSERT(idxd->queues->idxd == idxd);
CU_ASSERT(idxd->queues->group == idxd->groups);
for (i = 0 ; i < user_idxd.registers->wqcap.num_wqs; i++) {
for (i = 1 ; i < user_idxd.registers->wqcap.num_wqs; i++) {
for (j = 0 ; j < (sizeof(union idxd_wqcfg) / sizeof(uint32_t)); j++) {
wqcfg.raw[j] = spdk_mmio_read_4((uint32_t *)((uint8_t *)user_idxd.registers +
(user_idxd.registers->offsets.wqcfg * IDXD_TABLE_OFFSET_MULT) +
(i * wqcap_size) +
(j * sizeof(uint32_t))));
CU_ASSERT(wqcfg.raw[j] == expected[j]);
CU_ASSERT(spdk_mmio_read_4(&wqtbl->wq[i].raw[j]) == 0);
}
}