env: bump max virtual address map from 128TB to 256TB

At least on some ARM platforms, userspace virtual
addresses use the lower 48 bits of the address space,
compared to just 47 bits on x86-64.

This doubles the size of the top-level map from
1MB to 2MB.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I18f95a10b4913dca0256cbbd8d05f3baa5ab69fa

Reviewed-on: https://review.gerrithub.io/396318
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Author: Jim Harris, 2018-01-24 20:36:50 -07:00; committed by Daniel Verkamp
parent 23fd31f9cc
commit 33e88d790f
3 changed files with 29 additions and 29 deletions
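
As a quick illustration of the size arithmetic in the message above, the
standalone sketch below recomputes both top-level map sizes. It is written
for this note, not taken from the commit; constant names mirror the diff,
and it assumes 8-byte pointers (an LP64 platform):

#include <stdint.h>
#include <stdio.h>

#define SHIFT_1GB 30 /* (1 << 30) == 1 GB */

int main(void)
{
	/* One top-level entry (a struct map_1gb pointer) per 1GB region. */
	uint64_t entries_47bit = 1ULL << (47 - SHIFT_1GB); /* 131072 entries */
	uint64_t entries_48bit = 1ULL << (48 - SHIFT_1GB); /* 262144 entries */

	/* 131072 * 8 bytes == 1 MB; 262144 * 8 bytes == 2 MB. */
	printf("47-bit map: %llu KB\n",
	       (unsigned long long)(entries_47bit * sizeof(void *) / 1024));
	printf("48-bit map: %llu KB\n",
	       (unsigned long long)(entries_48bit * sizeof(void *) / 1024));
	return 0;
}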


@@ -53,11 +53,11 @@ extern struct rte_pci_bus rte_pci_bus;
 #endif
 #include <rte_dev.h>
 
-/* x86-64 userspace virtual addresses use only the low 47 bits [0..46],
- * which is enough to cover 128 TB.
+/* x86-64 and ARM userspace virtual addresses use only the low 48 bits [0..47],
+ * which is enough to cover 256 TB.
  */
-#define SHIFT_128TB 47 /* (1 << 47) == 128 TB */
-#define MASK_128TB ((1ULL << SHIFT_128TB) - 1)
+#define SHIFT_256TB 48 /* (1 << 48) == 256 TB */
+#define MASK_256TB ((1ULL << SHIFT_256TB) - 1)
 
 #define SHIFT_1GB 30 /* (1 << 30) == 1 GB */
 #define MASK_1GB ((1ULL << SHIFT_1GB) - 1)
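
Every "& ~MASK_256TB" check in the hunks below simply tests that no address
bit above bit 47 is set. A minimal sketch of that check, using a hypothetical
helper name that does not exist in SPDK:

#include <stdbool.h>
#include <stdint.h>

#define SHIFT_256TB 48 /* (1 << 48) == 256 TB */
#define MASK_256TB  ((1ULL << SHIFT_256TB) - 1)

/* Hypothetical helper: true if vaddr fits in the low 48 bits. */
static bool is_valid_usermode_vaddr(const void *vaddr)
{
	return ((uintptr_t)vaddr & ~MASK_256TB) == 0;
}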


@@ -54,7 +54,7 @@
 #define FN_2MB_TO_4KB(fn) (fn << (SHIFT_2MB - SHIFT_4KB))
 #define FN_4KB_TO_2MB(fn) (fn >> (SHIFT_2MB - SHIFT_4KB))
 
-#define MAP_128TB_IDX(vfn_2mb) ((vfn_2mb) >> (SHIFT_1GB - SHIFT_2MB))
+#define MAP_256TB_IDX(vfn_2mb) ((vfn_2mb) >> (SHIFT_1GB - SHIFT_2MB))
 #define MAP_1GB_IDX(vfn_2mb) ((vfn_2mb) & ((1ULL << (SHIFT_1GB - SHIFT_2MB)) - 1))
 
 /* Translation of a single 2MB page. */
@@ -73,13 +73,13 @@ struct map_1gb {
-/* Top-level map table indexed by bits [30..46] of the virtual address.
+/* Top-level map table indexed by bits [30..47] of the virtual address.
  * Each entry points to a second-level map table or NULL.
  */
-struct map_128tb {
-	struct map_1gb *map[1ULL << (SHIFT_128TB - SHIFT_1GB)];
+struct map_256tb {
+	struct map_1gb *map[1ULL << (SHIFT_256TB - SHIFT_1GB)];
 };
 
 /* Page-granularity memory address translation */
 struct spdk_mem_map {
-	struct map_128tb map_128tb;
+	struct map_256tb map_256tb;
 	pthread_mutex_t mutex;
 	uint64_t default_translation;
 	spdk_mem_map_notify_cb notify_cb;
@@ -98,7 +98,7 @@ static pthread_mutex_t g_spdk_mem_map_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static void
 spdk_mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_action action)
 {
-	size_t idx_128tb;
+	size_t idx_256tb;
 	uint64_t contig_start = 0;
 	uint64_t contig_end = 0;
@@ -121,10 +121,10 @@ spdk_mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_acti
 	/* Hold the memory registration map mutex so no new registrations can be added while we are looping. */
 	pthread_mutex_lock(&g_mem_reg_map->mutex);
 
-	for (idx_128tb = 0;
-	     idx_128tb < sizeof(g_mem_reg_map->map_128tb.map) / sizeof(g_mem_reg_map->map_128tb.map[0]);
-	     idx_128tb++) {
-		const struct map_1gb *map_1gb = g_mem_reg_map->map_128tb.map[idx_128tb];
+	for (idx_256tb = 0;
+	     idx_256tb < sizeof(g_mem_reg_map->map_256tb.map) / sizeof(g_mem_reg_map->map_256tb.map[0]);
+	     idx_256tb++) {
+		const struct map_1gb *map_1gb = g_mem_reg_map->map_256tb.map[idx_256tb];
 		uint64_t idx_1gb;
 
 		if (!map_1gb) {
@@ -135,7 +135,7 @@ spdk_mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_acti
 		for (idx_1gb = 0; idx_1gb < sizeof(map_1gb->map) / sizeof(map_1gb->map[0]); idx_1gb++) {
 			if (map_1gb->map[idx_1gb].translation_2mb != 0) {
 				/* Rebuild the virtual address from the indexes */
-				uint64_t vaddr = (idx_128tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
+				uint64_t vaddr = (idx_256tb << SHIFT_1GB) | (idx_1gb << SHIFT_2MB);
 
 				if (contig_start == 0) {
 					contig_start = vaddr;
@@ -202,8 +202,8 @@ spdk_mem_map_free(struct spdk_mem_map **pmap)
 	TAILQ_REMOVE(&g_spdk_mem_maps, map, tailq);
 	pthread_mutex_unlock(&g_spdk_mem_map_mutex);
 
-	for (i = 0; i < sizeof(map->map_128tb.map) / sizeof(map->map_128tb.map[0]); i++) {
-		free(map->map_128tb.map[i]);
+	for (i = 0; i < sizeof(map->map_256tb.map) / sizeof(map->map_256tb.map[0]); i++) {
+		free(map->map_256tb.map[i]);
 	}
 
 	pthread_mutex_destroy(&map->mutex);
@@ -220,7 +220,7 @@ spdk_mem_register(void *vaddr, size_t len)
 	void *seg_vaddr;
 	size_t seg_len;
 
-	if ((uintptr_t)vaddr & ~MASK_128TB) {
+	if ((uintptr_t)vaddr & ~MASK_256TB) {
 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
 		return -EINVAL;
 	}
@@ -286,7 +286,7 @@ spdk_mem_unregister(void *vaddr, size_t len)
 	size_t seg_len;
 	uint64_t ref_count;
 
-	if ((uintptr_t)vaddr & ~MASK_128TB) {
+	if ((uintptr_t)vaddr & ~MASK_256TB) {
 		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
 		return -EINVAL;
 	}
@@ -357,16 +357,16 @@ static struct map_1gb *
 spdk_mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
 {
 	struct map_1gb *map_1gb;
-	uint64_t idx_128tb = MAP_128TB_IDX(vfn_2mb);
+	uint64_t idx_256tb = MAP_256TB_IDX(vfn_2mb);
 	size_t i;
 
-	map_1gb = map->map_128tb.map[idx_128tb];
+	map_1gb = map->map_256tb.map[idx_256tb];
 
 	if (!map_1gb) {
 		pthread_mutex_lock(&map->mutex);
 
 		/* Recheck to make sure nobody else got the mutex first. */
-		map_1gb = map->map_128tb.map[idx_128tb];
+		map_1gb = map->map_256tb.map[idx_256tb];
 		if (!map_1gb) {
 			map_1gb = malloc(sizeof(struct map_1gb));
 			if (map_1gb) {
@@ -374,7 +374,7 @@ spdk_mem_map_get_map_1gb(struct spdk_mem_map *map, uint64_t vfn_2mb)
 				for (i = 0; i < SPDK_COUNTOF(map_1gb->map); i++) {
 					map_1gb->map[i].translation_2mb = map->default_translation;
 				}
-				map->map_128tb.map[idx_128tb] = map_1gb;
+				map->map_256tb.map[idx_256tb] = map_1gb;
 			}
 		}
 
@@ -399,7 +399,7 @@ spdk_mem_map_set_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_t
 	struct map_2mb *map_2mb;
 
 	/* For now, only 2 MB-aligned registrations are supported */
-	if ((uintptr_t)vaddr & ~MASK_128TB) {
+	if ((uintptr_t)vaddr & ~MASK_256TB) {
 		DEBUG_PRINT("invalid usermode virtual address %lu\n", vaddr);
 		return -EINVAL;
 	}
@@ -439,7 +439,7 @@ spdk_mem_map_clear_translation(struct spdk_mem_map *map, uint64_t vaddr, uint64_
 	struct map_2mb *map_2mb;
 
 	/* For now, only 2 MB-aligned registrations are supported */
-	if ((uintptr_t)vaddr & ~MASK_128TB) {
+	if ((uintptr_t)vaddr & ~MASK_256TB) {
 		DEBUG_PRINT("invalid usermode virtual address %lu\n", vaddr);
 		return -EINVAL;
 	}
@@ -475,20 +475,20 @@ spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr)
 {
 	const struct map_1gb *map_1gb;
 	const struct map_2mb *map_2mb;
-	uint64_t idx_128tb;
+	uint64_t idx_256tb;
 	uint64_t idx_1gb;
 	uint64_t vfn_2mb;
 
-	if (spdk_unlikely(vaddr & ~MASK_128TB)) {
+	if (spdk_unlikely(vaddr & ~MASK_256TB)) {
 		DEBUG_PRINT("invalid usermode virtual address %p\n", (void *)vaddr);
 		return map->default_translation;
 	}
 
 	vfn_2mb = vaddr >> SHIFT_2MB;
-	idx_128tb = MAP_128TB_IDX(vfn_2mb);
+	idx_256tb = MAP_256TB_IDX(vfn_2mb);
 	idx_1gb = MAP_1GB_IDX(vfn_2mb);
 
-	map_1gb = map->map_128tb.map[idx_128tb];
+	map_1gb = map->map_256tb.map[idx_256tb];
 	if (spdk_unlikely(!map_1gb)) {
 		return map->default_translation;
 	}


@@ -248,7 +248,7 @@ spdk_vtophys_notify(void *cb_ctx, struct spdk_mem_map *map,
 	int rc = 0;
 	uint64_t paddr;
 
-	if ((uintptr_t)vaddr & ~MASK_128TB) {
+	if ((uintptr_t)vaddr & ~MASK_256TB) {
		DEBUG_PRINT("invalid usermode virtual address %p\n", vaddr);
 		return -EINVAL;
 	}
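
To make the two-level index math above concrete, here is a standalone sketch
(written for this page, reusing the same macro definitions as the diff) that
decomposes one arbitrary sample address into its top-level and second-level
indexes:

#include <stdint.h>
#include <stdio.h>

#define SHIFT_2MB 21 /* (1 << 21) == 2 MB */
#define SHIFT_1GB 30 /* (1 << 30) == 1 GB */
#define MAP_256TB_IDX(vfn_2mb) ((vfn_2mb) >> (SHIFT_1GB - SHIFT_2MB))
#define MAP_1GB_IDX(vfn_2mb)   ((vfn_2mb) & ((1ULL << (SHIFT_1GB - SHIFT_2MB)) - 1))

int main(void)
{
	uint64_t vaddr = 0x00007f1234600000ULL; /* arbitrary 48-bit userspace address */
	uint64_t vfn_2mb = vaddr >> SHIFT_2MB;  /* 2MB virtual frame number */

	/* Bits [30..47] select the 1GB region; bits [21..29] select the 2MB page. */
	printf("idx_256tb = %llu\n", (unsigned long long)MAP_256TB_IDX(vfn_2mb));
	printf("idx_1gb   = %llu\n", (unsigned long long)MAP_1GB_IDX(vfn_2mb));
	return 0;
}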