env: introduce a new mem_map_ops structure
This struct will hold the unique operations for the mem_map. This series
of changes is aimed at enabling spdk_mem_map_translate to report the
length of the valid mem_map back to the function that requested the
translation. This will be useful when retrieving memory regions
associated with I/O buffers in NVMe-oF: for large I/O it is possible
that the buffer is split over multiple MRs, in which case the I/O will
have to be split into multiple SGLs.

Change-Id: Ifdd82497f238d99345033f2615c718802a591438
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/425327
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
commit 1ee27f794d
parent d288c41242
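For reference, the only operation the new table carries at this point is the existing notify callback. Below is a minimal consumer-side sketch of such a callback, not taken from this change: my_mem_notify is a hypothetical name, the identity translation is chosen purely to keep the sketch self-contained, and spdk_mem_map_set_translation()/spdk_mem_map_clear_translation() are assumed to be the usual env helpers for recording translations.

#include <errno.h>

#include "spdk/env.h"

/* Hypothetical callback of type spdk_mem_map_notify_cb, the sole member of the
 * new struct spdk_mem_map_ops. It stores an identity translation when memory is
 * registered and clears it when the memory is unregistered. */
static int
my_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
	      enum spdk_mem_map_notify_action action,
	      void *vaddr, size_t size)
{
	switch (action) {
	case SPDK_MEM_MAP_NOTIFY_REGISTER:
		/* Identity translation used only for illustration. */
		return spdk_mem_map_set_translation(map, (uint64_t)vaddr, size,
						    (uint64_t)vaddr);
	case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
		return spdk_mem_map_clear_translation(map, (uint64_t)vaddr, size);
	default:
		return -EINVAL;
	}
}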
@@ -978,6 +978,13 @@ typedef int (*spdk_mem_map_notify_cb)(void *cb_ctx, struct spdk_mem_map *map,
 				      enum spdk_mem_map_notify_action action,
 				      void *vaddr, size_t size);
 
+/**
+ * A function table to be implemented by each memory map.
+ */
+struct spdk_mem_map_ops {
+	spdk_mem_map_notify_cb notify_cb;
+};
+
 /**
  * Allocate a virtual memory address translation map.
  *
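This commit only introduces the table; spdk_mem_map_alloc() in the hunks below still receives the callback as a bare function pointer. A consumer that wanted to mirror the new layout could group its callback in a const table like the sketch here, which reuses the hypothetical my_mem_notify from the earlier example; nothing in this change requires that pattern yet.

/* Sketch only: a const operations table matching the new struct. This commit
 * does not yet pass such a table into the env library; it merely stores the
 * callback it already receives inside map->ops. */
static const struct spdk_mem_map_ops g_my_map_ops = {
	.notify_cb = my_mem_notify,
};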
@@ -82,7 +82,7 @@ struct spdk_mem_map {
 	struct map_256tb map_256tb;
 	pthread_mutex_t mutex;
 	uint64_t default_translation;
-	spdk_mem_map_notify_cb notify_cb;
+	struct spdk_mem_map_ops ops;
 	void *cb_ctx;
 	TAILQ_ENTRY(spdk_mem_map) tailq;
 };
@@ -106,7 +106,7 @@ spdk_mem_map_notify_walk(struct spdk_mem_map *map, enum spdk_mem_map_notify_acti
 	do { \
 		if (contig_start != 0) { \
 			/* End of of a virtually contiguous range */ \
-			map->notify_cb(map->cb_ctx, map, action, \
+			map->ops.notify_cb(map->cb_ctx, map, action, \
				       (void *)contig_start, \
				       contig_end - contig_start + 2 * 1024 * 1024); \
 		} \
@@ -166,7 +166,7 @@ spdk_mem_map_alloc(uint64_t default_translation, spdk_mem_map_notify_cb notify_c
 	}
 
 	map->default_translation = default_translation;
-	map->notify_cb = notify_cb;
+	map->ops.notify_cb = notify_cb;
 	map->cb_ctx = cb_ctx;
 
 	pthread_mutex_lock(&g_spdk_mem_map_mutex);
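Since the public allocation API is untouched by this change, existing callers keep working; the callback they pass simply lands in map->ops.notify_cb. A small usage sketch, with the default translation of 0, the NULL cb_ctx, and the helper name chosen arbitrarily for illustration:

/* Hypothetical helper: allocate a map whose notify callback is the
 * my_mem_notify sketch above. The env code now stores that callback in
 * map->ops.notify_cb rather than in a bare map->notify_cb field. */
static struct spdk_mem_map *
my_map_create(void)
{
	return spdk_mem_map_alloc(0, my_mem_notify, NULL);
}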
@@ -245,7 +245,7 @@ spdk_mem_register(void *vaddr, size_t len)
 	if (ref_count > 0) {
 		if (seg_len > 0) {
 			TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
-				rc = map->notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER, seg_vaddr, seg_len);
+				rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER, seg_vaddr, seg_len);
 				if (rc != 0) {
 					pthread_mutex_unlock(&g_spdk_mem_map_mutex);
 					return rc;
@@ -265,7 +265,7 @@ spdk_mem_register(void *vaddr, size_t len)
 
 	if (seg_len > 0) {
 		TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
-			rc = map->notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER, seg_vaddr, seg_len);
+			rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_REGISTER, seg_vaddr, seg_len);
 			if (rc != 0) {
 				pthread_mutex_unlock(&g_spdk_mem_map_mutex);
 				return rc;
@@ -321,7 +321,7 @@ spdk_mem_unregister(void *vaddr, size_t len)
 	if (ref_count > 1) {
 		if (seg_len > 0) {
 			TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
-				rc = map->notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
+				rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
 				if (rc != 0) {
 					pthread_mutex_unlock(&g_spdk_mem_map_mutex);
 					return rc;
@@ -341,7 +341,7 @@ spdk_mem_unregister(void *vaddr, size_t len)
 
 	if (seg_len > 0) {
 		TAILQ_FOREACH(map, &g_spdk_mem_maps, tailq) {
-			rc = map->notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
+			rc = map->ops.notify_cb(map->cb_ctx, map, SPDK_MEM_MAP_NOTIFY_UNREGISTER, seg_vaddr, seg_len);
 			if (rc != 0) {
 				pthread_mutex_unlock(&g_spdk_mem_map_mutex);
 				return rc;
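The register and unregister paths above are the two places the stored callback is actually invoked: spdk_mem_register() walks every map on g_spdk_mem_maps and calls ops.notify_cb with SPDK_MEM_MAP_NOTIFY_REGISTER, and spdk_mem_unregister() does the same with SPDK_MEM_MAP_NOTIFY_UNREGISTER. A trivial caller-side sketch, with hypothetical wrapper names:

/* Hypothetical wrappers: registering an externally allocated buffer notifies
 * every mem map through its ops.notify_cb; unregistering is symmetric. */
static int
register_external_buffer(void *buf, size_t len)
{
	return spdk_mem_register(buf, len);
}

static int
unregister_external_buffer(void *buf, size_t len)
{
	return spdk_mem_unregister(buf, len);
}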