--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -29,6 +29,8 @@ struct vduse_mmap_vma {
struct list_head list;
};
+struct percpu_counter vduse_total_bounce_pages;
+
static inline struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain,
unsigned long iova)
@@ -48,6 +50,13 @@ vduse_domain_set_bounce_page(struct vduse_iova_domain *domain,
unsigned long chunkoff = iova & ~IOVA_CHUNK_MASK;
unsigned long pgindex = chunkoff >> PAGE_SHIFT;
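+	/* Keep per-chunk and global bounce page accounting in sync. */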
+ if (page) {
+ domain->chunks[index].used_bounce_pages++;
+ percpu_counter_inc(&vduse_total_bounce_pages);
+ } else {
+ domain->chunks[index].used_bounce_pages--;
+ percpu_counter_dec(&vduse_total_bounce_pages);
+ }
domain->chunks[index].bounce_pages[pgindex] = page;
}
@@ -175,6 +184,29 @@ void vduse_domain_remove_mapping(struct vduse_iova_domain *domain,
}
}
+static bool vduse_domain_try_unmap(struct vduse_iova_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct vduse_mmap_vma *mmap_vma;
+ unsigned long uaddr;
+ bool unmap = true;
+
+ mutex_lock(&domain->vma_lock);
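+	/*
+	 * Best-effort unmap: back off if any task already holds a
+	 * user's mmap lock instead of blocking the reclaim path.
+	 */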
+ list_for_each_entry(mmap_vma, &domain->vma_list, list) {
+ if (!mmap_read_trylock(mmap_vma->vma->vm_mm)) {
+ unmap = false;
+ break;
+ }
+
+ uaddr = iova + mmap_vma->vma->vm_start;
+ zap_page_range(mmap_vma->vma, uaddr, size);
+ mmap_read_unlock(mmap_vma->vma->vm_mm);
+ }
+ mutex_unlock(&domain->vma_lock);
+
+ return unmap;
+}
+
void vduse_domain_unmap(struct vduse_iova_domain *domain,
unsigned long iova, size_t size)
{
@@ -302,6 +334,32 @@ bool vduse_domain_is_direct_map(struct vduse_iova_domain *domain,
return atomic_read(&chunk->map_type) == TYPE_DIRECT_MAP;
}
+int vduse_domain_reclaim(struct vduse_iova_domain *domain)
+{
+ struct vduse_iova_chunk *chunk;
+ int i, freed = 0;
+
+ for (i = domain->chunk_num - 1; i >= 0; i--) {
+ chunk = &domain->chunks[i];
+ if (!chunk->used_bounce_pages)
+ continue;
+
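+		/*
+		 * Lock out allocators by setting the sign bit of
+		 * chunk->state; this only succeeds while the chunk
+		 * has no outstanding iova allocations.
+		 */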
+ if (atomic_cmpxchg(&chunk->state, 0, INT_MIN) != 0)
+ continue;
+
+ if (!vduse_domain_try_unmap(domain,
+ chunk->start, IOVA_CHUNK_SIZE)) {
+ atomic_sub(INT_MIN, &chunk->state);
+ break;
+ }
+ freed += vduse_domain_free_bounce_pages(domain,
+ chunk->start, IOVA_CHUNK_SIZE);
+ atomic_sub(INT_MIN, &chunk->state);
+ }
+
+ return freed;
+}
+
unsigned long vduse_domain_alloc_iova(struct vduse_iova_domain *domain,
size_t size, enum iova_map_type type)
{
@@ -319,10 +377,13 @@ unsigned long vduse_domain_alloc_iova(struct vduse_iova_domain *domain,
if (atomic_read(&chunk->map_type) != type)
continue;
- iova = gen_pool_alloc_algo(chunk->pool, size,
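+		/*
+		 * chunk->state acts as an allocation refcount; a negative
+		 * value means the chunk is being reclaimed, so skip it.
+		 */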
+ if (atomic_fetch_inc(&chunk->state) >= 0) {
+ iova = gen_pool_alloc_algo(chunk->pool, size,
gen_pool_first_fit_align, &data);
- if (iova)
- break;
+ if (iova)
+ break;
+ }
+ atomic_dec(&chunk->state);
}
return iova;
@@ -335,6 +396,7 @@ void vduse_domain_free_iova(struct vduse_iova_domain *domain,
struct vduse_iova_chunk *chunk = &domain->chunks[index];
gen_pool_free(chunk->pool, iova, size);
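+	/* Drop the state reference taken in vduse_domain_alloc_iova(). */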
+ atomic_dec(&chunk->state);
}
static void vduse_iova_chunk_cleanup(struct vduse_iova_chunk *chunk)
@@ -351,7 +413,8 @@ void vduse_iova_domain_destroy(struct vduse_iova_domain *domain)
for (i = 0; i < domain->chunk_num; i++) {
chunk = &domain->chunks[i];
- vduse_domain_free_bounce_pages(domain,
+ if (chunk->used_bounce_pages)
+ vduse_domain_free_bounce_pages(domain,
chunk->start, IOVA_CHUNK_SIZE);
vduse_iova_chunk_cleanup(chunk);
}
@@ -390,8 +453,10 @@ static int vduse_iova_chunk_init(struct vduse_iova_chunk *chunk,
if (!chunk->iova_map)
goto err;
+ chunk->used_bounce_pages = 0;
chunk->start = addr;
atomic_set(&chunk->map_type, TYPE_NONE);
+ atomic_set(&chunk->state, 0);
return 0;
err:
@@ -440,3 +505,13 @@ struct vduse_iova_domain *vduse_iova_domain_create(size_t size)
return NULL;
}
+
+int vduse_domain_init(void)
+{
+ return percpu_counter_init(&vduse_total_bounce_pages, 0, GFP_KERNEL);
+}
+
+void vduse_domain_exit(void)
+{
+ percpu_counter_destroy(&vduse_total_bounce_pages);
+}
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -31,8 +31,10 @@ struct vduse_iova_chunk {
struct gen_pool *pool;
struct page **bounce_pages;
struct vduse_iova_map **iova_map;
+ int used_bounce_pages;
unsigned long start;
atomic_t map_type;
+ atomic_t state;
};
struct vduse_iova_domain {
@@ -44,6 +46,8 @@ struct vduse_iova_domain {
struct list_head vma_list;
};
+extern struct percpu_counter vduse_total_bounce_pages;
+
int vduse_domain_add_vma(struct vduse_iova_domain *domain,
struct vm_area_struct *vma);
@@ -77,6 +81,8 @@ int vduse_domain_bounce_map(struct vduse_iova_domain *domain,
bool vduse_domain_is_direct_map(struct vduse_iova_domain *domain,
unsigned long iova);
+int vduse_domain_reclaim(struct vduse_iova_domain *domain);
+
unsigned long vduse_domain_alloc_iova(struct vduse_iova_domain *domain,
size_t size, enum iova_map_type type);
@@ -90,4 +96,8 @@ void vduse_iova_domain_destroy(struct vduse_iova_domain *domain);
struct vduse_iova_domain *vduse_iova_domain_create(size_t size);
+int vduse_domain_init(void);
+
+void vduse_domain_exit(void);
+
#endif /* _VDUSE_IOVA_DOMAIN_H */
Introduce vduse_domain_reclaim() to support reclaiming bounce pages
when necessary. Reclaiming is done chunk by chunk, and only iova
chunks that no one is currently using are reclaimed.

Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 83 ++++++++++++++++++++++++++++++++++--
 drivers/vdpa/vdpa_user/iova_domain.h | 10 +++++
 2 files changed, 89 insertions(+), 4 deletions(-)
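For illustration only, here is a minimal sketch (not part of this patch)
of how a caller might drive the reclaim path from a memory shrinker.
Everything except vduse_domain_reclaim() and vduse_total_bounce_pages is
an assumption: vduse_domain_list, its lock, and the shrinker names are
hypothetical, and the list walk assumes a list_head member would be
added to struct vduse_iova_domain.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu_counter.h>
#include <linux/shrinker.h>

#include "iova_domain.h"

/* Hypothetical global list of live iova domains. */
static LIST_HEAD(vduse_domain_list);
static DEFINE_MUTEX(vduse_domain_list_lock);

static unsigned long vduse_bounce_count(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	/* vduse_total_bounce_pages is the counter added by this patch. */
	return percpu_counter_read_positive(&vduse_total_bounce_pages);
}

static unsigned long vduse_bounce_scan(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	struct vduse_iova_domain *domain;
	unsigned long freed = 0;

	mutex_lock(&vduse_domain_list_lock);
	list_for_each_entry(domain, &vduse_domain_list, list)
		freed += vduse_domain_reclaim(domain);
	mutex_unlock(&vduse_domain_list_lock);

	return freed ? freed : SHRINK_STOP;
}

static struct shrinker vduse_bounce_shrinker = {
	.count_objects = vduse_bounce_count,
	.scan_objects = vduse_bounce_scan,
	.seeks = DEFAULT_SEEKS,
};

With this shape, register_shrinker(&vduse_bounce_shrinker) would slot
into vduse_domain_init() and the matching unregister_shrinker() into
vduse_domain_exit().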