@@ -197,7 +197,6 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
target = zone->max_mem;
target = (extra > target) ? 0ULL : target;
-
if (zone->used_mem > target)
return true;
}
@@ -375,6 +374,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
}
/* set it as 1/2 * swap free space we can get at that time */
glob->max_swap_mem = get_nr_swap_pages() << (PAGE_SHIFT - 1);
+ atomic64_set(&glob->swap_glob_mem, 0);
si_meminfo(&si);
ret = ttm_mem_init_kernel_zone(glob, &si);
if (unlikely(ret != 0))
@@ -475,10 +475,12 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
uint64_t amount, bool reserve)
{
uint64_t free_swap_mem = get_nr_swap_pages() << (PAGE_SHIFT - 1);
- uint64_t limit;
+ uint64_t swap_glob_mem, limit, total_used_mem;
+ struct ttm_mem_zone *zone;
int ret = -ENOMEM;
unsigned int i;
- struct ttm_mem_zone *zone;
+
+ swap_glob_mem = atomic64_read(&glob->swap_glob_mem);
spin_lock(&glob->lock);
/* adjust the max_swap_mem to cover the new inserted swap space */
@@ -493,7 +495,9 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
limit = (capable(CAP_SYS_ADMIN)) ?
zone->emer_mem : zone->max_mem;
- if (zone->used_mem > limit)
+ total_used_mem = zone->used_mem + swap_glob_mem;
+ limit += glob->max_swap_mem;
+ if (total_used_mem > limit)
goto out_unlock;
}
@@ -183,8 +183,11 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unpopulate(ttm);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
- ttm->swap_storage)
+ ttm->swap_storage) {
fput(ttm->swap_storage);
+ atomic64_sub_return(ttm->num_pages << PAGE_SHIFT,
+ &ttm->glob->mem_glob->swap_glob_mem);
+ }
ttm->swap_storage = NULL;
ttm->func->destroy(ttm);
@@ -322,8 +325,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
put_page(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
+ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) {
fput(swap_storage);
+ atomic64_sub_return(ttm->num_pages << PAGE_SHIFT,
+ &ttm->glob->mem_glob->swap_glob_mem);
+ }
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
@@ -383,6 +389,9 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+ else
+ atomic64_add_return(ttm->num_pages << PAGE_SHIFT,
+ &ttm->glob->mem_glob->swap_glob_mem);
return 0;
out_err:
@@ -50,6 +50,7 @@
* @lock: Lock to protect the @shrink - and the memory accounting members,
* that is, essentially the whole structure with some exceptions.
* @max_swap_mem: upper limit of swap space TTM can use
+ * @swap_glob_mem: total size of ttm pages which have been swapped out
* @zones: Array of pointers to accounting zones.
* @num_zones: Number of populated entries in the @zones array.
* @zone_kernel: Pointer to the kernel zone.
@@ -69,6 +70,7 @@ struct ttm_mem_global {
struct work_struct work;
spinlock_t lock;
uint64_t max_swap_mem;
+ atomic64_t swap_glob_mem;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
struct ttm_mem_zone *zone_kernel;
Separate the swapped-memory accounting from zone->used_mem, because swapped TTM pages can be flushed into the swap disk/file under high memory pressure. Add a check condition in ttm_mem_global_reserve to prevent triggering the OOM killer: if swap space is full, all swapped TTM pages would stay in system memory and could not be flushed out to swap space. Signed-off-by: Roger He <Hongbo.He@amd.com> --- drivers/gpu/drm/ttm/ttm_memory.c | 12 ++++++++---- drivers/gpu/drm/ttm/ttm_tt.c | 13 +++++++++++-- include/drm/ttm/ttm_memory.h | 2 ++ 3 files changed, 21 insertions(+), 6 deletions(-)