@@ -224,7 +224,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
} else {
- struct ttm_operation_ctx ctx = {
+ struct ttm_operation_ctx ttm_opt_ctx = {
.interruptible = false,
.no_wait_gpu = false
};
@@ -233,8 +233,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ ttm_opt_ctx.flags |= TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY;
/* Allocate all page at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
+ if (ttm->bdev->driver->ttm_tt_populate(ttm, &ttm_opt_ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
@@ -488,9 +488,6 @@ ttm_check_over_swaplimit(struct ttm_mem_global *glob)
{
bool ret = false;
- if (!glob->no_retry)
- return ret;
-
if (get_nr_swap_pages() < FREE_SWAP_SPACE
&& si_mem_available() < glob->sys_mem_limit)
ret = true;
@@ -500,17 +497,28 @@ ttm_check_over_swaplimit(struct ttm_mem_global *glob)
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
- uint64_t amount, bool reserve)
+ uint64_t amount, bool reserve,
+ bool allow_alloc_anyway)
{
uint64_t limit;
int ret = -ENOMEM;
unsigned int i;
struct ttm_mem_zone *zone;
- if (ttm_check_over_swaplimit(glob))
+ if (glob->no_retry && !allow_alloc_anyway
+ && ttm_check_over_swaplimit(glob))
return ret;
spin_lock(&glob->lock);
+ /*
+ * Cover two special cases:
+ * a. when serving a page fault, allow the reservation anyway since
+ * the system pages are already allocated; otherwise OOM is triggered.
+ * b. when serving suspend, allow the reservation anyway as well.
+ */
+ if (glob->no_retry && allow_alloc_anyway)
+ goto reserve_direct;
+
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
@@ -523,6 +531,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
goto out_unlock;
}
+reserve_direct:
if (reserve) {
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
@@ -547,10 +556,10 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_operation_ctx *ctx)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
+ bool alloc_anyway = ctx->flags & TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY;
- while (unlikely(ttm_mem_global_reserve(glob,
- single_zone,
- memory, true)
+ while (unlikely(ttm_mem_global_reserve(glob, single_zone, memory,
+ true, alloc_anyway)
!= 0)) {
if (ctx->no_wait_gpu)
return -ENOMEM;
@@ -944,7 +944,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
i = 0;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
goto skip_huge;
@@ -278,7 +278,9 @@ struct ttm_operation_ctx {
};
/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
+#define TTM_OPT_FLAG_ALLOW_RES_EVICT 0x1
+/* When serving a page fault, allow allocation anyway */
+#define TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY 0x2
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
Set TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY when we are servicing the page fault
routine. In ttm_mem_global_reserve, if we are in the page fault path, always
allow the GTT page reservation: the page fault routine has already grabbed
the system memory, so allowing this exception is harmless. Otherwise it
would trigger the OOM killer.

v2: keep the original behavior except for ttm BOs with the no_retry flag set

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c          |  6 ++++--
 drivers/gpu/drm/ttm/ttm_memory.c         | 25 +++++++++++++++++--------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  1 -
 include/drm/ttm/ttm_bo_api.h             |  4 +++-
 4 files changed, 24 insertions(+), 12 deletions(-)
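
For reviewers, a minimal standalone sketch of the decision this patch adds,
buildable outside the kernel: only the two TTM_OPT_FLAG_* values mirror the
patch, while ttm_operation_ctx_sketch, over_swap_limit(), reserve_sketch()
and the main() driver are hypothetical stand-ins for struct
ttm_operation_ctx, ttm_check_over_swaplimit() and ttm_mem_global_reserve().

#include <stdbool.h>
#include <stdio.h>

#define TTM_OPT_FLAG_ALLOW_RES_EVICT     0x1
#define TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY  0x2

/* Stand-in for struct ttm_operation_ctx, reduced to the fields used here. */
struct ttm_operation_ctx_sketch {
	bool interruptible;
	bool no_wait_gpu;
	unsigned int flags;
};

/* Stand-in for ttm_check_over_swaplimit(): pretend swap is nearly full. */
static bool over_swap_limit(void)
{
	return true;
}

/*
 * Mirrors the entry check of the patched ttm_mem_global_reserve():
 * only a BO created with no_retry is subject to the swap-limit check,
 * and a page-fault caller that set ALLOW_ALLOC_ANYWAY skips it because
 * the system pages backing the fault are already allocated.
 */
static int reserve_sketch(bool bo_no_retry,
			  const struct ttm_operation_ctx_sketch *ctx)
{
	bool allow_anyway = ctx->flags & TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY;

	if (bo_no_retry && !allow_anyway && over_swap_limit())
		return -1;	/* -ENOMEM in the kernel */

	/* ... per-zone accounting elided ... */
	return 0;
}

int main(void)
{
	struct ttm_operation_ctx_sketch fault_ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY,	/* page-fault path */
	};
	struct ttm_operation_ctx_sketch normal_ctx = { 0 };

	/* Under pressure the fault path still reserves; the normal path fails. */
	printf("page fault path: %d\n", reserve_sketch(true, &fault_ctx));
	printf("ordinary path:   %d\n", reserve_sketch(true, &normal_ctx));
	return 0;
}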