[33/34] drm: Fix drm_mm search and insertion

Message ID 20161212115350.780-34-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Dec. 12, 2016, 11:53 a.m. UTC
The drm_mm range manager claimed to support top-down insertion, but it
was neither searching for the top-most hole that could fit the
allocation request nor fitting the request to the hole correctly.

In order to search the range efficiently, we create two secondary
indices for the holes, one sorted by size and one by address. These
indices allow us to find the smallest hole or the hole at the bottom or
top of the range efficiently, whilst keeping the hole stack to rapidly
service evictions.
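
For illustration, a caller requesting top-down placement goes from the
paired search/creation flags to a single insertion mode (this mirrors
the driver conversions in the diff below):

	/* before: separate search and allocation flags */
	err = drm_mm_insert_node_in_range_generic(mm, node, size, align, 0,
						  start, end,
						  DRM_MM_SEARCH_BELOW,
						  DRM_MM_CREATE_TOP);

	/* after: one mode selects both the search order and the fit */
	err = drm_mm_insert_node_in_range_generic(mm, node, size, align, 0,
						  start, end,
						  DRM_MM_INSERT_HIGH);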

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  |  12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c |  14 +-
 drivers/gpu/drm/armada/armada_gem.c          |   4 +-
 drivers/gpu/drm/drm_mm.c                     | 558 +++++++++++----------------
 drivers/gpu/drm/drm_vma_manager.c            |   3 +-
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c        |   9 +-
 drivers/gpu/drm/i915/gvt/aperture_gm.c       |  11 +-
 drivers/gpu/drm/i915/i915_gem.c              |   3 +-
 drivers/gpu/drm/i915/i915_gem_evict.c        |   9 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c          |   4 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c       |   3 +-
 drivers/gpu/drm/i915/i915_vma.c              |  25 +-
 drivers/gpu/drm/msm/msm_gem.c                |   3 +-
 drivers/gpu/drm/msm/msm_gem_vma.c            |   3 +-
 drivers/gpu/drm/selftests/test-drm_mm.c      | 137 +++----
 drivers/gpu/drm/sis/sis_mm.c                 |   6 +-
 drivers/gpu/drm/tegra/gem.c                  |   4 +-
 drivers/gpu/drm/ttm/ttm_bo_manager.c         |  15 +-
 drivers/gpu/drm/vc4/vc4_crtc.c               |   2 +-
 drivers/gpu/drm/vc4/vc4_hvs.c                |   3 +-
 drivers/gpu/drm/vc4/vc4_plane.c              |   6 +-
 drivers/gpu/drm/via/via_mm.c                 |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c       |  10 +-
 include/drm/drm_mm.h                         | 113 +++---
 25 files changed, 412 insertions(+), 552 deletions(-)

Comments

Joonas Lahtinen Dec. 15, 2016, 12:28 p.m. UTC | #1
On ma, 2016-12-12 at 11:53 +0000, Chris Wilson wrote:
> The drm_mm range manager claimed to support top-down insertion, but it
> was neither searching for the top-most hole that could fit the
> allocation request nor fitting the request to the hole correctly.
> 
> In order to search the range efficiently, we create two secondary
> indices for the holes, one sorted by size and one by address. These
> indices allow us to find the smallest hole or the hole at the bottom or
> top of the range efficiently, whilst keeping the hole stack to rapidly
> service evictions.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

<SNIP>

> +static void rm_hole(struct drm_mm_node *node)
> +{
> +	if (!node->hole_size)
> +		return;

I've actively tried to remove conditions that cause asymmetry between
add_/rm_, create_/destroy_ etc. So I think this should be
DRM_MM_BUG_ON() too.
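
That is, a sketch of the symmetric form:

	static void rm_hole(struct drm_mm_node *node)
	{
		DRM_MM_BUG_ON(!node->hole_size);

		list_del(&node->hole_stack);
		rb_erase(&node->rb_hole_size, &node->mm->holes_size);
		rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
		node->hole_size = 0;
	}

(The early return is currently load-bearing for drm_mm_remove_node(),
which calls rm_hole() on nodes that may not be followed by a hole, so
those callers would need to check hole_size first.)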

> +static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
>  {
> -	struct drm_mm *mm = hole_node->mm;
> -	u64 hole_start = drm_mm_hole_node_start(hole_node);
> -	u64 hole_end = drm_mm_hole_node_end(hole_node);
> -	u64 adj_start = hole_start;
> -	u64 adj_end = hole_end;
> +	struct rb_node *best = NULL;
> +	struct rb_node **link = &mm->holes_size.rb_node;
> +	while (*link) {
> +		struct rb_node *rb = *link;
> +		if (size <= rb_hole_size(rb))
> +			link = &rb->rb_left, best = rb;

Single assignment per line, by coding style. And
link = &(best = rb)->rb_left is not better :P
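
Spelled out per the coding style, that would be:

	if (size <= rb_hole_size(rb)) {
		best = rb;
		link = &rb->rb_left;
	} else {
		link = &rb->rb_right;
	}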

> -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
> +int drm_mm_insert_node_in_range_generic(struct drm_mm * const mm,
> +					struct drm_mm_node * const node,

I really have no stance on the consts, I'll defer to higher powers on
this.

> +void drm_mm_remove_node(struct drm_mm_node *node)
>  {

<SNIP>

> -	return best;
> +	rm_hole(prev_node);
> +	add_hole(prev_node);

update_hole?
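
I.e. a hypothetical helper to make the intent explicit:

	/* hypothetical: re-index prev_node's hole once its neighbour is gone */
	static void update_hole(struct drm_mm_node *node)
	{
		rm_hole(node);
		add_hole(node);
	}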
 
> @@ -799,7 +706,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
>  	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
>  		return false;
>  
> -	if (scan->flags == DRM_MM_CREATE_TOP)
> +	if (scan->flags == DRM_MM_INSERT_HIGH)

Flags are usually checked with & if somebody wants to add them later.
Otherwise you could call it "mode".
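
I.e. either keep them combinable and test with a mask:

	if (scan->flags & DRM_MM_INSERT_HIGH)

or, if they really are mutually exclusive modes, a rename (hypothetically
scan->mode) would make the equality test read naturally:

	if (scan->mode == DRM_MM_INSERT_HIGH)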

Somebody else could give this a glance too.

Regards, Joonas
Chris Wilson Dec. 15, 2016, 12:57 p.m. UTC | #2
On Thu, Dec 15, 2016 at 02:28:32PM +0200, Joonas Lahtinen wrote:
> On ma, 2016-12-12 at 11:53 +0000, Chris Wilson wrote:
> > @@ -799,7 +706,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
> >  	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
> >  		return false;
> >  
> > -	if (scan->flags == DRM_MM_CREATE_TOP)
> > +	if (scan->flags == DRM_MM_INSERT_HIGH)
> 
> Flags are usually checked with & if somebody wants to add them later.
> Otherwise you could call it "mode".

Once upon a time, they were intended to be flags. They have since
devolved back into a mode. The only suitable argument for my laziness
was "what if I wanted to add a flag later!"
-Chris

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 00f46b0e076d..ce4f06ea0be2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -97,8 +97,7 @@  int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned int flags;
 	unsigned long fpfn, lpfn;
 	int r;
 
@@ -115,15 +114,14 @@  int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	else
 		lpfn = man->size;
 
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	flags = 0;
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+		flags = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
 	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
 						mem->page_alignment, 0,
-						fpfn, lpfn, sflags, aflags);
+						fpfn, lpfn, flags);
 	spin_unlock(&mgr->lock);
 
 	if (!r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d710226a0fff..3278d53c7473 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,8 +97,7 @@  static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	struct amdgpu_vram_mgr *mgr = man->priv;
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm_node *nodes;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	unsigned int flags;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 	unsigned i;
 	int r;
@@ -121,10 +120,9 @@  static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	if (!nodes)
 		return -ENOMEM;
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	flags = 0;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		flags = DRM_MM_INSERT_HIGH;
 
 	pages_left = mem->num_pages;
 
@@ -135,13 +133,11 @@  static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
-		else
-			sflags |= DRM_MM_SEARCH_BEST;
 
 		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
 							alignment, 0,
 							place->fpfn, lpfn,
-							sflags, aflags);
+							flags);
 		if (unlikely(r))
 			goto error;
 
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 768087ddb046..ddf11ffba7a1 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -149,8 +149,8 @@  armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 			return -ENOSPC;
 
 		mutex_lock(&priv->linear_lock);
-		ret = drm_mm_insert_node(&priv->linear, node, size, align,
-					 DRM_MM_SEARCH_DEFAULT);
+		ret = drm_mm_insert_node_generic(&priv->linear,
+						 node, size, align, 0, 0);
 		mutex_unlock(&priv->linear_lock);
 		if (ret) {
 			kfree(node);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index b061066066d8..3c3f09d88f87 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,19 +91,6 @@ 
  * some basic allocator dumpers for debugging.
  */
 
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						enum drm_mm_search_flags flags);
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						u64 start,
-						u64 end,
-						enum drm_mm_search_flags flags);
-
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
 
@@ -225,65 +212,46 @@  static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 			    &drm_mm_interval_tree_augment);
 }
 
-static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
-				 struct drm_mm_node *node,
-				 u64 size, u64 alignment,
-				 unsigned long color,
-				 enum drm_mm_allocator_flags flags)
+#define RB_INSERT(root, member, expr) do { \
+	struct rb_node **link = &root.rb_node, *rb = NULL; \
+	u64 x = expr(node); \
+	while (*link) { \
+		rb = *link; \
+		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+			link = &rb->rb_left; \
+		else \
+			link = &rb->rb_right; \
+	} \
+	rb_link_node(&node->member, rb, link); \
+	rb_insert_color(&node->member, &root); \
+} while (0)
+
+#define HOLE_SIZE(NODE) ((NODE)->hole_size)
+#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+
+static void add_hole(struct drm_mm_node *node)
 {
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
-
-	DRM_MM_BUG_ON(node->allocated);
-
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
-
-	if (alignment) {
-		u64 rem;
-
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
-		}
-	}
-
-	DRM_MM_BUG_ON(adj_start < hole_start);
-	DRM_MM_BUG_ON(adj_end > hole_end);
-
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
-	}
-
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
+	struct drm_mm *mm = node->mm;
 
-	list_add(&node->node_list, &hole_node->node_list);
+	node->hole_size =
+		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+	DRM_MM_BUG_ON(!node->hole_size);
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
-	DRM_MM_BUG_ON(node->start + node->size > adj_end);
+	list_add(&node->hole_stack, &mm->hole_stack);
+}
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+static void rm_hole(struct drm_mm_node *node)
+{
+	if (!node->hole_size)
+		return;
 
-	save_stack(node);
+	list_del(&node->hole_stack);
+	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+	node->hole_size = 0;
 }
 
 /**
@@ -322,8 +290,8 @@  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		hole = list_entry(__drm_mm_nodes(mm), typeof(*hole), node_list);
 	}
 
-	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
-	if (!hole->hole_follows)
+	hole = list_prev_entry(hole, node_list);
+	if (!hole->hole_size)
 		return -ENOSPC;
 
 	adj_start = hole_start = __drm_mm_hole_node_start(hole);
@@ -336,22 +304,17 @@  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		return -ENOSPC;
 
 	node->mm = mm;
-	node->allocated = 1;
 
 	list_add(&node->node_list, &hole->node_list);
-
 	drm_mm_interval_tree_add_node(hole, node);
+	node->allocated = true;
+	node->hole_size = 0;
 
-	if (node->start == hole_start) {
-		hole->hole_follows = 0;
-		list_del(&hole->hole_stack);
-	}
-
-	node->hole_follows = 0;
-	if (end != hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+	rm_hole(hole);
+	if (node->start > hole_start)
+		add_hole(hole);
+	if (end < hole_end)
+		add_hole(node);
 
 	save_stack(node);
 
@@ -359,104 +322,93 @@  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-/**
- * drm_mm_insert_node_generic - search for space and insert @node
- * @mm: drm_mm to allocate from
- * @node: preallocate node to insert
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @color: opaque tag value to use for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
- *
- * The preallocated node must be cleared to 0.
- *
- * Returns:
- * 0 on success, -ENOSPC if there's no suitable hole.
- */
-int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-			       u64 size, u64 alignment,
-			       unsigned long color,
-			       enum drm_mm_search_flags sflags,
-			       enum drm_mm_allocator_flags aflags)
+static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
 {
-	struct drm_mm_node *hole_node;
-
-	if (WARN_ON(size == 0))
-		return -EINVAL;
+	return rb ? rb_entry(rb, struct drm_mm_node, rb_hole_size) : NULL;
+}
 
-	hole_node = drm_mm_search_free_generic(mm, size, alignment,
-					       color, sflags);
-	if (!hole_node)
-		return -ENOSPC;
+static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
+{
+	return rb ? rb_entry(rb, struct drm_mm_node, rb_hole_addr) : NULL;
+}
 
-	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
-	return 0;
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
 }
-EXPORT_SYMBOL(drm_mm_insert_node_generic);
-
-static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
-				       struct drm_mm_node *node,
-				       u64 size, u64 alignment,
-				       unsigned long color,
-				       u64 start, u64 end,
-				       enum drm_mm_allocator_flags flags)
+
+static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 {
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
+	struct rb_node *best = NULL;
+	struct rb_node **link = &mm->holes_size.rb_node;
+	while (*link) {
+		struct rb_node *rb = *link;
+		if (size <= rb_hole_size(rb))
+			link = &rb->rb_left, best = rb;
+		else
+			link = &rb->rb_right;
+	}
+	return rb_hole_size_to_node(best);
+}
 
-	DRM_MM_BUG_ON(!hole_node->hole_follows || node->allocated);
+static struct drm_mm_node *low_hole(struct drm_mm *mm, u64 addr)
+{
+	struct drm_mm_node *node = NULL;
+	struct rb_node **link = &mm->holes_addr.rb_node;
+	while (*link) {
+		node = rb_hole_addr_to_node(*link);
+		if (addr == __drm_mm_hole_node_start(node))
+			return node;
 
-	if (adj_start < start)
-		adj_start = start;
-	if (adj_end > end)
-		adj_end = end;
+		if (addr < __drm_mm_hole_node_start(node))
+			link = &node->rb_hole_addr.rb_left;
+		else
+			link = &node->rb_hole_addr.rb_right;
+	}
+	return node;
+}
 
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+static struct drm_mm_node *
+first_hole(struct drm_mm *mm, u64 start, u64 end, u64 size, unsigned int flags)
+{
+	if (RB_EMPTY_ROOT(&mm->holes_size))
+		return NULL;
 
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
+	switch (flags) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return best_hole(mm, size);
 
-	if (alignment) {
-		u64 rem;
+	case DRM_MM_INSERT_LOW:
+		return low_hole(mm, start);
 
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
-		}
-	}
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_last(&mm->holes_addr));
 
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
+	case DRM_MM_INSERT_EVICT:
+		return list_first_entry_or_null(&mm->hole_stack,
+						struct drm_mm_node,
+						hole_stack);
 	}
+}
 
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
-
-	list_add(&node->node_list, &hole_node->node_list);
+static struct drm_mm_node *
+next_hole(struct drm_mm *mm, struct drm_mm_node *node, unsigned int flags)
+{
+	switch (flags) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	case DRM_MM_INSERT_LOW:
+		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
 
-	DRM_MM_BUG_ON(node->start < start);
-	DRM_MM_BUG_ON(node->start < adj_start);
-	DRM_MM_BUG_ON(node->start + node->size > adj_end);
-	DRM_MM_BUG_ON(node->start + node->size > end);
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
+	case DRM_MM_INSERT_EVICT:
+		node = list_next_entry(node, hole_stack);
+		return &node->hole_stack == &mm->hole_stack ? NULL : node;
 	}
 
 	save_stack(node);
@@ -479,177 +431,127 @@  static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+int drm_mm_insert_node_in_range_generic(struct drm_mm * const mm,
+					struct drm_mm_node * const node,
 					u64 size, u64 alignment,
 					unsigned long color,
 					u64 start, u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags)
+					unsigned int flags)
 {
-	struct drm_mm_node *hole_node;
+	struct drm_mm_node *hole;
+	u64 alignment_mask;
 
 	if (WARN_ON(size == 0))
 		return -EINVAL;
 
-	hole_node = drm_mm_search_free_in_range_generic(mm,
-							size, alignment, color,
-							start, end, sflags);
-	if (!hole_node)
+	if (end - start < size)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node,
-				   size, alignment, color,
-				   start, end, aflags);
-	return 0;
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
-
-/**
- * drm_mm_remove_node - Remove a memory node from the allocator.
- * @node: drm_mm_node to remove
- *
- * This just removes a node from its drm_mm allocator. The node does not need to
- * be cleared again before it can be re-inserted into this or any other drm_mm
- * allocator. It is a bug to call this function on a un-allocated node.
- */
-void drm_mm_remove_node(struct drm_mm_node *node)
-{
-	struct drm_mm *mm = node->mm;
-	struct drm_mm_node *prev_node;
-
-	DRM_MM_BUG_ON(!node->allocated);
-	DRM_MM_BUG_ON(node->scanned_block);
-
-	prev_node =
-	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
-
-	if (node->hole_follows) {
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
-			      __drm_mm_hole_node_end(node));
-		list_del(&node->hole_stack);
-	} else
-		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
-			      __drm_mm_hole_node_end(node));
-
+	if (alignment <= 1)
+		alignment = 0;
 
-	if (!prev_node->hole_follows) {
-		prev_node->hole_follows = 1;
-		list_add(&prev_node->hole_stack, &mm->hole_stack);
-	} else
-		list_move(&prev_node->hole_stack, &mm->hole_stack);
+	alignment_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+	for (hole = first_hole(mm, start, end, size, flags); hole;
+	     hole = next_hole(mm, hole, flags)) {
+		u64 hole_start = __drm_mm_hole_node_start(hole);
+		u64 hole_end = hole_start + hole->hole_size;
+		u64 adj_start, adj_end;
+		u64 col_start, col_end;
 
-	drm_mm_interval_tree_remove(node, &mm->interval_tree);
-	list_del(&node->node_list);
-	node->allocated = 0;
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
+		if (flags == DRM_MM_INSERT_LOW && hole_start >= end)
+			break;
 
-static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
-{
-	if (end - start < size)
-		return 0;
+		if (flags == DRM_MM_INSERT_HIGH && hole_end <= start)
+			break;
 
-	if (alignment) {
-		u64 rem;
+		col_start = hole_start;
+		col_end = hole_end;
+		if (mm->color_adjust)
+			mm->color_adjust(hole, color, &col_start, &col_end);
 
-		div64_u64_rem(start, alignment, &rem);
-		if (rem)
-			start += alignment - rem;
-	}
+		adj_start = max(col_start, start);
+		adj_end = min(col_end, end);
 
-	return end >= start + size;
-}
+		if (adj_end <= adj_start || adj_end - adj_start < size)
+			continue;
 
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						      u64 size,
-						      u64 alignment,
-						      unsigned long color,
-						      enum drm_mm_search_flags flags)
-{
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
+		if (flags == DRM_MM_INSERT_HIGH)
+			adj_start = adj_end - size;
 
-	DRM_MM_BUG_ON(mm->scan_active);
+		if (alignment) {
+			u64 rem;
 
-	best = NULL;
-	best_size = ~0UL;
+			if (alignment_mask)
+				rem = adj_start & alignment_mask;
+			else
+				div64_u64_rem(adj_start, alignment, &rem);
+			if (rem) {
+				adj_start -= rem;
+				if (flags != DRM_MM_INSERT_HIGH)
+					adj_start += alignment;
 
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
+				if (adj_start < col_start ||
+				    col_end - adj_start < size)
+					continue;
 
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
+				if (adj_end <= adj_start ||
+				    adj_end - adj_start < size)
+					continue;
+			}
 		}
 
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
+		node->mm = mm;
+		node->size = size;
+		node->start = adj_start;
+		node->color = color;
+		node->hole_size = 0;
 
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
+		list_add(&node->node_list, &hole->node_list);
+		drm_mm_interval_tree_add_node(hole, node);
+		node->allocated = true;
 
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
+		rm_hole(hole);
+		if (adj_start > hole_start)
+			add_hole(hole);
+		if (adj_start + size < hole_end)
+			add_hole(node);
+
+		save_stack(node);
+		return 0;
 	}
 
-	return best;
+	return -ENOSPC;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-							u64 size,
-							u64 alignment,
-							unsigned long color,
-							u64 start,
-							u64 end,
-							enum drm_mm_search_flags flags)
+/**
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on a un-allocated node.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
 {
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
-
-	DRM_MM_BUG_ON(mm->scan_active);
-
-	best = NULL;
-	best_size = ~0UL;
-
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
-
-		if (adj_start < start)
-			adj_start = start;
-		if (adj_end > end)
-			adj_end = end;
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
 
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
-		}
+	DRM_MM_BUG_ON(!node->allocated);
+	DRM_MM_BUG_ON(node->scanned_block);
 
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
+	prev_node = list_prev_entry(node, node_list);
 
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
+	rm_hole(node);
 
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
-	}
+	drm_mm_interval_tree_remove(node, &mm->interval_tree);
+	list_del(&node->node_list);
+	node->allocated = false;
 
-	return best;
+	rm_hole(prev_node);
+	add_hole(prev_node);
 }
+EXPORT_SYMBOL(drm_mm_remove_node);
 
 /**
  * drm_mm_replace_node - move an allocation from @old to @new
@@ -664,18 +566,23 @@  void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	DRM_MM_BUG_ON(!old->allocated);
 
+	*new = *old;
+
 	list_replace(&old->node_list, &new->node_list);
-	list_replace(&old->hole_stack, &new->hole_stack);
 	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
-	new->hole_follows = old->hole_follows;
-	new->mm = old->mm;
-	new->start = old->start;
-	new->size = old->size;
-	new->color = old->color;
-	new->__subtree_last = old->__subtree_last;
-
-	old->allocated = 0;
-	new->allocated = 1;
+
+	if (old->hole_size) {
+		list_replace(&old->hole_stack, &new->hole_stack);
+		rb_replace_node(&old->rb_hole_size,
+				&new->rb_hole_size,
+				&old->mm->holes_size);
+		rb_replace_node(&old->rb_hole_addr,
+				&new->rb_hole_addr,
+				&old->mm->holes_addr);
+	}
+
+	old->allocated = false;
+	new->allocated = true;
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -799,7 +706,7 @@  bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
 		return false;
 
-	if (scan->flags == DRM_MM_CREATE_TOP)
+	if (scan->flags == DRM_MM_INSERT_HIGH)
 		adj_start = adj_end - scan->size;
 
 	if (scan->alignment) {
@@ -811,7 +718,7 @@  bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 			div64_u64_rem(adj_start, scan->alignment, &rem);
 		if (rem) {
 			adj_start -= rem;
-			if (scan->flags != DRM_MM_CREATE_TOP)
+			if (scan->flags != DRM_MM_INSERT_HIGH)
 				adj_start += scan->alignment;
 			if (adj_start < max(col_start, scan->range_start) ||
 			    max(col_end, scan->range_end) - adj_start < scan->size)
@@ -909,21 +816,22 @@  EXPORT_SYMBOL(drm_mm_scan_color_evict);
  */
 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 {
+	mm->color_adjust = NULL;
+
 	INIT_LIST_HEAD(&mm->hole_stack);
-	mm->scan_active = 0;
+	mm->interval_tree = RB_ROOT;
+	mm->holes_size = RB_ROOT;
+	mm->holes_addr = RB_ROOT;
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	mm->head_node.allocated = 0;
-	mm->head_node.hole_follows = 1;
+	mm->head_node.allocated = false;
 	mm->head_node.mm = mm;
 	mm->head_node.start = start + size;
-	mm->head_node.size = start - mm->head_node.start;
-	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+	mm->head_node.size = -size;
+	add_hole(&mm->head_node);
 
-	mm->interval_tree = RB_ROOT;
-
-	mm->color_adjust = NULL;
+	mm->scan_active = 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -945,18 +853,16 @@  EXPORT_SYMBOL(drm_mm_takedown);
 static u64 drm_mm_debug_hole(const struct drm_mm_node *entry,
 			     const char *prefix)
 {
-	u64 hole_start, hole_end, hole_size;
+	u64 start, size;
 
-	if (entry->hole_follows) {
-		hole_start = drm_mm_hole_node_start(entry);
-		hole_end = drm_mm_hole_node_end(entry);
-		hole_size = hole_end - hole_start;
-		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
-			 hole_end, hole_size);
-		return hole_size;
+	size = entry->hole_size;
+	if (size) {
+		start = drm_mm_hole_node_start(entry);
+		pr_debug("%s %#llx-%#llx: %llu: free\n",
+			 prefix, start, start + size, size);
 	}
 
-	return 0;
+	return size;
 }
 
 /**
@@ -989,7 +895,7 @@  static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry)
 {
 	u64 hole_start, hole_end, hole_size;
 
-	if (entry->hole_follows) {
+	if (entry->hole_size) {
 		hole_start = drm_mm_hole_node_start(entry);
 		hole_end = drm_mm_hole_node_end(entry);
 		hole_size = hole_end - hole_start;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 20cc33d1bfc1..d9100b565198 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -212,8 +212,7 @@  int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 		goto out_unlock;
 	}
 
-	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
 	if (ret)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 2dae3169ce48..2052fe990e26 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -107,6 +107,7 @@  static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 				   struct drm_mm_node *node, size_t size)
 {
 	struct etnaviv_vram_mapping *free = NULL;
+	unsigned int flags = DRM_MM_INSERT_LOW;
 	int ret;
 
 	lockdep_assert_held(&mmu->lock);
@@ -117,9 +118,9 @@  static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		struct list_head list;
 		bool found;
 
-		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-			size, 0, mmu->last_iova, ~0UL,
-			DRM_MM_SEARCH_DEFAULT);
+		ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0,
+						  mmu->last_iova, U64_MAX,
+						  flags);
 
 		if (ret != -ENOSPC)
 			break;
@@ -187,6 +188,8 @@  static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 			list_del_init(&m->scan_node);
 		}
 
+		flags = DRM_MM_INSERT_EVICT;
+
 		/*
 		 * We removed enough mappings so that the new allocation will
 		 * succeed.  Ensure that the MMU will be flushed before the
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7d33b607bc89..beccc4396a8a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -48,22 +48,20 @@  static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	u32 alloc_flag, search_flag;
 	u64 start, end, size;
+	unsigned int flags;
 	struct drm_mm_node *node;
 	int retried = 0;
 	int ret;
 
 	if (high_gm) {
-		search_flag = DRM_MM_SEARCH_BELOW;
-		alloc_flag = DRM_MM_CREATE_TOP;
+		flags = DRM_MM_INSERT_HIGH;
 		node = &vgpu->gm.high_gm_node;
 		size = vgpu_hidden_sz(vgpu);
 		start = gvt_hidden_gmadr_base(gvt);
 		end = gvt_hidden_gmadr_end(gvt);
 	} else {
-		search_flag = DRM_MM_SEARCH_DEFAULT;
-		alloc_flag = DRM_MM_CREATE_DEFAULT;
+		flags = DRM_MM_INSERT_LOW;
 		node = &vgpu->gm.low_gm_node;
 		size = vgpu_aperture_sz(vgpu);
 		start = gvt_aperture_gmadr_base(gvt);
@@ -75,8 +73,7 @@  static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
 						  node, size, 4096,
 						  I915_COLOR_UNEVICTABLE,
-						  start, end, search_flag,
-						  alloc_flag);
+						  start, end, flags);
 	if (ret) {
 		ret = i915_gem_evict_something(&dev_priv->ggtt.base,
 					       size, 4096,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 36183945e61a..ab92cab44bb0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -73,8 +73,7 @@  insert_mappable_node(struct i915_ggtt *ggtt,
 						   size, 0,
 						   I915_COLOR_UNEVICTABLE,
 						   0, ggtt->mappable_end,
-						   DRM_MM_SEARCH_DEFAULT,
-						   DRM_MM_CREATE_DEFAULT);
+						   DRM_MM_INSERT_LOW);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e4987e354311..49a2c492e003 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -109,6 +109,7 @@  i915_gem_evict_something(struct i915_address_space *vm,
 	}, **phase;
 	struct i915_vma *vma, *next;
 	struct drm_mm_node *node;
+	unsigned int mmflags;
 	int ret;
 
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -127,10 +128,14 @@  i915_gem_evict_something(struct i915_address_space *vm,
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
+	mmflags = 0;
+	if (flags & PIN_HIGH)
+		mmflags = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mmflags = DRM_MM_INSERT_LOW;
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
 				    min_size, alignment, cache_level,
-				    start, end,
-				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
+				    start, end, mmflags);
 
 	if (flags & PIN_NONBLOCK)
 		phases[1] = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d665a33229bd..530f7b49ebc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -440,8 +440,7 @@  static void *reloc_iomap(struct drm_i915_gem_object *obj,
 				(&ggtt->base.mm, &cache->node,
 				 4096, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
-				 DRM_MM_SEARCH_DEFAULT,
-				 DRM_MM_CREATE_DEFAULT);
+				 DRM_MM_INSERT_LOW);
 			if (ret) /* no inactive aperture space, use cpu reloc */
 				return NULL;
 		} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2083c899ab78..0005a5e7fcd4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2074,7 +2074,7 @@  static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 						  GEN6_PD_SIZE, GEN6_PD_ALIGN,
 						  I915_COLOR_UNEVICTABLE,
 						  0, ggtt->base.total,
-						  DRM_MM_TOPDOWN);
+						  DRM_MM_INSERT_HIGH);
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(&ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
@@ -2755,7 +2755,7 @@  int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 						  4096, 0,
 						  I915_COLOR_UNEVICTABLE,
 						  0, ggtt->mappable_end,
-						  0, 0);
+						  0);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index efc0e748ef89..37eb514ecec8 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -63,8 +63,7 @@  int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-					  alignment, start, end,
-					  DRM_MM_SEARCH_DEFAULT);
+					  alignment, start, end, 0);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ca62a3371d94..97e8f2a8ab03 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -322,11 +322,11 @@  bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
 	GEM_BUG_ON(list_empty(&node->node_list));
 
 	other = list_prev_entry(node, node_list);
-	if (color_differs(other, cache_level) && !other->hole_follows)
+	if (color_differs(other, cache_level) && !other->hole_size)
 		return false;
 
 	other = list_next_entry(node, node_list);
-	if (color_differs(other, cache_level) && !node->hole_follows)
+	if (color_differs(other, cache_level) && !node->hole_size)
 		return false;
 
 	return true;
@@ -410,15 +410,13 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 				goto err_unpin;
 		}
 	} else {
-		u32 search_flag, alloc_flag;
-
-		if (flags & PIN_HIGH) {
-			search_flag = DRM_MM_SEARCH_BELOW;
-			alloc_flag = DRM_MM_CREATE_TOP;
-		} else {
-			search_flag = DRM_MM_SEARCH_DEFAULT;
-			alloc_flag = DRM_MM_CREATE_DEFAULT;
-		}
+		unsigned int mmflags;
+
+		mmflags = 0;
+		if (flags & PIN_HIGH)
+			mmflags = DRM_MM_INSERT_HIGH;
+		if (flags & PIN_MAPPABLE)
+			mmflags = DRM_MM_INSERT_LOW;
 
 		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
 		 * so we know that we always have a minimum alignment of 4096.
@@ -435,15 +433,14 @@  i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 							  size, alignment,
 							  obj->cache_level,
 							  start, end,
-							  search_flag,
-							  alloc_flag);
+							  mmflags);
 		if (ret) {
 			ret = i915_gem_evict_something(vma->vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
 			if (ret == 0) {
-				search_flag = DRM_MM_SEARCH_DEFAULT;
+				mmflags = DRM_MM_INSERT_EVICT;
 				goto search_free;
 			}
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index cd06cfd94687..412669062cb7 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -54,8 +54,7 @@  static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
-			npages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 	if (ret) {
 		drm_free_large(p);
 		return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a311d26ccb21..b654eca7636a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -45,8 +45,7 @@  msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
 
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
-			0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 73353f87f46a..f2f802c60e4c 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -268,8 +268,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 		int err;
 
 		node = memset(replace ? &tmp : &nodes[n], 0, sizeof(*node));
-		err = drm_mm_insert_node(&mm, node, size, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, node, size);
 		if (err) {
 			pr_err("insert failed, step %d, start %llu\n",
 			       n, nodes[n].start);
@@ -303,8 +302,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 		struct drm_mm_node tmp;
 
 		memset(&tmp, 0, sizeof(tmp));
-		if (!drm_mm_insert_node(&mm, &tmp, size, 0,
-					DRM_MM_SEARCH_DEFAULT)) {
+		if (!drm_mm_insert_node(&mm, &tmp, size)) {
 			drm_mm_remove_node(&tmp);
 			pr_err("impossible insert succeeded, step %d, start %llu\n",
 			       n, tmp.start);
@@ -326,7 +324,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 			goto out;
 		}
 
-		if (node->hole_follows) {
+		if (node->hole_size) {
 			pr_err("node %d is followed by a hole!\n", n);
 			goto out;
 		}
@@ -349,8 +347,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 		int err;
 
 		drm_mm_remove_node(&nodes[n]);
-		err = drm_mm_insert_node(&mm, &nodes[n], size, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n], size);
 		if (err) {
 			pr_err("reinsert failed, step %d\n", n);
 			ret = err;
@@ -377,8 +374,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 			int err;
 
 			node = &nodes[order[(o + m) % count]];
-			err = drm_mm_insert_node(&mm, node, size, 0,
-						 DRM_MM_SEARCH_DEFAULT);
+			err = drm_mm_insert_node(&mm, node, size);
 			if (err) {
 				pr_err("insert failed, step %d, start %llu\n",
 				       n, node->start);
@@ -393,8 +389,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 			struct drm_mm_node tmp;
 
 			memset(&tmp, 0, sizeof(tmp));
-			if (!drm_mm_insert_node(&mm, &tmp, size, 0,
-						DRM_MM_SEARCH_DEFAULT)) {
+			if (!drm_mm_insert_node(&mm, &tmp, size)) {
 				drm_mm_remove_node(&tmp);
 				pr_err("impossible insert succeeded, start %llu\n",
 				       tmp.start);
@@ -416,7 +411,7 @@  static int __igt_insert(int count, u64 size, bool replace)
 				goto out;
 			}
 
-			if (node->hole_follows) {
+			if (node->hole_size) {
 				pr_err("node %d is followed by a hole!\n", m);
 				goto out;
 			}
@@ -505,8 +500,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 	}
 
 	for (n = 0; n < count; n++) {
-		err = drm_mm_insert_node(&mm, &nodes[n], size, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n], size);
 		if (err) {
 			pr_err("insert failed, step %d, start %llu\n",
 			       n, nodes[n].start);
@@ -523,7 +517,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 		if (!drm_mm_insert_node_in_range(&mm, &tmp,
 						 size, 0,
 						 start, end,
-						 DRM_MM_SEARCH_DEFAULT)) {
+						 0)) {
 			drm_mm_remove_node(&tmp);
 			pr_err("impossible insert succeeded, step %d, start %llu\n",
 			       n, tmp.start);
@@ -553,7 +547,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 			goto out;
 		}
 
-		if (node->hole_follows) {
+		if (node->hole_size) {
 			pr_err("node %d is followed by a hole!\n", n);
 			goto out;
 		}
@@ -568,7 +562,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 		drm_mm_remove_node(&nodes[n]);
 		err = drm_mm_insert_node_in_range(&mm, &nodes[n], size, 0,
 						  start, end,
-						  DRM_MM_SEARCH_DEFAULT);
+						  0);
 		if (err) {
 			pr_err("reinsert failed, step %d\n", n);
 			ret = err;
@@ -589,7 +583,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 	for (n = start_n; n <= end_n; n++) {
 		err = drm_mm_insert_node_in_range(&mm, &nodes[n], size, 0,
 						  start, end,
-						  DRM_MM_SEARCH_DEFAULT);
+						  0);
 		if (err) {
 			pr_err("reinsert failed, step %d\n", n);
 			ret = err;
@@ -619,7 +613,7 @@  static int __igt_insert_range(int count, u64 size, u64 start, u64 end)
 			goto out;
 		}
 
-		if (node->hole_follows) {
+		if (node->hole_size) {
 			pr_err("node %d is followed by a hole!\n", n);
 			goto out;
 		}
@@ -689,9 +683,7 @@  static int igt_align(void *ignored)
 		}
 
 		size = drm_next_prime_number(prime);
-		err = drm_mm_insert_node_generic(&mm, node, size, prime, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		err = drm_mm_insert_node_generic(&mm, node, size, prime, 0, 0);
 		if (err) {
 			pr_err("insert failed with alignment=%d", prime);
 			ret = err;
@@ -736,9 +728,7 @@  static int igt_align_pot(int max)
 
 		align = BIT_ULL(bit);
 		size = BIT_ULL(bit-1) + 1;
-		err = drm_mm_insert_node_generic(&mm, node, size, align, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		err = drm_mm_insert_node_generic(&mm, node, size, align, 0, 0);
 		if (err) {
 			pr_err("insert failed with alignment=%llx [%d]",
 			       align, bit);
@@ -839,8 +829,7 @@  static int igt_evict(void *ignored)
 	for (n = 0; n < size; n++) {
 		int err;
 
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -924,8 +913,8 @@  static int igt_evict(void *ignored)
 			drm_mm_remove_node(&e->node);
 
 		memset(&tmp, 0, sizeof(tmp));
-		err = drm_mm_insert_node(&mm, &tmp, nsize, n,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node_generic(&mm, &tmp, nsize, n, 0,
+						 DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
 			       nsize, n);
@@ -935,9 +924,9 @@  static int igt_evict(void *ignored)
 			goto out;
 		}
 
-		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_size) {
 			pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%d], start=%llx, hole-follows?=%d\n",
-			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, tmp.hole_follows);
+			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, !!tmp.hole_size);
 
 			drm_mm_remove_node(&tmp);
 			goto out;
@@ -985,8 +974,8 @@  static int igt_evict(void *ignored)
 			drm_mm_remove_node(&e->node);
 
 		memset(&tmp, 0, sizeof(tmp));
-		err = drm_mm_insert_node(&mm, &tmp, nsize, n,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node_generic(&mm, &tmp, nsize, n, 0,
+						 DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d (prime)\n",
 			       nsize, n);
@@ -996,9 +985,9 @@  static int igt_evict(void *ignored)
 			goto out;
 		}
 
-		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_size) {
 			pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%d] (prime), start=%llx, hole-follows?=%d\n",
-			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, tmp.hole_follows);
+			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, !!tmp.hole_size);
 
 			drm_mm_remove_node(&tmp);
 			goto out;
@@ -1058,8 +1047,7 @@  static int igt_evict_range(void *ignored)
 	for (n = 0; n < size; n++) {
 		int err;
 
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1101,7 +1089,7 @@  static int igt_evict_range(void *ignored)
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_in_range(&mm, &tmp, nsize, n,
 						  range_start, range_end,
-						  DRM_MM_SEARCH_DEFAULT);
+						  DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
 			       nsize, n);
@@ -1118,9 +1106,9 @@  static int igt_evict_range(void *ignored)
 			goto out;
 		}
 
-		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_size) {
 			pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%d], start=%llx, hole-follows?=%d\n",
-			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, tmp.hole_follows);
+			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, !!tmp.hole_size);
 
 			drm_mm_remove_node(&tmp);
 			goto out;
@@ -1172,7 +1160,7 @@  static int igt_evict_range(void *ignored)
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_in_range(&mm, &tmp, nsize, n,
 						  range_start, range_end,
-						  DRM_MM_SEARCH_DEFAULT);
+						  DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d (prime)\n",
 			       nsize, n);
@@ -1189,9 +1177,9 @@  static int igt_evict_range(void *ignored)
 			goto out;
 		}
 
-		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_follows) {
+		if ((int)tmp.start % n || tmp.size != nsize || tmp.hole_size) {
 			pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%d] (prime), start=%llx, hole-follows?=%d\n",
-			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, tmp.hole_follows);
+			       tmp.size, nsize, n, (int)tmp.start % n, tmp.start, !!tmp.hole_size);
 
 			drm_mm_remove_node(&tmp);
 			goto out;
@@ -1251,15 +1239,14 @@  static int igt_topdown(void *ignored)
 		int err;
 
 		err = drm_mm_insert_node_generic(&mm, &nodes[n], 1, 0, 0,
-						 DRM_MM_SEARCH_BELOW,
-						 DRM_MM_CREATE_TOP);
+						 DRM_MM_INSERT_HIGH);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
 			goto out;
 		}
 
-		if (nodes[n].hole_follows) {
+		if (nodes[n].hole_size) {
 			pr_err("hole after topdown insert %d, start=%llx\n",
 			       n, nodes[n].start);
 			goto out;
@@ -1278,15 +1265,14 @@  static int igt_topdown(void *ignored)
 
 			node = &nodes[order[(o + m) % size]];
 			err = drm_mm_insert_node_generic(&mm, node, 1, 0, 0,
-							 DRM_MM_SEARCH_BELOW,
-							 DRM_MM_CREATE_TOP);
+							 DRM_MM_INSERT_HIGH);
 			if (err) {
 				pr_err("insert failed, step %d/%d\n", m, n);
 				ret = err;
 				goto out;
 			}
 
-			if (node->hole_follows) {
+			if (node->hole_size) {
 				pr_err("hole after topdown insert %d/%d, start=%llx\n",
 				       m, n, node->start);
 				goto out;
@@ -1349,8 +1335,7 @@  static int igt_topdown_align(void *ignored)
 			u64 align = BIT_ULL(n);
 
 			err = drm_mm_insert_node_generic(&mm, &tmp, 1, align, 0,
-							 DRM_MM_SEARCH_BELOW,
-							 DRM_MM_CREATE_TOP);
+							 DRM_MM_INSERT_HIGH);
 			drm_mm_remove_node(&tmp);
 			if (err) {
 				pr_err("insert failed, ret=%d\n", err);
@@ -1375,8 +1360,7 @@  static int igt_topdown_align(void *ignored)
 			u64 rem;
 
 			err = drm_mm_insert_node_generic(&mm, &tmp, 1, n, 0,
-							 DRM_MM_SEARCH_BELOW,
-							 DRM_MM_CREATE_TOP);
+							 DRM_MM_INSERT_HIGH);
 			drm_mm_remove_node(&tmp);
 			if (err) {
 				pr_err("insert failed, ret=%d\n", err);
@@ -1430,12 +1414,12 @@  static int igt_color(void *ignored)
 	struct drm_mm_node *node, *nn;
 	const struct modes {
 		const char *name;
-		unsigned int search;
-		unsigned int create;
+		unsigned int flags;
 	} modes[] = {
-		{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-		{ "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
-		{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
+		{ "best", 0 },
+		{ "evict", DRM_MM_INSERT_EVICT },
+		{ "top-down", DRM_MM_INSERT_HIGH },
+		{ "bottom-up", DRM_MM_INSERT_LOW },
 	};
 	int ret = -EINVAL;
 	int n, m;
@@ -1451,9 +1435,7 @@  static int igt_color(void *ignored)
 			goto out;
 		}
 
-		err = drm_mm_insert_node_generic(&mm, node, n, 0, n,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		err = drm_mm_insert_node_generic(&mm, node, n, 0, n, 0);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			kfree(node);
@@ -1539,8 +1521,7 @@  static int igt_color(void *ignored)
 			}
 
 			err = drm_mm_insert_node_generic(&mm, node, n, n, n,
-							 modes[m].search,
-							 modes[m].create);
+							 modes[m].flags);
 			if (err) {
 				pr_err("%s insert failed, step %d, err=%d\n",
 				       modes[m].name, n, err);
@@ -1560,7 +1541,7 @@  static int igt_color(void *ignored)
 				goto out;
 			}
 
-			if (!node->hole_follows &&
+			if (!node->hole_size &&
 			    list_next_entry(node, node_list)->allocated) {
 				pr_err("%s colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 				       modes[m].name,
@@ -1623,9 +1604,7 @@  static int igt_color_evict(void *ignored)
 		int err;
 
 		err = drm_mm_insert_node_generic(&mm, &nodes[n].node,
-						 1, 0, color++,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+						 1, 0, color++, 0);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1670,8 +1649,7 @@  static int igt_color_evict(void *ignored)
 
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_generic(&mm, &tmp, nsize, n, c,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+						 DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%d, err=%d\n",
 			       nsize, n, c, err);
@@ -1689,7 +1667,7 @@  static int igt_color_evict(void *ignored)
 			goto out;
 		}
 
-		if (!tmp.hole_follows &&
+		if (!tmp.hole_size &&
 		    list_next_entry(&tmp, node_list)->allocated) {
 			pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 			       tmp.color, tmp.start, tmp.size,
@@ -1748,8 +1726,7 @@  static int igt_color_evict(void *ignored)
 
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_generic(&mm, &tmp, nsize, n, c,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+						 DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d (prime), color=%d, err=%d\n",
 			       nsize, n, c, err);
@@ -1767,7 +1744,7 @@  static int igt_color_evict(void *ignored)
 			goto out;
 		}
 
-		if (!tmp.hole_follows &&
+		if (!tmp.hole_size &&
 		    list_next_entry(&tmp, node_list)->allocated) {
 			pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 			       tmp.color, tmp.start, tmp.size,
@@ -1836,9 +1813,7 @@  static int igt_color_evict_range(void *ignored)
 		int err;
 
 		err = drm_mm_insert_node_generic(&mm, &nodes[n].node,
-						 1, 0, color++,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+						 1, 0, color++, 0);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1886,8 +1861,7 @@  static int igt_color_evict_range(void *ignored)
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_in_range_generic(&mm, &tmp, nsize, n, c,
 							  range_start, range_end,
-							  DRM_MM_SEARCH_DEFAULT,
-							  DRM_MM_CREATE_DEFAULT);
+							  DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%d, err=%d\n",
 			       nsize, n, c, err);
@@ -1905,7 +1879,7 @@  static int igt_color_evict_range(void *ignored)
 			goto out;
 		}
 
-		if (!tmp.hole_follows &&
+		if (!tmp.hole_size &&
 		    list_next_entry(&tmp, node_list)->allocated) {
 			pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 			       tmp.color, tmp.start, tmp.size,
@@ -1967,8 +1941,7 @@  static int igt_color_evict_range(void *ignored)
 		memset(&tmp, 0, sizeof(tmp));
 		err = drm_mm_insert_node_in_range_generic(&mm, &tmp, nsize, n, c,
 							  range_start, range_end,
-							  DRM_MM_SEARCH_DEFAULT,
-							  DRM_MM_CREATE_DEFAULT);
+							  DRM_MM_INSERT_EVICT);
 		if (err) {
 			pr_err("Failed to insert into eviction hole: size=%d, align=%d (prime), color=%d, err=%d\n",
 			       nsize, n, c, err);
@@ -1986,7 +1959,7 @@  static int igt_color_evict_range(void *ignored)
 			goto out;
 		}
 
-		if (!tmp.hole_follows &&
+		if (!tmp.hole_size &&
 		    list_next_entry(&tmp, node_list)->allocated) {
 			pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
 			       tmp.color, tmp.start, tmp.size,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 03defda77766..1622db24cd39 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,8 +109,7 @@  static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 	if (pool == AGP_TYPE) {
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 	} else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@  static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 #endif
 	}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 95e622e31931..96e76244c571 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -98,8 +98,8 @@  static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
 	if (!bo->mm)
 		return -ENOMEM;
 
-	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
-					 PAGE_SIZE, 0, 0, 0);
+	err = drm_mm_insert_node_generic(&tegra->mm,
+					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
 	if (err < 0) {
 		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
 			err);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index aa0bd054d3e9..a3ddc95825f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -54,9 +54,8 @@  static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node = NULL;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	struct drm_mm_node *node;
+	enum drm_mm_flags flags;
 	unsigned long lpfn;
 	int ret;
 
@@ -68,16 +67,14 @@  static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	flags = 0;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		flags = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
 					  mem->page_alignment, 0,
-					  place->fpfn, lpfn,
-					  sflags, aflags);
+					  place->fpfn, lpfn, flags);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
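
With the reworked interface, top-down placement no longer needs the paired
DRM_MM_SEARCH_BELOW/DRM_MM_CREATE_TOP flags; it collapses into the single
DRM_MM_INSERT_HIGH mode, as the hunk above shows. A minimal sketch of how a
caller might pick a mode (ttm_place_to_mm_flags() is an illustrative helper,
not part of this patch):

	static enum drm_mm_flags ttm_place_to_mm_flags(const struct ttm_place *place)
	{
		/* Top-down placement maps onto the single HIGH insertion mode. */
		if (place->flags & TTM_PL_FLAG_TOPDOWN)
			return DRM_MM_INSERT_HIGH;

		/* DRM_MM_INSERT_BEST (0) is the default best-fit search. */
		return DRM_MM_INSERT_BEST;
	}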
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7f08d681a74b..6af654c013a4 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -590,7 +590,7 @@  static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
 	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
-				 dlist_count, 1, 0);
+				 dlist_count);
 	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 6fbab1c82cb1..4aba0fa56289 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -141,8 +141,7 @@  static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
 	int ret, i;
 	u32 __iomem *dst_kernel;
 
-	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
-				 0);
+	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
 	if (ret) {
 		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
 			  ret);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf489478b..03068ab9bdc1 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -514,9 +514,9 @@  static int vc4_plane_mode_set(struct drm_plane *plane,
 	if (lbm_size) {
 		if (!vc4_state->lbm.allocated) {
 			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-			ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
-						 &vc4_state->lbm,
-						 lbm_size, 32, 0);
+			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+							 &vc4_state->lbm,
+							 lbm_size, 32, 0, 0);
 			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
 		} else {
 			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a04ef1c992d9..4217d66a5cc6 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@  int via_mem_alloc(struct drm_device *dev, void *data,
 	if (mem->type == VIA_MEM_AGP)
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	if (retval)
 		goto fail_alloc;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index aa04fb0159a7..77cb7c627e09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -673,16 +673,10 @@  static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  
 	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
-					 0, 0,
-					 DRM_MM_SEARCH_DEFAULT,
-					 DRM_MM_CREATE_DEFAULT);
+	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
-		ret = drm_mm_insert_node_generic(&man->mm, info->node,
-						 info->page_size, 0, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 
 	spin_unlock_bh(&man->lock);
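
After this conversion the common unconstrained case reduces to a size-only
call, per the drm_mm.h changes below. A hedged sketch of the pattern the
callers above now follow (mm, size and the error handling are illustrative):

	struct drm_mm_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL); /* must be cleared to 0 */
	if (!node)
		return -ENOMEM;

	err = drm_mm_insert_node(&mm, node, size); /* -ENOSPC if no hole fits */
	if (err)
		kfree(node);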
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 884166b91e90..511a790e05ed 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -54,32 +54,27 @@ 
 #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
-enum drm_mm_search_flags {
-	DRM_MM_SEARCH_DEFAULT =		0,
-	DRM_MM_SEARCH_BEST =		1 << 0,
-	DRM_MM_SEARCH_BELOW =		1 << 1,
+enum drm_mm_flags {
+	DRM_MM_INSERT_BEST = 0,
+	DRM_MM_INSERT_LOW,
+	DRM_MM_INSERT_HIGH,
+	DRM_MM_INSERT_EVICT,
 };
 
-enum drm_mm_allocator_flags {
-	DRM_MM_CREATE_DEFAULT =		0,
-	DRM_MM_CREATE_TOP =		1 << 0,
-};
-
-#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
-#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
-
 struct drm_mm_node {
+	struct drm_mm *mm;
 	struct list_head node_list;
 	struct list_head hole_stack;
 	struct rb_node rb;
-	unsigned hole_follows : 1;
-	unsigned allocated : 1;
-	bool scanned_block : 1;
-	unsigned long color;
+	struct rb_node rb_hole_size;
+	struct rb_node rb_hole_addr;
 	u64 start;
 	u64 size;
 	u64 __subtree_last;
-	struct drm_mm *mm;
+	u64 hole_size;
+	unsigned long color;
+	bool allocated : 1;
+	bool scanned_block : 1;
 #ifdef CONFIG_DRM_DEBUG_MM
 	depot_stack_handle_t stack;
 #endif
@@ -93,6 +88,8 @@  struct drm_mm {
 	struct drm_mm_node head_node;
 	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
 	struct rb_root interval_tree;
+	struct rb_root holes_size;
+	struct rb_root holes_addr;
 
 	void (*color_adjust)(const struct drm_mm_node *node,
 			     unsigned long color,
@@ -166,7 +163,7 @@  static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
  */
 static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
 {
-	DRM_MM_BUG_ON(!hole_node->hole_follows);
+	DRM_MM_BUG_ON(!hole_node->hole_size);
 	return __drm_mm_hole_node_start(hole_node);
 }
 
@@ -216,14 +213,6 @@  static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
 #define drm_mm_for_each_node_safe(entry, n, mm) \
 	list_for_each_entry_safe(entry, n, __drm_mm_nodes(mm), node_list)
 
-#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
-	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
-	     &entry->hole_stack != &(mm)->hole_stack ? \
-	     hole_start = drm_mm_hole_node_start(entry), \
-	     hole_end = drm_mm_hole_node_end(entry), \
-	     1 : 0; \
-	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
-
 /**
  * drm_mm_for_each_hole - iterator to walk over all holes
  * @entry: drm_mm_node used internally to track progress
@@ -239,25 +228,52 @@  static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
  * Implementation Note:
  * We need to inline list_for_each_entry in order to be able to set hole_start
  * and hole_end on each iteration while keeping the macro sane.
- *
- * The __drm_mm_for_each_hole version is similar, but with added support for
- * going backwards.
  */
-#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
-	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
+#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
+	for (pos = list_first_entry(&(mm)->hole_stack, \
+				    typeof(*pos), hole_stack); \
+	     &pos->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(pos), \
+	     hole_end = hole_start + pos->hole_size : 0; \
+	     pos = list_next_entry(pos, hole_stack))
 
 /*
  * Basic range manager support (drm_mm.c)
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+					struct drm_mm_node *node,
+					u64 size,
+					u64 alignment,
+					unsigned long color,
+					u64 start,
+					u64 end,
+					unsigned int flags);
+
+/**
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @flags: flags to select the insertion mode (&enum drm_mm_flags)
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+static inline int
+drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			   u64 size, u64 alignment,
+			   unsigned long color,
+			   unsigned int flags)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+						   color, 0, U64_MAX, flags);
+}
 
-int drm_mm_insert_node_generic(struct drm_mm *mm,
-			       struct drm_mm_node *node,
-			       u64 size,
-			       u64 alignment,
-			       unsigned long color,
-			       enum drm_mm_search_flags sflags,
-			       enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -276,23 +292,11 @@  int drm_mm_insert_node_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
-				     u64 size,
-				     u64 alignment,
-				     enum drm_mm_search_flags flags)
+				     u64 size)
 {
-	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
-					  DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
 }
 
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
-					struct drm_mm_node *node,
-					u64 size,
-					u64 alignment,
-					unsigned long color,
-					u64 start,
-					u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -317,11 +321,10 @@  static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      u64 alignment,
 					      u64 start,
 					      u64 end,
-					      enum drm_mm_search_flags flags)
+					      unsigned int flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-						   0, start, end, flags,
-						   DRM_MM_CREATE_DEFAULT);
+						   0, start, end, flags);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
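
For callers migrating, the before/after shape of a ranged, top-biased
insertion looks roughly like this (argument names are illustrative; the
entry point is the one declared above):

	/* Before: paired search and allocator flags. */
	err = drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						  color, start, end,
						  DRM_MM_SEARCH_BELOW,
						  DRM_MM_CREATE_TOP);

	/* After: a single flags argument selects the insertion mode. */
	err = drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						  color, start, end,
						  DRM_MM_INSERT_HIGH);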