@@ -744,7 +744,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
-bool intel_enable_gtt(void)
+bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -787,7 +787,7 @@ bool intel_enable_gtt(void)
return true;
}
-EXPORT_SYMBOL(intel_enable_gtt);
+EXPORT_SYMBOL(intel_gmch_enable_gtt);
static int i830_setup(void)
{
@@ -821,8 +821,8 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
static int intel_fake_agp_configure(void)
{
- if (!intel_enable_gtt())
- return -EIO;
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
intel_private.clear_fake_agp = true;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
@@ -844,20 +844,20 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags)
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_insert_page);
+EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags)
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags)
{
struct scatterlist *sg;
unsigned int len, m;
@@ -879,13 +879,13 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
#if IS_ENABLED(CONFIG_AGP_INTEL)
-static void intel_gtt_insert_pages(unsigned int first_entry,
- unsigned int num_entries,
- struct page **pages,
- unsigned int flags)
+static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
+ unsigned int num_entries,
+ struct page **pages,
+ unsigned int flags)
{
int i, j;
@@ -905,7 +905,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (intel_private.clear_fake_agp) {
int start = intel_private.stolen_size / PAGE_SIZE;
int end = intel_private.gtt_mappable_entries;
- intel_gtt_clear_range(start, end - start);
+ intel_gmch_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@@ -934,12 +934,12 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (ret != 0)
return ret;
- intel_gtt_insert_sg_entries(&st, pg_start, type);
+ intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
mem->sg_list = st.sgl;
mem->num_sg = st.nents;
} else
- intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
- type);
+ intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
out:
ret = 0;
@@ -949,7 +949,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
}
#endif
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
unsigned int i;
@@ -959,7 +959,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
}
wmb();
}
-EXPORT_SYMBOL(intel_gtt_clear_range);
+EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
@@ -968,7 +968,7 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
- intel_gtt_clear_range(pg_start, mem->page_count);
+ intel_gmch_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
@@ -1431,22 +1431,22 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end)
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*mappable_base = intel_private.gma_bus_addr;
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
-EXPORT_SYMBOL(intel_gtt_get);
+EXPORT_SYMBOL(intel_gmch_gtt_get);
-void intel_gtt_chipset_flush(void)
+void intel_gmch_gtt_flush(void)
{
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_chipset_flush);
+EXPORT_SYMBOL(intel_gmch_gtt_flush);
void intel_gmch_remove(void)
{
@@ -134,7 +134,7 @@ static void gen5_ggtt_insert_page(struct i915_address_space *vm,
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+ intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
@@ -175,8 +175,8 @@ static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
- flags);
+ intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
}
/*
@@ -306,18 +306,18 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
{
- intel_gtt_chipset_flush();
+ intel_gmch_gtt_flush();
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- intel_gtt_chipset_flush();
+ intel_gmch_gtt_flush();
}
static void gen5_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+ intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
@@ -494,7 +494,7 @@ int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+ intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr =
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
@@ -647,7 +647,7 @@ int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
{
- if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
+ if (GRAPHICS_VER(i915) < 6 && !intel_gmch_enable_gtt())
return -EIO;
return 0;
@@ -10,24 +10,24 @@ struct agp_bridge_data;
struct pci_dev;
struct sg_table;
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end);
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(void);
-bool intel_enable_gtt(void);
+bool intel_gmch_enable_gtt(void);
-void intel_gtt_chipset_flush(void);
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+void intel_gmch_gtt_flush(void);
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags);
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
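
For reference only (not part of the patch): a minimal sketch of how a caller, such as the i915 gen5 GGTT paths touched above, would use the renamed GMCH GTT entry points. The helper names here are hypothetical, the flag values come from the AGP backend header as used by the gen5 callers in the hunks above, and the include path assumes the header edited in the last hunk is include/drm/intel-gtt.h.

/*
 * Illustrative sketch only -- mirrors the i915 gen5 GGTT callers shown in
 * the diff, using the renamed intel_gmch_* interface.  Helper names are
 * made up for this example.
 */
#include <linux/types.h>
#include <linux/agp_backend.h>	/* AGP_USER_MEMORY, AGP_USER_CACHED_MEMORY */
#include <drm/intel-gtt.h>	/* renamed intel_gmch_gtt_* declarations */

/* Bind one page into the GTT, as gen5_ggtt_insert_page() does. */
static void example_insert_page(dma_addr_t addr, unsigned int gtt_page,
				bool cached)
{
	unsigned int flags = cached ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

	intel_gmch_gtt_insert_page(addr, gtt_page, flags);
}

/* Scrub a range of PTEs back to scratch, as gen5_ggtt_clear_range() does. */
static void example_clear_range(unsigned int first_page, unsigned int num_pages)
{
	intel_gmch_gtt_clear_range(first_page, num_pages);
}

/* Force a chipset flush, as gmch_ggtt_invalidate() does. */
static void example_invalidate(void)
{
	intel_gmch_gtt_flush();
}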