@@ -603,6 +603,32 @@ static void __init intel_graphics_quirks(int num, int slot, int func)
}
}
+struct resource intel_graphics_fake_lmem_res __ro_after_init = DEFINE_RES_MEM(0, 0);
+EXPORT_SYMBOL(intel_graphics_fake_lmem_res);
+
+static int __init early_i915_fake_lmem_init(char *s)
+{
+ u64 start;
+ int ret;
+
+ if (*s == '=')
+ s++;
+
+ ret = kstrtoull(s, 16, &start);
+ if (ret)
+ return ret;
+
+ intel_graphics_fake_lmem_res.start = start;
+ intel_graphics_fake_lmem_res.end = SZ_2G; /* Placeholder; depends on aperture size */
+
+ printk(KERN_INFO "Intel graphics fake LMEM starts at %pa\n",
+ &intel_graphics_fake_lmem_res.start);
+
+ return 0;
+}
+
+early_param("i915_fake_lmem_start", early_i915_fake_lmem_init);
+
static void __init force_disable_hpet(int num, int slot, int func)
{
#ifdef CONFIG_HPET_TIMER
@@ -252,6 +252,7 @@ void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
resource_size_t offset;
offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= intel_graphics_fake_lmem_res.start;
return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
}
@@ -262,6 +263,7 @@ void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object
resource_size_t offset;
offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= intel_graphics_fake_lmem_res.start;
return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
}
@@ -275,6 +277,7 @@ void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= intel_graphics_fake_lmem_res.start;
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}
@@ -1474,6 +1474,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ /* Check if we support fake LMEM -- enable for live selftests */
+ if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live &&
+ intel_graphics_fake_lmem_res.start) {
+ mkwrite_device_info(dev_priv)->memory_regions =
+ REGION_SMEM | REGION_LMEM;
+ GEM_BUG_ON(!HAS_LMEM(dev_priv));
+ }
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
@@ -2747,6 +2747,9 @@ int i915_gem_init_memory_regions(struct drm_i915_private *i915)
case INTEL_STOLEN:
mem = i915_gem_stolen_setup(i915);
break;
+ case INTEL_LMEM:
+ mem = intel_setup_fake_lmem(i915);
+ break;
}
if (IS_ERR(mem)) {
@@ -9,6 +9,7 @@
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/io-mapping.h>
+#include <drm/drm_mm.h>
#include "i915_buddy.h"
@@ -69,6 +70,9 @@ struct intel_memory_region {
struct io_mapping iomap;
struct resource region;
+ /* For fake LMEM */
+ struct drm_mm_node fake_mappable;
+
struct i915_buddy_mm mm;
struct mutex mm_lock;
@@ -41,9 +41,41 @@ lmem_create_object(struct intel_memory_region *mem,
return obj;
}
+static int init_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ unsigned long n;
+ int ret;
+
+ mem->fake_mappable.start = 0;
+ mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
+
+ ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
+ if (ret)
+ return ret;
+
+ /* 1:1 map the mappable aperture to our reserved region */
+ for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ mem->region.start + (n << PAGE_SHIFT),
+ n << PAGE_SHIFT, I915_CACHE_NONE, 0);
+ }
+
+ return 0;
+}
+
+static void release_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ if (drm_mm_node_allocated(&mem->fake_mappable))
+ drm_mm_remove_node(&mem->fake_mappable);
+}
+
static void
region_lmem_release(struct intel_memory_region *mem)
{
+ release_fake_lmem_bar(mem);
io_mapping_fini(&mem->iomap);
intel_memory_region_release_buddy(mem);
}
@@ -53,6 +85,11 @@ region_lmem_init(struct intel_memory_region *mem)
{
int ret;
+ if (intel_graphics_fake_lmem_res.start) {
+ ret = init_fake_lmem_bar(mem);
+ GEM_BUG_ON(ret);
+ }
+
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
resource_size(&mem->region)))
@@ -70,3 +107,35 @@ const struct intel_memory_region_ops intel_region_lmem_ops = {
.release = region_lmem_release,
.create_object = lmem_create_object,
};
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t mappable_end;
+ resource_size_t io_start;
+ resource_size_t start;
+
+ GEM_BUG_ON(HAS_MAPPABLE_APERTURE(i915));
+ GEM_BUG_ON(!intel_graphics_fake_lmem_res.start);
+
+ /* Your mappable aperture belongs to me now! */
+ mappable_end = pci_resource_len(pdev, 2);
+ io_start = pci_resource_start(pdev, 2);
+ start = intel_graphics_fake_lmem_res.start;
+
+ mem = intel_memory_region_create(i915,
+ start,
+ mappable_end,
+ I915_GTT_PAGE_SIZE_4K,
+ io_start,
+ &intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
+ DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ }
+
+ return mem;
+}
@@ -6,6 +6,11 @@
#ifndef __INTEL_REGION_LMEM_H
#define __INTEL_REGION_LMEM_H
+struct drm_i915_private;
+
extern const struct intel_memory_region_ops intel_region_lmem_ops;
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915);
+
#endif /* !__INTEL_REGION_LMEM_H */
@@ -39,6 +39,9 @@ bool i915_gpu_turbo_disable(void);
/* Exported from arch/x86/kernel/early-quirks.c */
extern struct resource intel_graphics_stolen_res;
+/* Exported from arch/x86/kernel/early-quirks.c */
+extern struct resource intel_graphics_fake_lmem_res;
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
Intended for upstream testing so that we can still exercise the LMEM
plumbing and !HAS_MAPPABLE_APERTURE paths. Smoke tested on a Skull Canyon
device.

This works by allocating an intel_memory_region for a reserved portion of
system memory, which we treat like LMEM. For the LMEMBAR we steal the
aperture and 1:1 map it to the stolen region.

To enable, simply set i915_fake_lmem_start= on the kernel cmdline with the
start of the reserved region (see memmap=). The size of the region we can
use is determined by the size of the mappable aperture, so the size of the
reserved region should be >= mappable_end. e.g.

    memmap=2G$16G i915_fake_lmem_start=0x400000000

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 arch/x86/kernel/early-quirks.c             | 26 ++++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c   |  3 +
 drivers/gpu/drm/i915/i915_drv.c            |  8 +++
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  3 +
 drivers/gpu/drm/i915/intel_memory_region.h |  4 ++
 drivers/gpu/drm/i915/intel_region_lmem.c   | 69 ++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_region_lmem.h   |  5 ++
 include/drm/i915_drm.h                     |  3 +
 8 files changed, 121 insertions(+)
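
[Not part of the patch] As a standalone illustration of the 1:1 mapping
arithmetic the i915_gem_lmem.c hunks rely on (GGTT page n of the stolen
aperture is backed by fake_lmem_start + n * PAGE_SIZE, so a DMA address
inside the reserved region converts to an aperture offset by subtracting
intel_graphics_fake_lmem_res.start), here is a minimal userspace C sketch;
the constants and helper names (fake_lmem_start, backing_addr,
aperture_offset) are illustrative only and not part of the driver:

    /* Userspace sketch of the fake-LMEM address arithmetic, not kernel code. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    /* e.g. memmap=2G$16G i915_fake_lmem_start=0x400000000 */
    static const uint64_t fake_lmem_start = 0x400000000ULL; /* 16G */
    static const uint64_t mappable_size   = 2ULL << 30;     /* 2G aperture */

    /* System address backing GGTT page n of the (stolen) mappable aperture. */
    static uint64_t backing_addr(uint64_t n)
    {
            return fake_lmem_start + (n << PAGE_SHIFT);
    }

    /*
     * Convert a DMA address inside the fake region to an aperture offset,
     * mirroring the "offset -= intel_graphics_fake_lmem_res.start" hunks.
     */
    static uint64_t aperture_offset(uint64_t dma_addr)
    {
            assert(dma_addr >= fake_lmem_start &&
                   dma_addr < fake_lmem_start + mappable_size);
            return dma_addr - fake_lmem_start;
    }

    int main(void)
    {
            uint64_t n;

            /* Round-trip a few pages through the mapping. */
            for (n = 0; n < mappable_size >> PAGE_SHIFT; n += (1ULL << 18))
                    assert(aperture_offset(backing_addr(n)) == n << PAGE_SHIFT);

            printf("offset of page 1 = 0x%llx\n",
                   (unsigned long long)aperture_offset(backing_addr(1)));
            return 0;
    }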