@@ -894,14 +894,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* slots as well as the bo size.
*/
dev_priv->has_gmr = true;
- dev_priv->bdev.man[VMW_PL_GMR].func = &vmw_gmrid_manager_func;
- dev_priv->bdev.man[VMW_PL_GMR].available_caching = TTM_PL_FLAG_CACHED;
- dev_priv->bdev.man[VMW_PL_GMR].default_caching = TTM_PL_FLAG_CACHED;
/* TODO: This is most likely not correct */
- dev_priv->bdev.man[VMW_PL_GMR].use_tt = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
- refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
- VMW_PL_GMR) != 0) {
+ refuse_dma ||
+ vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
DRM_INFO("No GMR memory available. "
"Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
@@ -909,13 +905,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)

if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
- dev_priv->bdev.man[VMW_PL_MOB].func = &vmw_gmrid_manager_func;
- dev_priv->bdev.man[VMW_PL_MOB].available_caching = TTM_PL_FLAG_CACHED;
- dev_priv->bdev.man[VMW_PL_MOB].default_caching = TTM_PL_FLAG_CACHED;
- /* TODO: This is most likely not correct */
- dev_priv->bdev.man[VMW_PL_MOB].use_tt = true;
- if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
- VMW_PL_MOB) != 0) {
+
+ if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
DRM_INFO("No MOB memory available. "
"3D will be disabled.\n");
dev_priv->has_mob = false;
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1221,7 +1221,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
* GMR Id manager
*/

-extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type);

/**
* Prime - vmwgfx_prime.c
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -94,22 +94,28 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
}
}

-static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
- struct vmw_private *dev_priv =
- container_of(man->bdev, struct vmw_private, bdev);
+ struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);

if (unlikely(!gman))
return -ENOMEM;

+ man->func = &vmw_gmrid_manager_func;
+ man->available_caching = TTM_PL_FLAG_CACHED;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ /* TODO: This is most likely not correct */
+ man->use_tt = true;
+ ttm_bo_init_mm_base(&dev_priv->bdev, man, 0);
spin_lock_init(&gman->lock);
gman->used_gmr_pages = 0;
ida_init(&gman->gmr_ida);

- switch (p_size) {
+ switch (type) {
case VMW_PL_GMR:
gman->max_gmr_ids = dev_priv->max_gmr_ids;
gman->max_gmr_pages = dev_priv->max_gmr_pages;
@@ -143,8 +149,7 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
drm_printf(printer, "No debug info available for the GMR id manager\n");
}

-const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
- .init = vmw_gmrid_man_init,
+static const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
.takedown = vmw_gmrid_man_takedown,
.get_node = vmw_gmrid_man_get_node,
.put_node = vmw_gmrid_man_put_node,