@@ -433,7 +433,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
xe_display_enable_d3cold(xe);
return;
}
@@ -459,7 +459,7 @@ void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- if (xe->d3cold.allowed)
+ if (xe->d3cold.target_state)
xe_display_pm_suspend_late(xe);
/*
@@ -537,7 +537,7 @@ void xe_display_pm_runtime_resume(struct xe_device *xe)
if (!xe->info.probe_display)
return;
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
xe_display_disable_d3cold(xe);
return;
}
@@ -20,6 +20,7 @@
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
#include "xe_platform_types.h"
+#include "xe_pm.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -496,8 +497,8 @@ struct xe_device {
/** @d3cold.capable: Indicates if root port is d3cold capable */
bool capable;
- /** @d3cold.allowed: Indicates if d3cold is a valid device state */
- bool allowed;
+ /** @d3cold.target_state: Indicates d3cold target state */
+ enum xe_d3_state target_state;
/** @d3cold.vrsr_capable: Indicates if d3cold VRAM Self Refresh is supported */
bool vrsr_capable;
@@ -976,7 +976,7 @@ static int xe_pci_runtime_suspend(struct device *dev)
pci_save_state(pdev);
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
d3cold_toggle(pdev, D3COLD_ENABLE);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -1001,7 +1001,7 @@ static int xe_pci_runtime_resume(struct device *dev)
pci_restore_state(pdev);
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
err = pci_enable_device(pdev);
if (err)
return err;
@@ -1017,7 +1017,7 @@ static int xe_pci_runtime_idle(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct xe_device *xe = pdev_to_xe_device(pdev);
- xe_pm_d3cold_allowed_toggle(xe);
+ xe_pm_d3cold_target_state_toggle(xe);
return 0;
}
@@ -585,7 +585,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_runtime_suspend(xe);
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
err = xe_bo_evict_all(xe);
if (err)
goto out_resume;
@@ -632,7 +632,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_rpm_lockmap_acquire(xe);
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
err = xe_pcode_ready(xe, true);
if (err)
goto out;
@@ -655,7 +655,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
- if (xe->d3cold.allowed) {
+ if (xe->d3cold.target_state) {
err = xe_bo_restore_user(xe);
if (err)
goto out;
@@ -897,13 +897,13 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
}
/**
- * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
+ * xe_pm_d3cold_target_state_toggle - Check conditions to toggle target_state
* @xe: xe device instance
*
* To be called during runtime_pm idle callback.
* Check for all the D3Cold conditions ahead of runtime suspend.
*/
-void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
+void xe_pm_d3cold_target_state_toggle(struct xe_device *xe)
{
struct ttm_resource_manager *man;
u32 total_vram_used_mb = 0;
@@ -911,7 +911,7 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
int i;
if (!xe->d3cold.capable) {
- xe->d3cold.allowed = false;
+ xe->d3cold.target_state = XE_D3HOT;
return;
}
@@ -926,9 +926,9 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
mutex_lock(&xe->d3cold.lock);
if (total_vram_used_mb < xe->d3cold.vram_threshold)
- xe->d3cold.allowed = true;
+ xe->d3cold.target_state = XE_D3COLD_OFF;
else
- xe->d3cold.allowed = false;
+ xe->d3cold.target_state = XE_D3HOT;
mutex_unlock(&xe->d3cold.lock);
}
@@ -12,6 +12,12 @@
struct xe_device;
+enum xe_d3_state {
+ XE_D3HOT = 0,
+ XE_D3COLD_VRSR,
+ XE_D3COLD_OFF,
+};
+
int xe_pm_suspend(struct xe_device *xe);
int xe_pm_resume(struct xe_device *xe);
@@ -30,7 +36,7 @@ void xe_pm_runtime_get_noresume(struct xe_device *xe);
bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
-void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+void xe_pm_d3cold_target_state_toggle(struct xe_device *xe);
bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
int xe_pm_module_init(void);
Add xe_d3_state enum to add support for VRAM Self Refresh d3cold state.

Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
---
 drivers/gpu/drm/xe/display/xe_display.c |  6 +++---
 drivers/gpu/drm/xe/xe_device_types.h    |  5 +++--
 drivers/gpu/drm/xe/xe_pci.c             |  6 +++---
 drivers/gpu/drm/xe/xe_pm.c              | 16 ++++++++--------
 drivers/gpu/drm/xe/xe_pm.h              |  8 +++++++-
 5 files changed, 24 insertions(+), 17 deletions(-)