
[14/33] drm/amdgpu: prepare map process for multi-process debug devices

Message ID 20230525172745.702700-14-jonathan.kim@amd.com (mailing list archive)
State New, archived
Series [01/33] drm/amdkfd: add debug and runtime enable interface

Commit Message

Kim, Jonathan May 25, 2023, 5:27 p.m. UTC
Unlike single-process debug devices, multi-process debug devices allow
the debug mode to be set per VMID rather than globally for the whole
device.

Because the HWS manages the PASID-VMID mapping, the new MAP_PROCESS API
allows the KFD to forward the required SPI debug register write requests.

To apply a debug mode setting change, the KFD must be able to preempt
all queues and then remap them with the new settings so that MAP_PROCESS
takes effect.

Note that by default, trap enablement in non-debug mode must be disabled
on multi-process debug devices for performance reasons, due to setup
overhead in FW.

v2: spot fixup new kfd_node references

Signed-off-by: Jonathan Kim <jonathan.kim@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_debug.h        |  5 ++
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 51 +++++++++++++++++++
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  3 ++
 .../drm/amd/amdkfd/kfd_packet_manager_v9.c    | 14 +++++
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  9 ++++
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      |  5 ++
 6 files changed, 87 insertions(+)
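
To illustrate the preempt/remap flow described in the commit message, here is
a minimal caller-side sketch (an illustration only, not code from this patch;
the helper name kfd_dbg_example_set_launch_mode is hypothetical). Presumably a
later patch in the series caches the requested per-VMID setting in the
kfd_process_device and then refreshes the runlist so the HWS re-emits
MAP_PROCESS with the new value:

/*
 * Hypothetical caller (not part of this patch): cache a per-VMID debug
 * setting, then preempt and remap so MAP_PROCESS picks it up.
 */
static int kfd_dbg_example_set_launch_mode(struct kfd_process_device *pdd,
					   uint32_t launch_mode)
{
	/* pm_map_process_aldebaran() reads this when the runlist is rebuilt. */
	pdd->spi_dbg_launch_mode = launch_mode;

	/* Preempt all queues and re-emit the runlist with the new setting. */
	return debug_refresh_runlist(pdd->dev->dqm);
}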

Comments

Felix Kuehling May 30, 2023, 7:55 p.m. UTC | #1
On 2023-05-25 13:27, Jonathan Kim wrote:
> [...]
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
> index a8abfe2a0a14..db6d72e7930f 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
> @@ -29,4 +29,9 @@ int kfd_dbg_trap_disable(struct kfd_process *target);
>   int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
>   			void __user *runtime_info,
>   			uint32_t *runtime_info_size);
> +static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
> +{
> +	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2);

This needs to be updated to include 9.4.3 as well. Is that coming in a 
different patch? Other than that, this patch is

Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>


> +}
> +
>   #endif
> [...]
Kim, Jonathan May 30, 2023, 7:58 p.m. UTC | #2

> -----Original Message-----
> From: Kuehling, Felix <Felix.Kuehling@amd.com>
> Sent: Tuesday, May 30, 2023 3:56 PM
> To: Kim, Jonathan <Jonathan.Kim@amd.com>; amd-gfx@lists.freedesktop.org; dri-devel@lists.freedesktop.org
> Cc: Huang, JinHuiEric <JinHuiEric.Huang@amd.com>
> Subject: Re: [PATCH 14/33] drm/amdgpu: prepare map process for multi-process debug devices
>
> [...]
> > +static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
> > +{
> > +   return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2);
>
> This needs to be updated to include 9.4.3 as well. Is that coming in a
> different patch? Other than that, this patch is

That's correct.  This series does not enable the debugger for GFX9.4.3.
This will be a follow-up series that Eric will provide.

Thanks.

Jon

>
> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
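
For context, the follow-up extension discussed above (enabling the check for
GFX 9.4.3) would presumably widen the helper along these lines; this is a
sketch of the discussed change, not code from this series:

static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
{
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3);
}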

Patch

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
index a8abfe2a0a14..db6d72e7930f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -29,4 +29,9 @@  int kfd_dbg_trap_disable(struct kfd_process *target);
 int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
 			void __user *runtime_info,
 			uint32_t *runtime_info_size);
+static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
+{
+	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2);
+}
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c8519adc89ac..badfe1210bc4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -36,6 +36,7 @@ 
 #include "kfd_kernel_queue.h"
 #include "amdgpu_amdkfd.h"
 #include "mes_api_def.h"
+#include "kfd_debug.h"
 
 /* Size of the per-pipe EOP queue */
 #define CIK_HPD_EOP_BYTES_LOG2 11
@@ -2593,6 +2594,56 @@  int release_debug_trap_vmid(struct device_queue_manager *dqm,
 	return r;
 }
 
+int debug_lock_and_unmap(struct device_queue_manager *dqm)
+{
+	int r;
+
+	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+		return -EINVAL;
+	}
+
+	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+		return 0;
+
+	dqm_lock(dqm);
+
+	r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
+	if (r)
+		dqm_unlock(dqm);
+
+	return r;
+}
+
+int debug_map_and_unlock(struct device_queue_manager *dqm)
+{
+	int r;
+
+	if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+		pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
+		return -EINVAL;
+	}
+
+	if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
+		return 0;
+
+	r = map_queues_cpsch(dqm);
+
+	dqm_unlock(dqm);
+
+	return r;
+}
+
+int debug_refresh_runlist(struct device_queue_manager *dqm)
+{
+	int r = debug_lock_and_unmap(dqm);
+
+	if (r)
+		return r;
+
+	return debug_map_and_unlock(dqm);
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 static void seq_reg_dump(struct seq_file *m,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index bf7aa3f84182..bb75d93712eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -290,6 +290,9 @@  int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
 			struct qcm_process_device *qpd);
 int release_debug_trap_vmid(struct device_queue_manager *dqm,
 			struct qcm_process_device *qpd);
+int debug_lock_and_unmap(struct device_queue_manager *dqm);
+int debug_map_and_unlock(struct device_queue_manager *dqm);
+int debug_refresh_runlist(struct device_queue_manager *dqm);
 
 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 0fe73dbd28af..29a2d0499b67 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -88,6 +88,10 @@  static int pm_map_process_aldebaran(struct packet_manager *pm,
 {
 	struct pm4_mes_map_process_aldebaran *packet;
 	uint64_t vm_page_table_base_addr = qpd->page_table_base;
+	struct kfd_dev *kfd = pm->dqm->dev->kfd;
+	struct kfd_process_device *pdd =
+			container_of(qpd, struct kfd_process_device, qpd);
+	int i;
 
 	packet = (struct pm4_mes_map_process_aldebaran *)buffer;
 	memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
@@ -102,6 +106,16 @@  static int pm_map_process_aldebaran(struct packet_manager *pm,
 	packet->bitfields14.num_oac = qpd->num_oac;
 	packet->bitfields14.sdma_enable = 1;
 	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
+	packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
+						pdd->spi_dbg_launch_mode;
+
+	if (pdd->process->debug_trap_enabled) {
+		for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
+			packet->tcp_watch_cntl[i] = pdd->watch_points[i];
+
+		packet->bitfields2.single_memops =
+				!!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
+	}
 
 	packet->sh_mem_config = qpd->sh_mem_config;
 	packet->sh_mem_bases = qpd->sh_mem_bases;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4c912b7735b5..8fca7175daab 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -816,6 +816,12 @@  struct kfd_process_device {
 	uint64_t faults;
 	uint64_t page_in;
 	uint64_t page_out;
+
+	/* Tracks debug per-vmid request settings */
+	uint32_t spi_dbg_override;
+	uint32_t spi_dbg_launch_mode;
+	uint32_t watch_points[4];
+
 	/*
 	 * If this process has been checkpointed before, then the user
 	 * application will use the original gpu_id on the
@@ -952,6 +958,9 @@  struct kfd_process {
 
 	bool xnack_enabled;
 
+	/* Tracks debug per-vmid request for debug flags */
+	bool dbg_flags;
+
 	atomic_t poison;
 	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
 	bool queues_paused;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d63a764dafb9..0281f79fcd7d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1612,6 +1612,11 @@  struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
 	}
 
 	p->pdds[p->n_pdds++] = pdd;
+	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
+		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
+							pdd->dev->adev,
+							false,
+							0);
 
 	/* Init idr used for memory handle translation */
 	idr_init(&pdd->alloc_idr);
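
A note on the locking contract of the two new DQM helpers, with a minimal
hypothetical caller for illustration (an assumption, not code from this
series): on devices where per-VMID debugging is supported,
debug_lock_and_unmap() takes the DQM lock and preempts all queues, returning
with the lock held on success (it drops the lock itself on failure), while
debug_map_and_unlock() re-emits the runlist and then drops the lock;
debug_refresh_runlist() simply chains the two. On devices without per-VMID
support both helpers return immediately. A caller can therefore update
per-VMID state between the two calls without racing against a concurrent
runlist rebuild:

/* Hypothetical caller illustrating the locking contract (not in the patch). */
static int example_update_spi_override(struct device_queue_manager *dqm,
				       struct kfd_process_device *pdd,
				       uint32_t new_override)
{
	int r = debug_lock_and_unmap(dqm);	/* on per-VMID devices: DQM locked, queues preempted */

	if (r)
		return r;	/* lock is not held on failure */

	pdd->spi_dbg_override = new_override;	/* safe while queues are unmapped */

	return debug_map_and_unlock(dqm);	/* re-emits runlist, drops DQM lock */
}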