
[RFC,v2,1/2] drm/imagination: Add initial Imagination Technologies PowerVR driver

Message ID 20230413103419.293493-2-sarah.walker@imgtec.com (mailing list archive)
State New, archived
Series Imagination Technologies PowerVR DRM driver

Commit Message

Sarah Walker April 13, 2023, 10:34 a.m. UTC
---
 drivers/gpu/drm/imagination/Kconfig           |   11 +
 drivers/gpu/drm/imagination/Makefile          |   36 +
 drivers/gpu/drm/imagination/pvr_ccb.c         |  380 +
 drivers/gpu/drm/imagination/pvr_ccb.h         |   51 +
 drivers/gpu/drm/imagination/pvr_cccb.c        |  390 ++
 drivers/gpu/drm/imagination/pvr_cccb.h        |  112 +
 drivers/gpu/drm/imagination/pvr_context.c     | 1428 ++++
 drivers/gpu/drm/imagination/pvr_context.h     |  412 ++
 drivers/gpu/drm/imagination/pvr_debugfs.c     |   53 +
 drivers/gpu/drm/imagination/pvr_debugfs.h     |   29 +
 drivers/gpu/drm/imagination/pvr_device.c      |  762 ++
 drivers/gpu/drm/imagination/pvr_device.h      |  760 ++
 drivers/gpu/drm/imagination/pvr_device_info.c |  223 +
 drivers/gpu/drm/imagination/pvr_device_info.h |  133 +
 drivers/gpu/drm/imagination/pvr_drv.c         | 1634 +++++
 drivers/gpu/drm/imagination/pvr_drv.h         |   89 +
 drivers/gpu/drm/imagination/pvr_dump.c        |  353 +
 drivers/gpu/drm/imagination/pvr_dump.h        |   17 +
 drivers/gpu/drm/imagination/pvr_free_list.c   |  559 ++
 drivers/gpu/drm/imagination/pvr_free_list.h   |  185 +
 drivers/gpu/drm/imagination/pvr_fw.c          | 1107 +++
 drivers/gpu/drm/imagination/pvr_fw.h          |  345 +
 drivers/gpu/drm/imagination/pvr_fw_info.h     |  115 +
 drivers/gpu/drm/imagination/pvr_fw_meta.c     |  598 ++
 drivers/gpu/drm/imagination/pvr_fw_meta.h     |   14 +
 drivers/gpu/drm/imagination/pvr_fw_mips.c     |  276 +
 drivers/gpu/drm/imagination/pvr_fw_mips.h     |   38 +
 .../gpu/drm/imagination/pvr_fw_startstop.c    |  279 +
 .../gpu/drm/imagination/pvr_fw_startstop.h    |   13 +
 drivers/gpu/drm/imagination/pvr_fw_trace.c    |  505 ++
 drivers/gpu/drm/imagination/pvr_fw_trace.h    |   78 +
 drivers/gpu/drm/imagination/pvr_gem.c         | 1122 +++
 drivers/gpu/drm/imagination/pvr_gem.h         |  386 +
 drivers/gpu/drm/imagination/pvr_hwrt.c        |  551 ++
 drivers/gpu/drm/imagination/pvr_hwrt.h        |  163 +
 drivers/gpu/drm/imagination/pvr_job.c         | 1096 +++
 drivers/gpu/drm/imagination/pvr_job.h         |  116 +
 drivers/gpu/drm/imagination/pvr_params.c      |  147 +
 drivers/gpu/drm/imagination/pvr_params.h      |   72 +
 drivers/gpu/drm/imagination/pvr_power.c       |  196 +
 drivers/gpu/drm/imagination/pvr_power.h       |   37 +
 .../gpu/drm/imagination/pvr_rogue_cr_defs.h   | 6193 +++++++++++++++++
 .../imagination/pvr_rogue_cr_defs_client.h    |  160 +
 drivers/gpu/drm/imagination/pvr_rogue_defs.h  |  179 +
 drivers/gpu/drm/imagination/pvr_rogue_fwif.h  | 2271 ++++++
 .../drm/imagination/pvr_rogue_fwif_check.h    |  491 ++
 .../drm/imagination/pvr_rogue_fwif_client.h   |  369 +
 .../imagination/pvr_rogue_fwif_client_check.h |  133 +
 .../drm/imagination/pvr_rogue_fwif_common.h   |   60 +
 .../pvr_rogue_fwif_resetframework.h           |   29 +
 .../gpu/drm/imagination/pvr_rogue_fwif_sf.h   |  890 +++
 .../drm/imagination/pvr_rogue_fwif_shared.h   |  258 +
 .../imagination/pvr_rogue_fwif_shared_check.h |  107 +
 .../drm/imagination/pvr_rogue_fwif_stream.h   |   78 +
 .../drm/imagination/pvr_rogue_heap_config.h   |  113 +
 drivers/gpu/drm/imagination/pvr_rogue_meta.h  |  356 +
 drivers/gpu/drm/imagination/pvr_rogue_mips.h  |  335 +
 .../drm/imagination/pvr_rogue_mips_check.h    |   56 +
 .../gpu/drm/imagination/pvr_rogue_mmu_defs.h  |  136 +
 drivers/gpu/drm/imagination/pvr_stream.c      |  321 +
 drivers/gpu/drm/imagination/pvr_stream.h      |   74 +
 drivers/gpu/drm/imagination/pvr_stream_defs.c |  270 +
 drivers/gpu/drm/imagination/pvr_stream_defs.h |   14 +
 drivers/gpu/drm/imagination/pvr_vendor.h      |   77 +
 drivers/gpu/drm/imagination/pvr_vm.c          | 3811 ++++++++++
 drivers/gpu/drm/imagination/pvr_vm.h          |   99 +
 drivers/gpu/drm/imagination/pvr_vm_mips.c     |  223 +
 drivers/gpu/drm/imagination/pvr_vm_mips.h     |   22 +
 .../gpu/drm/imagination/vendor/pvr_mt8173.c   |  121 +
 include/uapi/drm/pvr_drm.h                    | 1502 ++++
 70 files changed, 33619 insertions(+)
 create mode 100644 drivers/gpu/drm/imagination/Kconfig
 create mode 100644 drivers/gpu/drm/imagination/Makefile
 create mode 100644 drivers/gpu/drm/imagination/pvr_ccb.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_ccb.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_cccb.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_cccb.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_context.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_context.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_debugfs.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_debugfs.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_device.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_device.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_device_info.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_device_info.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_drv.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_drv.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_dump.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_dump.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_free_list.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_free_list.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_info.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_meta.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_meta.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_mips.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_mips.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_startstop.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_startstop.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_trace.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_fw_trace.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_gem.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_gem.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_hwrt.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_hwrt.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_job.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_job.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_params.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_params.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_power.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_power.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_defs.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_meta.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mips.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_stream.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_stream.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_stream_defs.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_stream_defs.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_vendor.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_vm.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_vm.h
 create mode 100644 drivers/gpu/drm/imagination/pvr_vm_mips.c
 create mode 100644 drivers/gpu/drm/imagination/pvr_vm_mips.h
 create mode 100644 drivers/gpu/drm/imagination/vendor/pvr_mt8173.c
 create mode 100644 include/uapi/drm/pvr_drm.h

Patch

diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
new file mode 100644
index 000000000000..83248822d0ed
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -0,0 +1,11 @@ 
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright (c) 2022 Imagination Technologies Ltd.
+
+config DRM_POWERVR
+	tristate "Imagination Technologies PowerVR Graphics"
+	depends on ARM64
+	depends on DRM
+	select FW_LOADER
+	help
+	  Choose this option if you have a system that has an Imagination
+	  Technologies PowerVR Rogue GPU.
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
new file mode 100644
index 000000000000..f3dc71828187
--- /dev/null
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -0,0 +1,36 @@ 
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright (c) 2022 Imagination Technologies Ltd.
+
+subdir-ccflags-y := -I$(srctree)/$(src)
+
+powervr-y := \
+	pvr_ccb.o \
+	pvr_cccb.o \
+	pvr_context.o \
+	pvr_device.o \
+	pvr_device_info.o \
+	pvr_drv.o \
+	pvr_dump.o \
+	pvr_free_list.o \
+	pvr_fw.o \
+	pvr_fw_meta.o \
+	pvr_fw_mips.o \
+	pvr_fw_startstop.o \
+	pvr_fw_trace.o \
+	pvr_gem.o \
+	pvr_hwrt.o \
+	pvr_job.o \
+	pvr_params.o \
+	pvr_power.o \
+	pvr_stream.o \
+	pvr_stream_defs.o \
+	pvr_vm.o \
+	pvr_vm_mips.o
+
+powervr-y += \
+	vendor/pvr_mt8173.o
+
+powervr-$(CONFIG_DEBUG_FS) += \
+	pvr_debugfs.o
+
+obj-$(CONFIG_DRM_POWERVR) += powervr.o
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
new file mode 100644
index 000000000000..85eae4bc4d72
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.c
@@ -0,0 +1,380 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_dump.h"
+#include "pvr_free_list.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define ACQUIRE_SLOT_TIMEOUT (1 * HZ) /* 1s */
+
+/**
+ * pvr_ccb_init() - Initialise a CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_ccb: Pointer to CCB structure to initialise.
+ * @num_cmds_log2: Log2 of number of commands in this CCB.
+ * @cmd_size: Command size for this CCB.
+ *
+ * Return:
+ *  * Zero on success, or
+ *  * Any error code returned by pvr_gem_create_and_map_fw_object().
+ */
+static int
+pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,
+	     u32 num_cmds_log2, size_t cmd_size)
+{
+	u32 num_cmds = 1 << num_cmds_log2;
+	u32 ccb_size = num_cmds * cmd_size;
+	int err;
+
+	mutex_init(&pvr_ccb->lock);
+
+	/*
+	 * Map CCB and control structure as uncached, so we don't have to flush
+	 * CPU cache repeatedly when polling for space.
+	 */
+	pvr_ccb->ctrl = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*pvr_ccb->ctrl),
+							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+							 &pvr_ccb->ctrl_obj);
+	if (IS_ERR(pvr_ccb->ctrl)) {
+		err = PTR_ERR(pvr_ccb->ctrl);
+		goto err_out;
+	}
+
+	pvr_ccb->ccb = pvr_gem_create_and_map_fw_object(pvr_dev, ccb_size,
+							PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							DRM_PVR_BO_CREATE_ZEROED,
+							&pvr_ccb->ccb_obj);
+	if (IS_ERR(pvr_ccb->ccb)) {
+		err = PTR_ERR(pvr_ccb->ccb);
+		goto err_free_ctrl;
+	}
+
+	pvr_gem_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr);
+	pvr_gem_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr);
+
+	pvr_ccb->ctrl->write_offset = 0;
+	pvr_ccb->ctrl->read_offset = 0;
+	pvr_ccb->ctrl->wrap_mask = num_cmds - 1;
+	pvr_ccb->ctrl->cmd_size = cmd_size;
+
+	return 0;
+
+err_free_ctrl:
+	pvr_fw_object_vunmap(pvr_ccb->ctrl_obj, false);
+	pvr_fw_object_release(pvr_ccb->ctrl_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_ccb_fini() - Release CCB structure
+ * @pvr_ccb: CCB to release.
+ */
+void
+pvr_ccb_fini(struct pvr_ccb *pvr_ccb)
+{
+	pvr_fw_object_vunmap(pvr_ccb->ccb_obj, false);
+	pvr_fw_object_release(pvr_ccb->ccb_obj);
+
+	pvr_fw_object_vunmap(pvr_ccb->ctrl_obj, false);
+	pvr_fw_object_release(pvr_ccb->ctrl_obj);
+}
+
+/**
+ * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB
+ * @pvr_ccb: CCB to test.
+ * @write_offset: Address to store the offset of the next available slot. May be %NULL.
+ *
+ * Caller must hold @pvr_ccb->lock.
+ *
+ * Return:
+ *  * %true if a slot is available, or
+ *  * %false if no slot is available.
+ */
+static __always_inline bool
+pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
+{
+	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+	u32 next_write_offset = (ctrl->write_offset + 1) & ctrl->wrap_mask;
+
+	lockdep_assert_held(&pvr_ccb->lock);
+
+	if (ctrl->read_offset != next_write_offset) {
+		if (write_offset)
+			*write_offset = next_write_offset;
+		return true;
+	}
+
+	return false;
+}
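+
+/*
+ * Illustrative example (not part of the original patch): with
+ * num_cmds_log2 = 2 the CCB has 4 slots and wrap_mask = 3. If
+ * write_offset = 3 and read_offset = 0, then next_write_offset =
+ * (3 + 1) & 3 = 0 == read_offset, so the CCB is considered full. One slot
+ * is always left unused so that a full CCB can be distinguished from an
+ * empty one (read_offset == write_offset).
+ */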
+
+/**
+ * pvr_ccb_acquire_slot_locked() - Acquire slot in CCB
+ * @pvr_ccb: CCB to acquire slot in.
+ * @write_offset: Address to store acquired slot number.
+ *
+ * Caller must hold @pvr_ccb->lock.
+ *
+ * Return:
+ *  * Zero on success, or
+ *  * -EBUSY if the function times out while waiting for a slot.
+ */
+static int
+pvr_ccb_acquire_slot_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset)
+{
+	unsigned long start_timestamp = jiffies;
+
+	lockdep_assert_held(&pvr_ccb->lock);
+
+	while ((jiffies - start_timestamp) < ACQUIRE_SLOT_TIMEOUT) {
+		if (pvr_ccb_slot_available_locked(pvr_ccb, write_offset))
+			return 0;
+		usleep_range(1, 50);
+	}
+
+	return -EBUSY;
+}
+
+static void
+process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd)
+{
+	switch (cmd->cmd_type) {
+	case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+		pvr_power_lock(pvr_dev);
+
+		/* Stop FW. */
+		WARN_ON(pvr_power_set_state(pvr_dev, PVR_POWER_STATE_OFF));
+
+		/* Clear the FW faulted flags. */
+		pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &= ~(ROGUE_FWIF_HWR_FW_FAULT |
+								  ROGUE_FWIF_HWR_RESTART_REQUESTED);
+
+		/* Start FW again. */
+		WARN_ON(pvr_power_set_state(pvr_dev, PVR_POWER_STATE_ON));
+
+		pvr_power_unlock(pvr_dev);
+		break;
+
+	case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: {
+		struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *data =
+			&cmd->cmd_data.cmd_freelists_reconstruction;
+		struct rogue_fwif_kccb_cmd resp_cmd;
+		struct rogue_fwif_freelists_reconstruction_data *resp_data =
+			&resp_cmd.cmd_data.free_lists_reconstruction_data;
+		u32 i;
+
+		for (i = 0; i < data->freelist_count; i++)
+			pvr_free_list_reconstruct(pvr_dev, data->freelist_ids[i]);
+
+		resp_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+		resp_cmd.kccb_flags = 0;
+		resp_data->freelist_count = data->freelist_count;
+
+		memcpy(resp_data->freelist_ids, data->freelist_ids,
+		       data->freelist_count * sizeof(resp_data->freelist_ids[0]));
+
+		WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL));
+		break;
+	}
+
+	case ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+		pvr_context_reset_notification(pvr_dev,
+					       &cmd->cmd_data.cmd_context_reset_notification);
+		break;
+
+	default:
+		drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n",
+			 cmd->cmd_type);
+		break;
+	}
+}
+
+/**
+ * pvr_fwccb_process_worker() - Process any pending FWCCB commands
+ * @work: Work item.
+ *
+ * Process all commands queued by the firmware on the FWCCB: GPU restart requests, freelist
+ * reconstruction requests and context reset notifications. Unknown commands are logged and
+ * otherwise ignored.
+ */
+static void
+pvr_fwccb_process_worker(struct work_struct *work)
+{
+	struct pvr_device *pvr_dev = container_of(work, struct pvr_device, fwccb_work);
+	struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb;
+	struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl;
+
+	mutex_lock(&pvr_dev->fwccb.lock);
+
+	while (ctrl->read_offset != ctrl->write_offset) {
+		struct rogue_fwif_fwccb_cmd cmd = fwccb[ctrl->read_offset];
+
+		ctrl->read_offset = (ctrl->read_offset + 1) & ctrl->wrap_mask;
+
+		/* Drop FWCCB lock while we process command. */
+		mutex_unlock(&pvr_dev->fwccb.lock);
+
+		process_fwccb_command(pvr_dev, &cmd);
+
+		mutex_lock(&pvr_dev->fwccb.lock);
+	}
+
+	mutex_unlock(&pvr_dev->fwccb.lock);
+}
+
+/**
+ * pvr_kccb_send_cmd_power_locked() - Send command to the KCCB, with the power lock held
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EBUSY on timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd_power_locked(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+			       u32 *kccb_slot)
+{
+	struct pvr_ccb *pvr_ccb = &pvr_dev->kccb;
+	struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb;
+	struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl;
+	u32 old_write_offset;
+	u32 new_write_offset;
+	int err;
+
+	lockdep_assert_held(&pvr_dev->power_lock);
+	WARN_ON(pvr_dev->power_state != PVR_POWER_STATE_ON);
+
+	mutex_lock(&pvr_ccb->lock);
+
+	old_write_offset = ctrl->write_offset;
+
+	err = pvr_ccb_acquire_slot_locked(pvr_ccb, &new_write_offset);
+	if (err)
+		goto err_unlock;
+
+	memcpy(&kccb[old_write_offset], cmd,
+	       sizeof(struct rogue_fwif_kccb_cmd));
+	if (kccb_slot) {
+		*kccb_slot = old_write_offset;
+		/* Clear return status for this slot. */
+		WRITE_ONCE(pvr_dev->kccb_rtn[old_write_offset],
+			   ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE);
+	}
+	mb(); /* Ensure the command is written before the write offset is updated. */
+	ctrl->write_offset = new_write_offset;
+
+	mutex_unlock(&pvr_ccb->lock);
+
+	/* Kick MTS */
+	pvr_fw_mts_schedule(pvr_dev,
+			    PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK);
+
+	return 0;
+
+err_unlock:
+	mutex_unlock(&pvr_ccb->lock);
+
+	return err;
+}
+
+/**
+ * pvr_kccb_send_cmd() - Send command to the KCCB
+ * @pvr_dev: Device pointer.
+ * @cmd: Command to send.
+ * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EBUSY on timeout while waiting for a free KCCB slot.
+ */
+int
+pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd,
+		  u32 *kccb_slot)
+{
+	int err;
+
+	pvr_power_lock(pvr_dev);
+
+	err = pvr_power_set_state(pvr_dev, PVR_POWER_STATE_ON);
+	if (err)
+		goto err_power_unlock;
+
+	err = pvr_kccb_send_cmd_power_locked(pvr_dev, cmd, kccb_slot);
+
+err_power_unlock:
+	pvr_power_unlock(pvr_dev);
+
+	return err;
+}
+
+/**
+ * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete
+ * @pvr_dev: Device pointer.
+ * @slot_nr: KCCB slot to wait on.
+ * @timeout: Timeout length (in jiffies).
+ * @rtn_out: Location to store KCCB command result. May be %NULL.
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -ETIMEDOUT on timeout.
+ */
+int
+pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr,
+			     u32 timeout, u32 *rtn_out)
+{
+	int ret = wait_event_timeout(pvr_dev->kccb_rtn_q, READ_ONCE(pvr_dev->kccb_rtn[slot_nr]) &
+				     ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout);
+
+	if (ret && rtn_out)
+		*rtn_out = READ_ONCE(pvr_dev->kccb_rtn[slot_nr]);
+
+	return ret ? 0 : -ETIMEDOUT;
+}
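+
+/*
+ * Usage sketch (illustrative only; a real kick must also populate cmd_data
+ * for the chosen command type):
+ *
+ *   struct rogue_fwif_kccb_cmd cmd = { .cmd_type = ROGUE_FWIF_KCCB_CMD_KICK };
+ *   u32 slot, rtn;
+ *
+ *   if (!pvr_kccb_send_cmd(pvr_dev, &cmd, &slot) &&
+ *       !pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, &rtn))
+ *           handle_response(rtn);
+ *
+ * where handle_response() is a hypothetical helper and rtn carries the
+ * ROGUE_FWIF_KCCB_RTN_* flags written back by the firmware for this slot.
+ */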
+
+/**
+ * pvr_kccb_init() - Initialise device KCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_kccb_init(struct pvr_device *pvr_dev)
+{
+	return pvr_ccb_init(pvr_dev, &pvr_dev->kccb,
+			    ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT,
+			    sizeof(struct rogue_fwif_kccb_cmd));
+}
+
+/**
+ * pvr_fwccb_init() - Initialise device FWCCB
+ * @pvr_dev: Target PowerVR device
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_ccb_init().
+ */
+int
+pvr_fwccb_init(struct pvr_device *pvr_dev)
+{
+	INIT_WORK(&pvr_dev->fwccb_work, pvr_fwccb_process_worker);
+
+	return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb,
+			    ROGUE_FWIF_FWCCB_NUMCMDS_LOG2,
+			    sizeof(struct rogue_fwif_fwccb_cmd));
+}
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.h b/drivers/gpu/drm/imagination/pvr_ccb.h
new file mode 100644
index 000000000000..2b4bb522302e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_ccb.h
@@ -0,0 +1,51 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_CCB_H__
+#define __PVR_CCB_H__
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+struct pvr_ccb {
+	/** @ctrl_obj: FW object representing CCB control structure. */
+	struct pvr_fw_object *ctrl_obj;
+	/** @ccb_obj: FW object representing CCB. */
+	struct pvr_fw_object *ccb_obj;
+
+	/** @ctrl_fw_addr: FW virtual address of CCB control structure. */
+	u32 ctrl_fw_addr;
+	/** @ccb_fw_addr: FW virtual address of CCB. */
+	u32 ccb_fw_addr;
+
+	/** @lock: Mutex protecting @ctrl and @ccb. */
+	struct mutex lock;
+	/**
+	 * @ctrl: Kernel mapping of CCB control structure. @lock must be held
+	 *        when accessing.
+	 */
+	struct rogue_fwif_ccb_ctl *ctrl;
+	/** @ccb: Kernel mapping of CCB. @lock must be held when accessing. */
+	void *ccb;
+};
+
+int pvr_kccb_init(struct pvr_device *pvr_dev);
+int pvr_fwccb_init(struct pvr_device *pvr_dev);
+void pvr_ccb_fini(struct pvr_ccb *ccb);
+
+int pvr_kccb_send_cmd(struct pvr_device *pvr_dev,
+		      struct rogue_fwif_kccb_cmd *cmd, u32 *kccb_slot);
+int pvr_kccb_send_cmd_power_locked(struct pvr_device *pvr_dev,
+				   struct rogue_fwif_kccb_cmd *cmd, u32 *kccb_slot);
+int pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, u32 timeout,
+				 u32 *rtn_out);
+
+#endif /* __PVR_CCB_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.c b/drivers/gpu/drm/imagination/pvr_cccb.c
new file mode 100644
index 000000000000..d0975d1c6fcf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.c
@@ -0,0 +1,390 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define PADDING_COMMAND_SIZE sizeof(struct rogue_fwif_ccb_cmd_header)
+
+static __always_inline u32
+get_ccb_space(u32 w_off, u32 r_off, u32 ccb_size)
+{
+	return (((r_off) - (w_off)) + ((ccb_size) - 1)) & ((ccb_size) - 1);
+}
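+
+/*
+ * Worked example (illustrative): with ccb_size = 16, w_off = 10 and
+ * r_off = 4, get_ccb_space() returns ((4 - 10) + 15) & 15 = 9. The
+ * arithmetic relies on ccb_size being a power of two, and deliberately
+ * reports one byte less than the raw gap so the buffer can never be
+ * filled completely (a full buffer would be indistinguishable from an
+ * empty one).
+ */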
+
+/**
+ * pvr_cccb_init() - Initialise a Client CCB
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to Client CCB structure to initialise.
+ * @size_log2: Log2 size of Client CCB in bytes.
+ * @name: Name of owner of Client CCB. Used for fence context.
+ *
+ * Return:
+ *  * Zero on success, or
+ *  * Any error code returned by pvr_gem_create_and_map_fw_object().
+ */
+int
+pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *pvr_cccb,
+	      u32 size_log2, const char *name)
+{
+	size_t size = 1 << size_log2;
+	int err;
+
+	mutex_init(&pvr_cccb->lock);
+
+	/*
+	 * Map CCCB and control structure as uncached, so we don't have to flush
+	 * CPU cache repeatedly when polling for space.
+	 */
+	pvr_cccb->ctrl = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*pvr_cccb->ctrl),
+							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+							  &pvr_cccb->ctrl_obj);
+	if (IS_ERR(pvr_cccb->ctrl)) {
+		err = PTR_ERR(pvr_cccb->ctrl);
+		goto err_out;
+	}
+
+	pvr_cccb->cccb = pvr_gem_create_and_map_fw_object(pvr_dev, size,
+							  PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							  DRM_PVR_BO_CREATE_ZEROED,
+							  &pvr_cccb->cccb_obj);
+	if (IS_ERR(pvr_cccb->cccb)) {
+		err = PTR_ERR(pvr_cccb->cccb);
+		goto err_free_ctrl;
+	}
+
+	pvr_gem_get_fw_addr(pvr_cccb->ctrl_obj, &pvr_cccb->ctrl_fw_addr);
+	pvr_gem_get_fw_addr(pvr_cccb->cccb_obj, &pvr_cccb->cccb_fw_addr);
+
+	WRITE_ONCE(pvr_cccb->ctrl->write_offset, 0);
+	WRITE_ONCE(pvr_cccb->ctrl->read_offset, 0);
+	WRITE_ONCE(pvr_cccb->ctrl->dep_offset, 0);
+	WRITE_ONCE(pvr_cccb->ctrl->wrap_mask, size - 1);
+	pvr_cccb->size = size;
+	pvr_cccb->write_offset = 0;
+	pvr_cccb->wrap_mask = size - 1;
+
+	return 0;
+
+err_free_ctrl:
+	pvr_fw_object_vunmap(pvr_cccb->ctrl_obj, false);
+	pvr_fw_object_release(pvr_cccb->ctrl_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_cccb_fini() - Release Client CCB structure
+ * @pvr_cccb: Client CCB to release.
+ */
+void
+pvr_cccb_fini(struct pvr_cccb *pvr_cccb)
+{
+	pvr_fw_object_vunmap(pvr_cccb->cccb_obj, false);
+	pvr_fw_object_release(pvr_cccb->cccb_obj);
+
+	pvr_fw_object_vunmap(pvr_cccb->ctrl_obj, false);
+	pvr_fw_object_release(pvr_cccb->ctrl_obj);
+}
+
+static void
+build_padding_command(void *cmd_ptr, u32 remaining)
+{
+	struct rogue_fwif_ccb_cmd_header *cmd = cmd_ptr;
+
+	WRITE_ONCE(cmd->cmd_type, ROGUE_FWIF_CCB_CMD_TYPE_PADDING);
+	WRITE_ONCE(cmd->cmd_size, remaining - sizeof(*cmd));
+}
+
+/**
+ * pvr_cccb_check_command_space_locked() - Check if a command sequence fits in the CCCB
+ * @pvr_cccb: Target Client CCB.
+ * @size: Size of the command sequence.
+ *
+ * Caller must hold @pvr_cccb->lock.
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -ENOMEM if insufficient space is currently available in the CCCB, or
+ *  * -E2BIG if the command will never fit in the CCCB.
+ */
+static int pvr_cccb_check_command_space_locked(struct pvr_cccb *pvr_cccb, size_t size)
+{
+	struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+	u32 read_offset = READ_ONCE(ctrl->read_offset);
+	u32 remaining = pvr_cccb->size - pvr_cccb->write_offset;
+	u32 required_size = size;
+
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	/*
+	 * Always ensure we have enough room for a padding command at the end of
+	 * the CCCB.
+	 */
+	required_size += PADDING_COMMAND_SIZE;
+
+	if (required_size > pvr_cccb->size)
+		return -E2BIG;
+
+	if (remaining < required_size) {
+		/*
+		 * Command would need to wrap, so we need to pad the remainder
+		 * of the CCCB.
+		 */
+		required_size += remaining;
+	}
+
+	if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) < required_size)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * pvr_cccb_check_command_space() - Check if a command sequence fits in the CCCB
+ * @pvr_cccb: Target Client CCB.
+ * @size: Size of the command sequence.
+ *
+ * Takes @pvr_cccb->lock and calls pvr_cccb_check_command_space_locked().
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -ENOMEM if insufficient space is currently available in the CCCB, or
+ *  * -E2BIG if the command will never fit in the CCCB.
+ */
+int pvr_cccb_check_command_space(struct pvr_cccb *pvr_cccb, size_t size)
+{
+	int ret;
+
+	mutex_lock(&pvr_cccb->lock);
+	ret = pvr_cccb_check_command_space_locked(pvr_cccb, size);
+	mutex_unlock(&pvr_cccb->lock);
+
+	return ret;
+}
+
+/**
+ * pvr_cccb_acquire_command_space_locked() - Acquire space in a Client CCB
+ * @pvr_cccb: Target Client CCB.
+ * @size: Size of allocation, in bytes.
+ * @out_ptr: Pointer to location to store CPU pointer to acquired space.
+ * @new_write_offset: Pointer to location to store new CCB write offset.
+ *
+ * Caller must hold @pvr_cccb->lock, and if this function succeeds then the
+ * lock must be held until the acquired space has been written and
+ * @pvr_cccb->write_offset has been updated.
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EAGAIN if insufficient space is currently available in the CCCB.
+ */
+static int
+pvr_cccb_acquire_command_space_locked(struct pvr_cccb *pvr_cccb, size_t size,
+				      void **out_ptr, u32 *new_write_offset)
+{
+	struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+	u32 read_offset = READ_ONCE(ctrl->read_offset);
+	u32 remaining = pvr_cccb->size - pvr_cccb->write_offset;
+	u32 required_size = size;
+	bool padding_required = false;
+
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	/*
+	 * Always ensure we have enough room for a padding command at the end of
+	 * the CCCB.
+	 */
+	required_size += PADDING_COMMAND_SIZE;
+
+	if (remaining < required_size) {
+		/*
+		 * Command would need to wrap, so we need to pad the remainder
+		 * of the CCCB.
+		 */
+		required_size += remaining;
+		padding_required = true;
+	}
+
+	if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) <
+	    required_size)
+		return -EAGAIN;
+
+	if (padding_required) {
+		/* Add padding command */
+		build_padding_command(&pvr_cccb->cccb[pvr_cccb->write_offset], remaining);
+		pvr_cccb->write_offset = 0;
+	}
+
+	*out_ptr = &pvr_cccb->cccb[pvr_cccb->write_offset];
+	*new_write_offset = pvr_cccb->write_offset + size;
+
+	return 0;
+}
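+
+/*
+ * Illustrative example (not part of the original patch; assumes
+ * PADDING_COMMAND_SIZE is 16 bytes): with a 64-byte CCCB, write_offset = 40
+ * and a 16-byte request, only 24 bytes remain before the buffer end while
+ * 16 + 16 = 32 are required, so required_size grows by those 24 bytes,
+ * build_padding_command() turns them into a single padding command, and
+ * the requested space is handed out starting at offset 0.
+ */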
+
+/**
+ * pvr_cccb_write_command() - Write a command to a Client CCB
+ * @pvr_cccb: Target Client CCB.
+ * @cmd_data: Pointer to command to write.
+ * @size: Size of command in bytes.
+ *
+ * Caller must have locked the Client CCB with pvr_cccb_lock().
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EAGAIN if insufficient space is currently available in the CCCB.
+ */
+int
+pvr_cccb_write_command(struct pvr_cccb *pvr_cccb, void *cmd_data, size_t size)
+{
+	void *cccb_ptr;
+	u32 new_write_offset;
+	int err;
+
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	err = pvr_cccb_acquire_command_space_locked(pvr_cccb, size, &cccb_ptr,
+						    &new_write_offset);
+	if (err)
+		return err;
+
+	memcpy(cccb_ptr, cmd_data, size);
+	pvr_cccb->write_offset = new_write_offset;
+
+	return 0;
+}
+
+/**
+ * pvr_cccb_write_command_with_header() - Write a command + command header to a
+ *                                        Client CCB
+ * @pvr_cccb: Target Client CCB.
+ * @cmd_type: Client CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+ * @cmd_size: Size of command in bytes.
+ * @cmd_data: Pointer to command to write.
+ * @ext_job_ref: External job reference.
+ * @int_job_ref: Internal job reference.
+ *
+ * Caller must have locked the Client CCB with pvr_cccb_lock().
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EAGAIN if insufficient space is currently available in the CCCB.
+ */
+int
+pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size,
+				   void *cmd_data, u32 ext_job_ref, u32 int_job_ref)
+{
+	struct rogue_fwif_ccb_cmd_header cmd_header;
+	u8 *cccb_ptr;
+	u32 new_write_offset;
+	const size_t size = sizeof(cmd_header) + cmd_size;
+	int err;
+
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	cmd_header.cmd_type = cmd_type;
+	cmd_header.cmd_size = cmd_size;
+	cmd_header.ext_job_ref = ext_job_ref;
+	cmd_header.int_job_ref = int_job_ref;
+
+	err = pvr_cccb_acquire_command_space_locked(pvr_cccb, size, (void **)&cccb_ptr,
+						    &new_write_offset);
+	if (err)
+		return err;
+
+	memcpy(cccb_ptr, &cmd_header, sizeof(cmd_header));
+	memcpy(cccb_ptr + sizeof(cmd_header), cmd_data, cmd_size);
+	pvr_cccb->write_offset = new_write_offset;
+
+	return 0;
+}
+
+/**
+ * pvr_cccb_wait_for_idle() - Wait for Client CCB to go idle
+ * @pvr_cccb: Client CCB to wait on.
+ * @timeout: Timeout length (in jiffies).
+ *
+ * Returns:
+ *  * Zero on success, or
+ *  * -EBUSY on timeout.
+ */
+int
+pvr_cccb_wait_for_idle(struct pvr_cccb *pvr_cccb, u32 timeout)
+{
+	struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
+	unsigned long end_jiffies = jiffies + timeout;
+
+	while (!time_after(jiffies, end_jiffies)) {
+		if (READ_ONCE(ctrl->read_offset) == READ_ONCE(ctrl->write_offset))
+			return 0;
+		usleep_range(100, 1000);
+	}
+
+	return -EBUSY;
+}
+
+/**
+ * pvr_cccb_unlock_send_kccb_kick() - Unlock Client CCB and send KCCB kick to
+ *                                    trigger command processing
+ * @pvr_dev: Device pointer.
+ * @pvr_cccb: Pointer to CCCB to process.
+ * @cctx_fw_addr: FW virtual address for context owning this Client CCB.
+ * @hwrt: HWRT data set associated with this kick. May be %NULL.
+ *
+ * Caller must have locked the Client CCB with pvr_cccb_lock().
+ *
+ * If this function is successful, then the Client CCB will be unlocked. On
+ * error, the Client CCB will still be locked, and it is the caller's
+ * responsibility to unlock it with pvr_cccb_unlock_rollback().
+ *
+ * Return:
+ *  * Zero on success, or
+ *  * Any error returned by pvr_kccb_send_cmd().
+ */
+int
+pvr_cccb_unlock_send_kccb_kick(struct pvr_device *pvr_dev,
+			       struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+			       struct pvr_hwrt_data *hwrt)
+{
+	struct rogue_fwif_kccb_cmd cmd_kick;
+	struct rogue_fwif_kccb_cmd_kick_data *cmd_kick_data =
+		&cmd_kick.cmd_data.cmd_kick_data;
+	u32 *cleanup_ctl;
+	int err;
+
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	cmd_kick.cmd_type = ROGUE_FWIF_KCCB_CMD_KICK;
+	cmd_kick_data->context_fw_addr = cctx_fw_addr;
+	cmd_kick_data->client_woff_update = pvr_cccb->write_offset;
+	cmd_kick_data->client_wrap_mask_update = pvr_cccb->wrap_mask;
+
+	cmd_kick_data->num_cleanup_ctl = 0;
+	cleanup_ctl = cmd_kick_data->cleanup_ctl_fw_addr;
+	if (hwrt) {
+		pvr_gem_get_fw_addr_offset(hwrt->fw_obj,
+					   offsetof(struct rogue_fwif_hwrtdata, cleanup_state),
+					   cleanup_ctl);
+		cmd_kick_data->num_cleanup_ctl++;
+		cleanup_ctl++;
+	}
+	cmd_kick_data->work_est_cmd_header_offset = 0;
+
+	err = pvr_kccb_send_cmd(pvr_dev, &cmd_kick, NULL);
+	if (err)
+		goto err_out;
+
+	mutex_unlock(&pvr_cccb->lock);
+
+	return 0;
+
+err_out:
+	return err;
+}
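+
+/*
+ * End-to-end usage sketch (illustrative; geom_cmd is a hypothetical command
+ * payload and cmd_type is one of ROGUE_FWIF_CCB_CMD_TYPE_*):
+ *
+ *   pvr_cccb_lock(pvr_cccb);
+ *   err = pvr_cccb_write_command_with_header(pvr_cccb, cmd_type,
+ *                                            sizeof(geom_cmd), &geom_cmd,
+ *                                            ext_job_ref, int_job_ref);
+ *   if (!err)
+ *           err = pvr_cccb_unlock_send_kccb_kick(pvr_dev, pvr_cccb,
+ *                                                cctx_fw_addr, hwrt);
+ *   if (err)
+ *           pvr_cccb_unlock_rollback(pvr_cccb);
+ *
+ * On success the CCCB is unlocked by the kick; on failure the rollback
+ * restores the pre-lock write offset and drops the lock.
+ */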
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h
new file mode 100644
index 000000000000..bf27d8280fe1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_cccb.h
@@ -0,0 +1,112 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_CCCB_H__
+#define __PVR_CCCB_H__
+
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_shared.h"
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+struct pvr_cccb {
+	/** @ctrl_obj: FW object representing CCCB control structure. */
+	struct pvr_fw_object *ctrl_obj;
+
+	/** @cccb_obj: FW object representing CCCB. */
+	struct pvr_fw_object *cccb_obj;
+
+	/** @lock: Mutex protecting @ctrl and @cccb. */
+	struct mutex lock;
+
+	/**
+	 * @ctrl: Kernel mapping of CCCB control structure. @lock must be held
+	 *        when accessing.
+	 */
+	struct rogue_fwif_cccb_ctl *ctrl;
+
+	/** @cccb: Kernel mapping of CCCB. @lock must be held when accessing. */
+	u8 *cccb;
+
+	/** @ctrl_fw_addr: FW virtual address of CCCB control structure. */
+	u32 ctrl_fw_addr;
+	/** @cccb_fw_addr: FW virtual address of CCCB. */
+	u32 cccb_fw_addr;
+
+	/** @size: Size of CCCB in bytes. */
+	size_t size;
+
+	/** @write_offset: CCCB write offset. */
+	u32 write_offset;
+
+	/** @wrap_mask: CCCB wrap mask. */
+	u32 wrap_mask;
+
+	/** @old_write_offset: CCCB write offset, sampled at CCCB lock time. */
+	u32 old_write_offset;
+};
+
+int pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *cccb,
+		  u32 size_log2, const char *name);
+void pvr_cccb_fini(struct pvr_cccb *cccb);
+
+int pvr_cccb_write_command(struct pvr_cccb *pvr_cccb, void *cmd_data,
+			   size_t size);
+int
+pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size,
+				   void *cmd_data, u32 ext_job_ref, u32 int_job_ref);
+int pvr_cccb_wait_for_idle(struct pvr_cccb *pvr_cccb, u32 timeout);
+int pvr_cccb_unlock_send_kccb_kick(struct pvr_device *pvr_dev,
+				   struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
+				   struct pvr_hwrt_data *hwrt);
+int pvr_cccb_check_command_space(struct pvr_cccb *pvr_cccb, size_t size);
+
+/**
+ * pvr_cccb_lock() - Lock a client CCB for writing
+ * @pvr_cccb: Target client CCB.
+ */
+static __always_inline void
+pvr_cccb_lock(struct pvr_cccb *pvr_cccb)
+{
+	mutex_lock(&pvr_cccb->lock);
+
+	pvr_cccb->old_write_offset = pvr_cccb->write_offset;
+}
+
+/**
+ * pvr_cccb_unlock_rollback() - Unlock a client CCB and rollback any written
+ *                              commands
+ * @pvr_cccb: Target client CCB.
+ */
+static __always_inline void
+pvr_cccb_unlock_rollback(struct pvr_cccb *pvr_cccb)
+{
+	lockdep_assert_held(&pvr_cccb->lock);
+
+	pvr_cccb->write_offset = pvr_cccb->old_write_offset;
+	mutex_unlock(&pvr_cccb->lock);
+}
+
+/**
+ * pvr_cccb_get_size_of_cmd_with_hdr() - Get the size of a command and its header.
+ * @cmd_size: Command size.
+ *
+ * Returns the size of the command and its header.
+ */
+static __always_inline u32
+pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size)
+{
+	return sizeof(struct rogue_fwif_ccb_cmd_header) + cmd_size;
+}
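+
+/*
+ * Illustrative sizing sketch (not part of the original patch; geom_cmd and
+ * frag_cmd are hypothetical command structs):
+ *
+ *   size_t total = pvr_cccb_get_size_of_cmd_with_hdr(sizeof(geom_cmd)) +
+ *                  pvr_cccb_get_size_of_cmd_with_hdr(sizeof(frag_cmd));
+ *
+ *   err = pvr_cccb_check_command_space(pvr_cccb, total);
+ *   if (err)
+ *           return err;
+ *
+ * lets a caller verify that a multi-command sequence can be written before
+ * taking the CCCB lock.
+ */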
+
+#endif /* __PVR_CCCB_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
new file mode 100644
index 000000000000..cdbebbcd3e46
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -0,0 +1,1428 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_cccb.h"
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_job.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_resetframework.h"
+
+#include <drm/drm_auth.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+/* TODO: placeholder */
+#define MAX_DEADLINE_MS 30000
+
+#define CLEANUP_SLEEP_TIME_MS 20
+
+#define CTX_COMPUTE_CCCB_SIZE_LOG2 15
+#define CTX_FRAG_CCCB_SIZE_LOG2 15
+#define CTX_GEOM_CCCB_SIZE_LOG2 15
+#define CTX_TRANSFER_CCCB_SIZE_LOG2 15
+
+static struct pvr_context *
+queue_to_ctx(struct pvr_context_queue *queue)
+{
+	switch (queue->fence_ctx->type) {
+	case PVR_CONTEXT_QUEUE_TYPE_GEOMETRY:
+		return &container_of(queue, struct pvr_context_render, ctx_geom.queue)->base;
+
+	case PVR_CONTEXT_QUEUE_TYPE_FRAGMENT:
+		return &container_of(queue, struct pvr_context_render, ctx_frag.queue)->base;
+
+	case PVR_CONTEXT_QUEUE_TYPE_COMPUTE:
+		return &container_of(queue, struct pvr_context_compute, queue)->base;
+
+	case PVR_CONTEXT_QUEUE_TYPE_TRANSFER:
+		return &container_of(queue, struct pvr_context_transfer, queue)->base;
+
+	default:
+		return NULL;
+	}
+}
+
+static void
+pvr_context_queue_cancel_pending_jobs(struct pvr_context_queue *queue)
+{
+	struct pvr_job *job, *tmp_job;
+	LIST_HEAD(cancel_jobs);
+
+	spin_lock(&queue->jobs.lock);
+	list_for_each_entry_safe(job, tmp_job, &queue->jobs.pending, node)
+		list_move_tail(&job->node, &cancel_jobs);
+	spin_unlock(&queue->jobs.lock);
+
+	list_for_each_entry_safe(job, tmp_job, &cancel_jobs, node) {
+		list_del(&job->node);
+		dma_fence_set_error(job->done_fence, -ECANCELED);
+		dma_fence_signal(job->done_fence);
+		pvr_job_put(job);
+	}
+}
+
+static void
+pvr_context_queue_cancel_inflight_jobs(struct pvr_context_queue *queue)
+{
+	/* Signal in_flight job fences. We keep the jobs around to retain the
+	 * context until the FW object backing this context is idle and ready
+	 * for cleanup.
+	 */
+	while (true) {
+		struct dma_fence *done_fence = NULL;
+		struct pvr_job *job;
+		unsigned long flags;
+
+		spin_lock(&queue->jobs.lock);
+		list_for_each_entry_reverse(job, &queue->jobs.in_flight, node) {
+			/* Grab the fence, so it doesn't disappear after we released the lock. */
+			if (!dma_fence_is_signaled(job->done_fence)) {
+				done_fence = dma_fence_get(job->done_fence);
+				break;
+			}
+		}
+		spin_unlock(&queue->jobs.lock);
+
+		if (!done_fence)
+			break;
+
+		spin_lock_irqsave(done_fence->lock, flags);
+
+		/* pvr_context_queue_collect_done_jobs() might have signaled the fence
+		 * by the time we get here, hence the is_signaled() check.
+		 */
+		if (!dma_fence_is_signaled_locked(done_fence)) {
+			dma_fence_set_error(done_fence, -ECANCELED);
+			dma_fence_signal_locked(done_fence);
+		}
+		spin_unlock_irqrestore(done_fence->lock, flags);
+
+		dma_fence_put(done_fence);
+	}
+}
+
+static void
+pvr_context_cancel_inflight_jobs(struct pvr_context *ctx)
+{
+	switch (ctx->type) {
+	case DRM_PVR_CTX_TYPE_RENDER:
+		pvr_context_queue_cancel_inflight_jobs(&to_pvr_context_render(ctx)->ctx_geom.queue);
+		pvr_context_queue_cancel_inflight_jobs(&to_pvr_context_render(ctx)->ctx_frag.queue);
+		break;
+
+	case DRM_PVR_CTX_TYPE_COMPUTE:
+		pvr_context_queue_cancel_inflight_jobs(&to_pvr_context_compute(ctx)->queue);
+		break;
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+		pvr_context_queue_cancel_inflight_jobs(&to_pvr_context_transfer_frag(ctx)->queue);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int
+pvr_context_queue_fw_cleanup(struct pvr_context_queue *queue)
+{
+	struct pvr_context *ctx = queue_to_ctx(queue);
+	struct pvr_fw_object *fw_obj;
+	u32 fw_obj_offset;
+
+	switch (queue->fence_ctx->type) {
+	case PVR_CONTEXT_QUEUE_TYPE_GEOMETRY:
+		fw_obj = to_pvr_context_render(ctx)->fw_obj;
+		fw_obj_offset = offsetof(struct rogue_fwif_fwrendercontext, geom_context);
+		break;
+
+	case PVR_CONTEXT_QUEUE_TYPE_FRAGMENT:
+		fw_obj = to_pvr_context_render(ctx)->fw_obj;
+		fw_obj_offset = offsetof(struct rogue_fwif_fwrendercontext, frag_context);
+		break;
+
+	case PVR_CONTEXT_QUEUE_TYPE_COMPUTE:
+		fw_obj = to_pvr_context_compute(ctx)->fw_obj;
+		fw_obj_offset = offsetof(struct rogue_fwif_fwcomputecontext, cdm_context);
+		break;
+
+	case PVR_CONTEXT_QUEUE_TYPE_TRANSFER:
+		fw_obj = to_pvr_context_transfer_frag(ctx)->fw_obj;
+		fw_obj_offset = offsetof(struct rogue_fwif_fwtransfercontext, tq_context);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return pvr_fw_structure_cleanup(ctx->pvr_dev,
+					ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+					fw_obj, fw_obj_offset);
+}
+
+static int
+pvr_context_fw_cleanup(struct pvr_context *ctx)
+{
+	struct pvr_context_render *render_ctx;
+	int err;
+
+	switch (ctx->type) {
+	case DRM_PVR_CTX_TYPE_RENDER:
+		render_ctx = to_pvr_context_render(ctx);
+
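+		/*
+		 * Clean up both queues even if the first cleanup fails, but
+		 * report the first error encountered.
+		 */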
+		err = pvr_context_queue_fw_cleanup(&render_ctx->ctx_geom.queue);
+		if (err)
+			pvr_context_queue_fw_cleanup(&render_ctx->ctx_frag.queue);
+		else
+			err = pvr_context_queue_fw_cleanup(&render_ctx->ctx_frag.queue);
+
+		return err;
+
+	case DRM_PVR_CTX_TYPE_COMPUTE:
+		return pvr_context_queue_fw_cleanup(&to_pvr_context_compute(ctx)->queue);
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+		return pvr_context_queue_fw_cleanup(&to_pvr_context_transfer_frag(ctx)->queue);
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * pvr_context_queue_check_pending_jobs() - Check if any pending job can be submitted
+ * @queue: Queue to check.
+ *
+ * Iterates over pending jobs, submitting ready jobs and stopping at the first non-ready one.
+ * A job is ready when:
+ * - all its dependencies are signaled
+ * - the job commands fit in the CCCB
+ */
+static void
+pvr_context_queue_check_pending_jobs(struct pvr_context_queue *queue)
+{
+	struct pvr_context *ctx = queue_to_ctx(queue);
+	LIST_HEAD(cancel_jobs);
+
+	while (!atomic_read(&ctx->destroyed)) {
+		struct pvr_job *job;
+
+		spin_lock(&queue->jobs.lock);
+		job = list_first_entry_or_null(&queue->jobs.pending,
+					       struct pvr_job, node);
+		pvr_job_get(job);
+		spin_unlock(&queue->jobs.lock);
+
+		if (!job || !pvr_job_non_native_deps_done(job)) {
+			pvr_job_put(job);
+			break;
+		}
+
+		pvr_job_evict_signaled_native_deps(job);
+
+		if (!pvr_job_fits_in_cccb(job) &&
+		    !pvr_job_wait_first_non_signaled_native_dep(job)) {
+			pvr_job_put(job);
+			break;
+		}
+
+		WARN_ON(!pvr_job_fits_in_cccb(job));
+		pvr_job_submit(job);
+		pvr_job_put(job);
+	}
+
+	if (atomic_read(&ctx->destroyed))
+		pvr_context_queue_cancel_pending_jobs(queue);
+}
+
+/**
+ * pvr_context_job_pending_worker() - Worker called when a pending_job event is received
+ * @work: Work struct.
+ *
+ * Check all queues bound to the context embedding the job_pending_work object, and
+ * submit ready jobs if any.
+ */
+static void
+pvr_context_job_pending_worker(struct work_struct *work)
+{
+	struct pvr_context *ctx = container_of(work, struct pvr_context, job_pending_work);
+
+	switch (ctx->type) {
+	case DRM_PVR_CTX_TYPE_RENDER:
+		pvr_context_queue_check_pending_jobs(&to_pvr_context_render(ctx)->ctx_geom.queue);
+		pvr_context_queue_check_pending_jobs(&to_pvr_context_render(ctx)->ctx_frag.queue);
+		break;
+
+	case DRM_PVR_CTX_TYPE_COMPUTE:
+		pvr_context_queue_check_pending_jobs(&to_pvr_context_compute(ctx)->queue);
+		break;
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+		pvr_context_queue_check_pending_jobs(&to_pvr_context_transfer_frag(ctx)->queue);
+		break;
+
+	default:
+		break;
+	}
+
+	/* Release the reference that was taken in pvr_context_pending_job_event(). */
+	pvr_context_put(ctx);
+}
+
+/**
+ * pvr_context_pending_job_event() - Wrapper to queue a job_pending event
+ * @ctx: Context to queue the event on.
+ *
+ * Queues the job_pending_work attached to the context.
+ */
+void pvr_context_pending_job_event(struct pvr_context *ctx)
+{
+	/* Grab a reference, and release it if the work was already queued. */
+	pvr_context_get(ctx);
+	if (!queue_work(ctx->pvr_dev->irq_wq, &ctx->job_pending_work))
+		pvr_context_put(ctx);
+}
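+
+/*
+ * Note (illustrative): queue_work() returns false when the work item was
+ * already queued; in that case the earlier queuing still holds a context
+ * reference, so the extra reference taken above is dropped immediately.
+ */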
+
+static int
+pvr_init_context_common(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+			struct pvr_context *ctx, int type,
+			enum pvr_context_priority priority,
+			struct drm_pvr_ioctl_create_context_args *args,
+			u32 id)
+{
+	ctx->type = type;
+	ctx->pvr_dev = pvr_dev;
+	ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!ctx->vm_ctx)
+		return -EINVAL;
+
+	ctx->flags = args->flags;
+	ctx->priority = priority;
+
+	ctx->ctx_id = id;
+
+	strscpy(ctx->process_name, current->comm, sizeof(ctx->process_name));
+
+	kref_init(&ctx->ref_count);
+	INIT_LIST_HEAD(&ctx->active_node);
+	INIT_WORK(&ctx->job_pending_work, pvr_context_job_pending_worker);
+
+	return 0;
+}
+
+static void
+pvr_fini_context_common(struct pvr_device *pvr_dev, struct pvr_context *ctx)
+{
+	pvr_vm_context_put(ctx->vm_ctx);
+}
+
+/**
+ * pvr_context_queue_fence_ctx_release() - Release callback for a queue fence context
+ * @kref: The kref object being released.
+ */
+static void
+pvr_context_queue_fence_ctx_release(struct kref *kref)
+{
+	struct pvr_context_queue_fence_ctx *ctx;
+
+	ctx = container_of(kref, struct pvr_context_queue_fence_ctx, refcount);
+	pvr_fw_object_vunmap(ctx->timeline_ufo.fw_obj, false);
+	pvr_fw_object_release(ctx->timeline_ufo.fw_obj);
+	kfree(ctx);
+	module_put(THIS_MODULE);
+}
+
+/**
+ * pvr_context_queue_fence_ctx_create() - Create a queue fence context
+ * @pvr_dev: The PowerVR device used to create this queue fence context.
+ * @queue: The queue to create the fence context for.
+ * @type: The type of queue being created.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -ENOENT if we fail to acquire a module reference,
+ *  * -ENOMEM if we fail to allocate memory, or
+ *  * Any error returned by pvr_gem_create_and_map_fw_object().
+ */
+static int
+pvr_context_queue_fence_ctx_create(struct pvr_device *pvr_dev,
+				   struct pvr_context_queue *queue,
+				   enum pvr_context_queue_type type)
+{
+	struct pvr_context_queue_fence_ctx *ctx;
+	void *cpu_map;
+	int err;
+
+	if (WARN_ON(!try_module_get(THIS_MODULE)))
+		return -ENOENT;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		module_put(THIS_MODULE);
+		return -ENOMEM;
+	}
+
+	cpu_map = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*ctx->timeline_ufo.value),
+						   PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						   DRM_PVR_BO_CREATE_ZEROED,
+						   &ctx->timeline_ufo.fw_obj);
+	if (IS_ERR(cpu_map)) {
+		err = PTR_ERR(cpu_map);
+		goto err_free_ctx;
+	}
+
+	ctx->timeline_ufo.value = cpu_map;
+	ctx->type = type;
+	ctx->id = dma_fence_context_alloc(1);
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->lock);
+	queue->fence_ctx = ctx;
+	return 0;
+
+err_free_ctx:
+	kfree(ctx);
+	return err;
+}
+
+static const char *
+pvr_context_queue_fence_get_driver_name(struct dma_fence *f)
+{
+	return PVR_DRIVER_NAME;
+}
+
+static const char *
+pvr_context_queue_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct pvr_context_queue_fence *fence = to_pvr_context_queue_fence(f);
+
+	switch (fence->ctx->type) {
+	case PVR_CONTEXT_QUEUE_TYPE_GEOMETRY:
+		return "geometry";
+
+	case PVR_CONTEXT_QUEUE_TYPE_FRAGMENT:
+		return "fragment";
+
+	case PVR_CONTEXT_QUEUE_TYPE_COMPUTE:
+		return "compute";
+
+	case PVR_CONTEXT_QUEUE_TYPE_TRANSFER:
+		return "transfer";
+
+	default:
+		return "invalid";
+	}
+}
+
+static void pvr_context_queue_fence_release(struct dma_fence *f)
+{
+	struct pvr_context_queue_fence *fence = to_pvr_context_queue_fence(f);
+
+	kref_put(&fence->ctx->refcount, pvr_context_queue_fence_ctx_release);
+	dma_fence_free(f);
+}
+
+const struct dma_fence_ops pvr_context_queue_fence_ops = {
+	.get_driver_name = pvr_context_queue_fence_get_driver_name,
+	.get_timeline_name = pvr_context_queue_fence_get_timeline_name,
+	.release = pvr_context_queue_fence_release,
+};
+
+struct pvr_context_queue_fence *
+to_pvr_context_queue_fence(struct dma_fence *f)
+{
+	if (f && f->ops == &pvr_context_queue_fence_ops)
+		return container_of(f, struct pvr_context_queue_fence, base);
+
+	return NULL;
+}
+
+struct pvr_context_queue_fence_ctx *
+pvr_context_queue_fence_ctx_from_fence(struct dma_fence *f)
+{
+	struct pvr_context_queue_fence *fence;
+
+	if (f->ops != &pvr_context_queue_fence_ops)
+		return NULL;
+
+	fence = to_pvr_context_queue_fence(f);
+	return fence->ctx;
+}
+
+/**
+ * pvr_context_queue_fence_create() - Create a queue fence object
+ * @queue: The queue to create the fence on.
+ *
+ * Any job object should have a fence created with
+ * pvr_context_queue_fence_create() attached to it. This fence will be signaled
+ * when the job is done, or when something failed.
+ *
+ * Return:
+ *  * A valid dma_fence pointer on success, or
+ *  * ERR_PTR(-ENOMEM) if we fail to allocate memory.
+ */
+struct dma_fence *
+pvr_context_queue_fence_create(struct pvr_context_queue *queue)
+{
+	struct pvr_context_queue_fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return ERR_PTR(-ENOMEM);
+
+	kref_get(&queue->fence_ctx->refcount);
+	fence->ctx = queue->fence_ctx;
+	dma_fence_init(&fence->base, &pvr_context_queue_fence_ops, &fence->ctx->lock,
+		       fence->ctx->id, atomic_inc_return(&fence->ctx->seqno));
+
+	return &fence->base;
+}
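+
+/*
+ * Illustrative sketch (not part of the original patch): a submission path
+ * would typically attach one of these fences to a job before queuing it:
+ *
+ *   job->done_fence = pvr_context_queue_fence_create(queue);
+ *   if (IS_ERR(job->done_fence))
+ *           return PTR_ERR(job->done_fence);
+ *
+ * The driver later signals the fence (possibly with an error, as in the
+ * cancellation paths above) once the job completes.
+ */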
+
+/**
+ * pvr_context_queue_init() - Initialize a context queue
+ * @pvr_dev: A PowerVR device.
+ * @queue: The queue object to initialize.
+ * @type: The type of jobs taken by this queue.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * a negative error code when something failed.
+ */
+static int
+pvr_context_queue_init(struct pvr_device *pvr_dev,
+		       struct pvr_context_queue *queue,
+		       enum pvr_context_queue_type type)
+{
+	int err;
+
+	INIT_LIST_HEAD(&queue->jobs.pending);
+	INIT_LIST_HEAD(&queue->jobs.in_flight);
+	spin_lock_init(&queue->jobs.lock);
+
+	err = pvr_context_queue_fence_ctx_create(pvr_dev, queue, type);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * pvr_context_queue_fini() - Cleanup a context queue
+ * @queue: The queue object to cleanup.
+ */
+static void
+pvr_context_queue_fini(struct pvr_context_queue *queue)
+{
+	WARN_ON(!list_empty(&queue->jobs.pending));
+	WARN_ON(!list_empty(&queue->jobs.in_flight));
+	kref_put(&queue->fence_ctx->refcount, pvr_context_queue_fence_ctx_release);
+}
+
+/**
+ * pvr_init_geom_context() - Initialise a geometry context
+ * @pvr_file: Pointer to pvr_file structure.
+ * @ctx_render: Pointer to parent render context.
+ * @args: Arguments from userspace.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_cccb_init(), pvr_gem_create_and_map_fw_object()
+ *    or pvr_context_queue_init().
+ */
+static int
+pvr_init_geom_context(struct pvr_file *pvr_file,
+		      struct pvr_context_render *ctx_render,
+		      struct drm_pvr_ioctl_create_context_args *args)
+{
+	struct pvr_device *pvr_dev = ctx_render->base.pvr_dev;
+	struct pvr_context_geom *ctx_geom = &ctx_render->ctx_geom;
+	struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw;
+	int err;
+
+	err = pvr_cccb_init(pvr_dev, &ctx_geom->cccb, CTX_GEOM_CCCB_SIZE_LOG2, "geometry");
+	if (err)
+		goto err_out;
+
+	geom_ctx_state_fw = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*geom_ctx_state_fw),
+							     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							     DRM_PVR_BO_CREATE_ZEROED,
+							     &ctx_geom->ctx_state_obj);
+	if (IS_ERR(geom_ctx_state_fw)) {
+		err = PTR_ERR(geom_ctx_state_fw);
+		goto err_cccb_fini;
+	}
+
+	err = pvr_context_queue_init(pvr_dev, &ctx_geom->queue, PVR_CONTEXT_QUEUE_TYPE_GEOMETRY);
+	if (err)
+		goto err_release_ctx_state;
+
+	geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer = args->callstack_addr;
+
+	pvr_fw_object_vunmap(ctx_geom->ctx_state_obj, true);
+
+	return 0;
+
+err_release_ctx_state:
+	pvr_fw_object_vunmap(ctx_geom->ctx_state_obj, false);
+	pvr_fw_object_release(ctx_geom->ctx_state_obj);
+
+err_cccb_fini:
+	pvr_cccb_fini(&ctx_geom->cccb);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fini_geom_context() - Clean up a geometry context
+ * @ctx_render: Pointer to parent render context.
+ */
+static void
+pvr_fini_geom_context(struct pvr_context_render *ctx_render)
+{
+	struct pvr_context_geom *ctx_geom = &ctx_render->ctx_geom;
+
+	pvr_context_queue_fini(&ctx_geom->queue);
+	pvr_fw_object_release(ctx_geom->ctx_state_obj);
+
+	pvr_cccb_fini(&ctx_geom->cccb);
+}
+
+/**
+ * pvr_init_frag_context() - Initialise a fragment context
+ * @pvr_file: Pointer to pvr_file structure.
+ * @ctx_render: Pointer to parent render context.
+ * @args: Arguments from userspace.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_cccb_init(), pvr_gem_create_fw_object() or
+ *    pvr_context_queue_init().
+ */
+static int
+pvr_init_frag_context(struct pvr_file *pvr_file,
+		      struct pvr_context_render *ctx_render,
+		      struct drm_pvr_ioctl_create_context_args *args)
+{
+	struct pvr_device *pvr_dev = ctx_render->base.pvr_dev;
+	struct pvr_context_frag *ctx_frag = &ctx_render->ctx_frag;
+	u32 num_isp_store_registers;
+	size_t frag_ctx_state_size;
+	int err;
+
+	err = pvr_cccb_init(pvr_dev, &ctx_frag->cccb, CTX_FRAG_CCCB_SIZE_LOG2, "fragment");
+	if (err)
+		goto err_out;
+
+	if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
+		WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes, &num_isp_store_registers));
+
+		if (PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) {
+			u32 xpu_max_slaves;
+
+			WARN_ON(PVR_FEATURE_VALUE(pvr_dev, xpu_max_slaves, &xpu_max_slaves));
+
+			num_isp_store_registers *= (1 + xpu_max_slaves);
+		}
+	} else {
+		WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers));
+	}
+
+	frag_ctx_state_size = sizeof(struct rogue_fwif_frag_ctx_state) + num_isp_store_registers *
+			      sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]);
+
+	err = pvr_gem_create_fw_object(pvr_dev, frag_ctx_state_size,
+				       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+				       DRM_PVR_BO_CREATE_ZEROED, &ctx_frag->ctx_state_obj);
+	if (err)
+		goto err_cccb_fini;
+
+	err = pvr_context_queue_init(pvr_dev, &ctx_frag->queue, PVR_CONTEXT_QUEUE_TYPE_FRAGMENT);
+	if (err)
+		goto err_release_ctx_state;
+
+	return 0;
+
+err_release_ctx_state:
+	pvr_fw_object_release(ctx_frag->ctx_state_obj);
+
+err_cccb_fini:
+	pvr_cccb_fini(&ctx_frag->cccb);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fini_frag_context() - Clean up a fragment context
+ * @ctx_render: Pointer to parent render context.
+ */
+static void
+pvr_fini_frag_context(struct pvr_context_render *ctx_render)
+{
+	struct pvr_context_frag *ctx_frag = &ctx_render->ctx_frag;
+
+	pvr_context_queue_fini(&ctx_frag->queue);
+	pvr_fw_object_release(ctx_frag->ctx_state_obj);
+
+	pvr_cccb_fini(&ctx_frag->cccb);
+}
+
+static int
+remap_priority(struct pvr_file *pvr_file, s32 uapi_priority,
+	       enum pvr_context_priority *priority_out)
+{
+	switch (uapi_priority) {
+	case DRM_PVR_CTX_PRIORITY_LOW:
+		*priority_out = PVR_CTX_PRIORITY_LOW;
+		break;
+	case DRM_PVR_CTX_PRIORITY_NORMAL:
+		*priority_out = PVR_CTX_PRIORITY_MEDIUM;
+		break;
+	case DRM_PVR_CTX_PRIORITY_HIGH:
+		if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file)))
+			return -EACCES;
+		*priority_out = PVR_CTX_PRIORITY_HIGH;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * pvr_init_fw_common_context() - Initialise an FW-side common context structure
+ * @ctx: Pointer to context.
+ * @cctx_fw: Pointer to FW common context structure.
+ * @dm_type: Data master type.
+ * @priority: Context priority.
+ * @max_deadline_ms: Maximum deadline for work on this context.
+ * @cctx_id: Common context ID.
+ * @ctx_state_obj: FW object representing context state.
+ * @cccb: Client CCB for this context.
+ */
+static void
+pvr_init_fw_common_context(struct pvr_context *ctx,
+			   struct rogue_fwif_fwcommoncontext *cctx_fw,
+			   u32 dm_type, u32 priority, u32 max_deadline_ms,
+			   u32 cctx_id, struct pvr_fw_object *ctx_state_obj,
+			   struct pvr_cccb *cccb)
+{
+	struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx);
+
+	cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr;
+	cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr;
+
+	cctx_fw->dm = dm_type;
+	cctx_fw->priority = priority;
+	cctx_fw->priority_seq_num = 0;
+	cctx_fw->max_deadline_ms = max_deadline_ms;
+	cctx_fw->pid = task_tgid_nr(current);
+	cctx_fw->server_common_context_id = cctx_id;
+
+	pvr_gem_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr);
+
+	pvr_gem_get_fw_addr(ctx_state_obj, &cctx_fw->context_state_addr);
+}
+
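+/**
+ * pvr_fini_fw_common_context() - Clean up an FW-side common context structure
+ * @ctx: Pointer to context.
+ *
+ * Currently a no-op; kept so context teardown mirrors
+ * pvr_init_fw_common_context().
+ */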
+static void
+pvr_fini_fw_common_context(struct pvr_context *ctx)
+{
+}
+
+/**
+ * pvr_init_fw_render_context() - Initialise an FW-side render context structure
+ * @ctx_render: Pointer to parent render context.
+ * @args: Context creation arguments from userspace.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if the static context state size is incorrect,
+ *  * -%EFAULT if the static context state could not be copied from userspace, or
+ *  * Any error returned by pvr_gem_create_and_map_fw_object().
+ */
+static int
+pvr_init_fw_render_context(struct pvr_context_render *ctx_render,
+			   struct drm_pvr_ioctl_create_context_args *args)
+{
+	struct rogue_fwif_static_rendercontext_state *static_rendercontext_state;
+	struct rogue_fwif_fwrendercontext *fw_render_context;
+	int err;
+
+	if (args->static_context_state_len != sizeof(*static_rendercontext_state)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	fw_render_context = pvr_gem_create_and_map_fw_object(ctx_render->base.pvr_dev,
+							     sizeof(*fw_render_context),
+							     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							     DRM_PVR_BO_CREATE_ZEROED,
+							     &ctx_render->fw_obj);
+	if (IS_ERR(fw_render_context)) {
+		err = PTR_ERR(fw_render_context);
+		goto err_out;
+	}
+
+	static_rendercontext_state = &fw_render_context->static_render_context_state;
+
+	/* Copy static render context state from userspace. */
+	if (copy_from_user(static_rendercontext_state, u64_to_user_ptr(args->static_context_state),
+			   sizeof(*static_rendercontext_state))) {
+		err = -EFAULT;
+		goto err_destroy_gem_object;
+	}
+
+	pvr_init_fw_common_context(&ctx_render->base, &fw_render_context->geom_context,
+				   PVR_FWIF_DM_GEOM, ctx_render->base.priority, MAX_DEADLINE_MS,
+				   ctx_render->base.ctx_id, ctx_render->ctx_geom.ctx_state_obj,
+				   &ctx_render->ctx_geom.cccb);
+
+	pvr_init_fw_common_context(&ctx_render->base, &fw_render_context->frag_context,
+				   PVR_FWIF_DM_FRAG, ctx_render->base.priority, MAX_DEADLINE_MS,
+				   ctx_render->base.ctx_id, ctx_render->ctx_frag.ctx_state_obj,
+				   &ctx_render->ctx_frag.cccb);
+
+	pvr_fw_object_vunmap(ctx_render->fw_obj, true);
+	return 0;
+
+err_destroy_gem_object:
+	pvr_fw_object_vunmap(ctx_render->fw_obj, true);
+	pvr_fw_object_release(ctx_render->fw_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fini_fw_render_context() - Clean up an FW-side render context structure
+ * @ctx_render: Pointer to parent render context.
+ */
+static void
+pvr_fini_fw_render_context(struct pvr_context_render *ctx_render)
+{
+	struct pvr_context *ctx = from_pvr_context_render(ctx_render);
+
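+	/* A render context embeds two FW common contexts (one geometry, one
+	 * fragment), hence the two calls below.
+	 */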
+	pvr_fini_fw_common_context(ctx);
+	pvr_fini_fw_common_context(ctx);
+
+	pvr_fw_object_release(ctx_render->fw_obj);
+}
+
+/**
+ * pvr_init_compute_context() - Initialise a compute context structure
+ * @pvr_file: Pointer to pvr_file structure.
+ * @ctx_compute: Pointer to parent compute context.
+ * @args: Context creation arguments from userspace.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if the static context state size is incorrect,
+ *  * -%EFAULT if the static context state could not be copied from userspace, or
+ *  * Any error returned by pvr_cccb_init(), pvr_gem_create_fw_object(),
+ *    pvr_gem_create_and_map_fw_object() or pvr_context_queue_init().
+ */
+static int
+pvr_init_compute_context(struct pvr_file *pvr_file, struct pvr_context_compute *ctx_compute,
+			 struct drm_pvr_ioctl_create_context_args *args)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs;
+	struct rogue_fwif_fwcomputecontext *fw_compute_context;
+	int err;
+
+	if (args->static_context_state_len != sizeof(*ctxswitch_regs)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = pvr_cccb_init(pvr_dev, &ctx_compute->cccb, CTX_COMPUTE_CCCB_SIZE_LOG2, "compute");
+	if (err)
+		goto err_out;
+
+	err = pvr_gem_create_fw_object(pvr_dev, sizeof(struct rogue_fwif_compute_ctx_state),
+				       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+				       DRM_PVR_BO_CREATE_ZEROED, &ctx_compute->ctx_state_obj);
+	if (err)
+		goto err_cccb_fini;
+
+	fw_compute_context = pvr_gem_create_and_map_fw_object(ctx_compute->base.pvr_dev,
+							      sizeof(*fw_compute_context),
+							      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							      DRM_PVR_BO_CREATE_ZEROED,
+							      &ctx_compute->fw_obj);
+	if (IS_ERR(fw_compute_context)) {
+		err = PTR_ERR(fw_compute_context);
+		goto err_destroy_ctx_state_obj;
+	}
+
+	ctxswitch_regs =
+		&fw_compute_context->static_compute_context_state.ctxswitch_regs;
+
+	/* Copy static compute context state from userspace. */
+	if (copy_from_user(ctxswitch_regs,
+			   u64_to_user_ptr(args->static_context_state),
+			   sizeof(*ctxswitch_regs))) {
+		err = -EFAULT;
+		goto err_destroy_gem_object;
+	}
+
+	err = pvr_context_queue_init(pvr_dev, &ctx_compute->queue, PVR_CONTEXT_QUEUE_TYPE_COMPUTE);
+	if (err)
+		goto err_destroy_gem_object;
+
+	pvr_init_fw_common_context(&ctx_compute->base, &fw_compute_context->cdm_context,
+				   PVR_FWIF_DM_CDM, ctx_compute->base.priority, MAX_DEADLINE_MS,
+				   ctx_compute->base.ctx_id, ctx_compute->ctx_state_obj,
+				   &ctx_compute->cccb);
+
+	pvr_fw_object_vunmap(ctx_compute->fw_obj, true);
+	return 0;
+
+err_destroy_gem_object:
+	pvr_fw_object_vunmap(ctx_compute->fw_obj, true);
+	pvr_fw_object_release(ctx_compute->fw_obj);
+
+err_destroy_ctx_state_obj:
+	pvr_fw_object_release(ctx_compute->ctx_state_obj);
+
+err_cccb_fini:
+	pvr_cccb_fini(&ctx_compute->cccb);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fini_compute_context() - Clean up a compute context structure
+ * @ctx_compute: Pointer to compute context.
+ */
+static void
+pvr_fini_compute_context(struct pvr_context_compute *ctx_compute)
+{
+	struct pvr_context *ctx = from_pvr_context_compute(ctx_compute);
+
+	pvr_fini_fw_common_context(ctx);
+	pvr_context_queue_fini(&ctx_compute->queue);
+	pvr_fw_object_release(ctx_compute->fw_obj);
+	pvr_fw_object_release(ctx_compute->ctx_state_obj);
+	pvr_cccb_fini(&ctx_compute->cccb);
+}
+
+/**
+ * pvr_init_transfer_context() - Initialise a transfer context structure
+ * @pvr_file: Pointer to pvr_file structure.
+ * @ctx_transfer: Pointer to parent transfer context.
+ * @args: Context creation arguments from userspace.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_cccb_init(), pvr_gem_create_fw_object(),
+ *    pvr_gem_create_and_map_fw_object() or pvr_context_queue_init().
+ */
+static int
+pvr_init_transfer_context(struct pvr_file *pvr_file, struct pvr_context_transfer *ctx_transfer,
+			  struct drm_pvr_ioctl_create_context_args *args)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct rogue_fwif_fwtransfercontext *fw_transfer_context;
+	size_t transfer_ctx_state_size;
+	u32 num_isp_store_registers;
+	int err;
+
+	err = pvr_cccb_init(pvr_dev, &ctx_transfer->cccb, CTX_TRANSFER_CCCB_SIZE_LOG2,
+			    "transfer_frag");
+	if (err)
+		goto err_out;
+
+	if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy))
+		num_isp_store_registers = 1;
+	else
+		WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers));
+
+	transfer_ctx_state_size = sizeof(struct rogue_fwif_frag_ctx_state) +
+				  num_isp_store_registers *
+				  sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0]);
+
+	err = pvr_gem_create_fw_object(pvr_dev, transfer_ctx_state_size,
+				       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+				       DRM_PVR_BO_CREATE_ZEROED, &ctx_transfer->ctx_state_obj);
+	if (err)
+		goto err_cccb_fini;
+
+	fw_transfer_context = pvr_gem_create_and_map_fw_object(ctx_transfer->base.pvr_dev,
+							       sizeof(*fw_transfer_context),
+							       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							       DRM_PVR_BO_CREATE_ZEROED,
+							       &ctx_transfer->fw_obj);
+	if (IS_ERR(fw_transfer_context)) {
+		err = PTR_ERR(fw_transfer_context);
+		goto err_destroy_ctx_state_obj;
+	}
+
+	err = pvr_context_queue_init(pvr_dev, &ctx_transfer->queue,
+				     PVR_CONTEXT_QUEUE_TYPE_TRANSFER);
+	if (err)
+		goto err_destroy_ctx_state_obj;
+
+	pvr_init_fw_common_context(&ctx_transfer->base, &fw_transfer_context->tq_context,
+				   PVR_FWIF_DM_FRAG, ctx_transfer->base.priority, MAX_DEADLINE_MS,
+				   ctx_transfer->base.ctx_id, ctx_transfer->ctx_state_obj,
+				   &ctx_transfer->cccb);
+
+	pvr_fw_object_vunmap(ctx_transfer->fw_obj, true);
+	return 0;
+
+err_destroy_ctx_state_obj:
+	pvr_fw_object_release(ctx_transfer->ctx_state_obj);
+
+err_cccb_fini:
+	pvr_cccb_fini(&ctx_transfer->cccb);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fini_transfer_context() - Clean up a transfer context structure
+ * @ctx_transfer: Pointer to transfer context.
+ */
+static void
+pvr_fini_transfer_context(struct pvr_context_transfer *ctx_transfer)
+{
+	struct pvr_context *ctx = from_pvr_context_transfer(ctx_transfer);
+
+	pvr_fini_fw_common_context(ctx);
+	pvr_context_queue_fini(&ctx_transfer->queue);
+	pvr_fw_object_release(ctx_transfer->fw_obj);
+	pvr_fw_object_release(ctx_transfer->ctx_state_obj);
+	pvr_cccb_fini(&ctx_transfer->cccb);
+}
+
+/**
+ * pvr_create_render_context() - Create a combination geometry/fragment render
+ *                               context and return a handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ * @id: FW context ID.
+ *
+ * The context is initialised with a refcount of 1.
+ *
+ * Return:
+ *  * Context pointer on success,
+ *  * -%EINVAL if arguments are invalid,
+ *  * -%ENOMEM on out-of-memory, or
+ *  * Any error returned by remap_priority(), xa_alloc() or the context
+ *    initialisation functions.
+ */
+struct pvr_context *
+pvr_create_render_context(struct pvr_file *pvr_file,
+			  struct drm_pvr_ioctl_create_context_args *args,
+			  u32 id)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct pvr_context_render *ctx_render;
+	enum pvr_context_priority priority;
+	int err;
+
+	if (!args->static_context_state) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = remap_priority(pvr_file, args->priority, &priority);
+	if (err)
+		goto err_out;
+
+	ctx_render = kzalloc(sizeof(*ctx_render), GFP_KERNEL);
+	if (!ctx_render) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = pvr_init_context_common(pvr_dev, pvr_file,
+				      from_pvr_context_render(ctx_render),
+				      DRM_PVR_CTX_TYPE_RENDER, priority, args, id);
+	if (err < 0)
+		goto err_free;
+
+	err = pvr_init_geom_context(pvr_file, ctx_render, args);
+	if (err < 0)
+		goto err_destroy_common_context;
+
+	err = pvr_init_frag_context(pvr_file, ctx_render, args);
+	if (err < 0)
+		goto err_destroy_geom_context;
+
+	err = pvr_init_fw_render_context(ctx_render, args);
+	if (err < 0)
+		goto err_destroy_frag_context;
+
+	return from_pvr_context_render(ctx_render);
+
+err_destroy_frag_context:
+	pvr_fini_frag_context(ctx_render);
+
+err_destroy_geom_context:
+	pvr_fini_geom_context(ctx_render);
+
+err_destroy_common_context:
+	pvr_fini_context_common(pvr_dev, from_pvr_context_render(ctx_render));
+
+err_free:
+	kfree(ctx_render);
+
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_create_compute_context() - Create a compute context and return a handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ * @id: FW context ID.
+ *
+ * The context is initialised with a refcount of 1.
+ *
+ * Return:
+ *  * Context pointer on success,
+ *  * -%EINVAL if arguments are invalid,
+ *  * -%ENOMEM on out-of-memory, or
+ *  * Any error returned by remap_priority(), xa_alloc() or the context
+ *    initialisation functions.
+ */
+struct pvr_context *
+pvr_create_compute_context(struct pvr_file *pvr_file,
+			   struct drm_pvr_ioctl_create_context_args *args,
+			   u32 id)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct pvr_context_compute *ctx_compute;
+	enum pvr_context_priority priority;
+	int err;
+
+	if (!args->static_context_state || args->callstack_addr) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = remap_priority(pvr_file, args->priority, &priority);
+	if (err)
+		goto err_out;
+
+	ctx_compute = kzalloc(sizeof(*ctx_compute), GFP_KERNEL);
+	if (!ctx_compute) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = pvr_init_context_common(pvr_dev, pvr_file,
+				      from_pvr_context_compute(ctx_compute),
+				      DRM_PVR_CTX_TYPE_COMPUTE, priority, args, id);
+	if (err < 0)
+		goto err_free;
+
+	err = pvr_init_compute_context(pvr_file, ctx_compute, args);
+	if (err < 0)
+		goto err_destroy_common_context;
+
+	return from_pvr_context_compute(ctx_compute);
+
+err_destroy_common_context:
+	pvr_fini_context_common(pvr_dev, from_pvr_context_compute(ctx_compute));
+
+err_free:
+	kfree(ctx_compute);
+
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_create_transfer_context() - Create a transfer context and return a handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ * @id: FW context ID.
+ *
+ * The context is initialised with a refcount of 1.
+ *
+ * Return:
+ *  * Context pointer on success,
+ *  * -%EINVAL if arguments are invalid,
+ *  * -%ENOMEM on out-of-memory, or
+ *  * Any error returned by remap_priority(), xa_alloc() or the context
+ *    initialisation functions.
+ */
+struct pvr_context *
+pvr_create_transfer_context(struct pvr_file *pvr_file,
+			    struct drm_pvr_ioctl_create_context_args *args,
+			    u32 id)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct pvr_context_transfer *ctx_transfer;
+	enum pvr_context_priority priority;
+	int err;
+
+	if (args->callstack_addr || args->static_context_state) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = remap_priority(pvr_file, args->priority, &priority);
+	if (err)
+		goto err_out;
+
+	ctx_transfer = kzalloc(sizeof(*ctx_transfer), GFP_KERNEL);
+	if (!ctx_transfer) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = pvr_init_context_common(pvr_dev, pvr_file,
+				      from_pvr_context_transfer(ctx_transfer),
+				      args->type, priority, args, id);
+	if (err < 0)
+		goto err_free;
+
+	err = pvr_init_transfer_context(pvr_file, ctx_transfer, args);
+	if (err < 0)
+		goto err_destroy_common_context;
+
+	return from_pvr_context_transfer(ctx_transfer);
+
+err_destroy_common_context:
+	pvr_fini_context_common(pvr_dev, from_pvr_context_transfer(ctx_transfer));
+
+err_free:
+	kfree(ctx_transfer);
+
+err_out:
+	return ERR_PTR(err);
+}
+
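+/**
+ * pvr_release_context() - Release a context once its refcount reaches zero
+ * @ref_count: Pointer to the &struct kref embedded in the context.
+ *
+ * Called via kref_put() from pvr_context_put(). Cleans up FW-side state,
+ * removes the context from the active list and the device's context ID set,
+ * releases the type-specific resources, then frees the context itself.
+ */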
+static void
+pvr_release_context(struct kref *ref_count)
+{
+	struct pvr_context *ctx =
+		container_of(ref_count, struct pvr_context, ref_count);
+	struct pvr_device *pvr_dev = ctx->pvr_dev;
+
+	WARN_ON(pvr_context_fw_cleanup(ctx));
+
+	if (WARN_ON(!list_empty(&ctx->active_node))) {
+		spin_lock(&pvr_dev->active_contexts.lock);
+		list_del_init(&ctx->active_node);
+		spin_unlock(&pvr_dev->active_contexts.lock);
+	}
+
+	xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
+
+	if (ctx->type == DRM_PVR_CTX_TYPE_RENDER) {
+		struct pvr_context_render *ctx_render = to_pvr_context_render(ctx);
+
+		pvr_fini_fw_render_context(ctx_render);
+
+		/* Destroy owned geometry & fragment contexts. */
+		pvr_fini_frag_context(ctx_render);
+		pvr_fini_geom_context(ctx_render);
+	} else if (ctx->type == DRM_PVR_CTX_TYPE_COMPUTE) {
+		struct pvr_context_compute *ctx_compute = to_pvr_context_compute(ctx);
+
+		pvr_fini_compute_context(ctx_compute);
+	} else if (ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG) {
+		struct pvr_context_transfer *ctx_transfer = to_pvr_context_transfer_frag(ctx);
+
+		pvr_fini_transfer_context(ctx_transfer);
+	}
+
+	pvr_fini_context_common(ctx->pvr_dev, ctx);
+
+	kfree(ctx);
+}
+
+/**
+ * pvr_context_put() - Release reference on context
+ * @ctx: Target context.
+ */
+void
+pvr_context_put(struct pvr_context *ctx)
+{
+	if (ctx)
+		kref_put(&ctx->ref_count, pvr_release_context);
+}
+
+/**
+ * pvr_context_destroy() - Destroy context
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Userspace context handle.
+ *
+ * Removes context from context list and drops initial reference. Context will
+ * then be destroyed once all outstanding references are dropped.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if context not in context list.
+ */
+int
+pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);
+
+	if (!ctx)
+		return -EINVAL;
+
+	/* Flag as destroyed so no jobs are submitted. */
+	atomic_set(&ctx->destroyed, 1);
+
+	/* Cancel in-flight jobs. This is not actually cancelling the jobs, just
+	 * signaling done fences attached to them.
+	 */
+	pvr_context_cancel_inflight_jobs(ctx);
+
+	/* Trigger pending job processing to cancel all pending jobs. */
+	pvr_context_pending_job_event(ctx);
+
+	/* Release the reference held by the handle set. */
+	pvr_context_put(ctx);
+
+	return 0;
+}
+
+/**
+ * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all contexts associated with @pvr_file from the device context list and drops initial
+ * references. Contexts will then be destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
+{
+	struct pvr_context *ctx;
+	unsigned long handle;
+
+	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
+		pvr_context_destroy(pvr_file, handle);
+}
+
+/**
+ * pvr_context_queue_collect_done_jobs() - Collect all done jobs and add them to the list
+ * @queue: Queue to collect done jobs on.
+ * @done_jobs: List to queue these done jobs to.
+ *
+ * Collect all jobs whose sequence number is below the current timeline UFO sequence.
+ */
+static void
+pvr_context_queue_collect_done_jobs(struct pvr_context_queue *queue, struct list_head *done_jobs)
+{
+	struct pvr_job *job, *tmp_job;
+	u32 cur_seqno;
+
+	spin_lock(&queue->jobs.lock);
+	cur_seqno = *queue->fence_ctx->timeline_ufo.value;
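+	/* In-flight jobs complete in submission order, so we can stop at the
+	 * first job the timeline UFO has not reached yet.
+	 */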
+	list_for_each_entry_safe(job, tmp_job, &queue->jobs.in_flight, node) {
+		if (cur_seqno < job->done_fence->seqno)
+			break;
+
+		list_move_tail(&job->node, done_jobs);
+	}
+	spin_unlock(&queue->jobs.lock);
+}
+
+/**
+ * pvr_context_collect_done_jobs() - Collect done jobs from all queues on a context
+ * @ctx: Context to collect done jobs on.
+ * @done_jobs: List to queue these done jobs to.
+ *
+ * Collect all jobs on all queues belonging to this context.
+ */
+static void
+pvr_context_collect_done_jobs(struct pvr_context *ctx, struct list_head *done_jobs)
+{
+	switch (ctx->type) {
+	case DRM_PVR_CTX_TYPE_RENDER:
+		pvr_context_queue_collect_done_jobs(&to_pvr_context_render(ctx)->ctx_geom.queue,
+						    done_jobs);
+		pvr_context_queue_collect_done_jobs(&to_pvr_context_render(ctx)->ctx_frag.queue,
+						    done_jobs);
+		break;
+
+	case DRM_PVR_CTX_TYPE_COMPUTE:
+		pvr_context_queue_collect_done_jobs(&to_pvr_context_compute(ctx)->queue,
+						    done_jobs);
+		break;
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+		pvr_context_queue_collect_done_jobs(&to_pvr_context_transfer_frag(ctx)->queue,
+						    done_jobs);
+		break;
+
+	default:
+		break;
+	}
+}
+
+/**
+ * pvr_context_process_worker() - Called to process context updates when a FW interrupt is received
+ * @work: Work struct embedded in the context object.
+ *
+ * Right now, we simply iterate over all active contexts, collect done jobs and signal the
+ * fences attached to them. If more context processing is needed at some point, it can be done
+ * here as well.
+ */
+static void
+pvr_context_process_worker(struct work_struct *work)
+{
+	struct pvr_device *pvr_dev = container_of(work, struct pvr_device, context_work);
+	struct pvr_context *ctx, *tmp_ctx;
+	struct pvr_job *job, *tmp_job;
+	LIST_HEAD(done_jobs);
+
+	spin_lock(&pvr_dev->active_contexts.lock);
+	list_for_each_entry_safe(ctx, tmp_ctx, &pvr_dev->active_contexts.list, active_node) {
+		pvr_context_collect_done_jobs(ctx, &done_jobs);
+		if (!pvr_context_has_in_flight_jobs(ctx))
+			list_del_init(&ctx->active_node);
+	}
+	spin_unlock(&pvr_dev->active_contexts.lock);
+
+	list_for_each_entry_safe(job, tmp_job, &done_jobs, node) {
+		list_del(&job->node);
+		dma_fence_signal(job->done_fence);
+		pvr_job_put(job);
+	}
+}
+
+/**
+ * pvr_context_has_in_flight_jobs() - Check if a context has in-flight jobs
+ * @ctx: Context to check.
+ *
+ * Check if a context has in-flight jobs. Must be called with the
+ * active_contexts.lock held, since any modification to the in_flight list
+ * is protected by this lock.
+ *
+ * Returns:
+ *  * %true if the context has in-flight jobs, or
+ *  * %false otherwise.
+ */
+bool pvr_context_has_in_flight_jobs(struct pvr_context *ctx)
+{
+	lockdep_assert_held(&ctx->pvr_dev->active_contexts.lock);
+
+	switch (ctx->type) {
+	case DRM_PVR_CTX_TYPE_RENDER:
+		return !list_empty(&to_pvr_context_render(ctx)->ctx_geom.queue.jobs.in_flight) ||
+		       !list_empty(&to_pvr_context_render(ctx)->ctx_frag.queue.jobs.in_flight);
+
+	case DRM_PVR_CTX_TYPE_COMPUTE:
+		return !list_empty(&to_pvr_context_compute(ctx)->queue.jobs.in_flight);
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG:
+		return !list_empty(&to_pvr_context_transfer_frag(ctx)->queue.jobs.in_flight);
+
+	default:
+		return false;
+	}
+}
+
+/**
+ * pvr_context_device_init() - Context-related device initialization
+ * @pvr_dev: Device object being initialized.
+ *
+ * Initializes the active_contexts data and the context_work. More context-related
+ * initialization can be added here if needed.
+ */
+void pvr_context_device_init(struct pvr_device *pvr_dev)
+{
+	spin_lock_init(&pvr_dev->active_contexts.lock);
+	INIT_LIST_HEAD(&pvr_dev->active_contexts.list);
+	INIT_WORK(&pvr_dev->context_work, pvr_context_process_worker);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
new file mode 100644
index 000000000000..b38d60a92ad5
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -0,0 +1,412 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_CONTEXT_H__
+#define __PVR_CONTEXT_H__
+
+#include <linux/compiler_attributes.h>
+#include <linux/dma-fence.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
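+/**
+ * enum pvr_context_queue_type - Type of work processed by a context queue
+ * @PVR_CONTEXT_QUEUE_TYPE_GEOMETRY: Geometry work.
+ * @PVR_CONTEXT_QUEUE_TYPE_FRAGMENT: Fragment work.
+ * @PVR_CONTEXT_QUEUE_TYPE_COMPUTE: Compute work.
+ * @PVR_CONTEXT_QUEUE_TYPE_TRANSFER: Transfer work.
+ */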
+enum pvr_context_queue_type {
+	PVR_CONTEXT_QUEUE_TYPE_GEOMETRY,
+	PVR_CONTEXT_QUEUE_TYPE_FRAGMENT,
+	PVR_CONTEXT_QUEUE_TYPE_COMPUTE,
+	PVR_CONTEXT_QUEUE_TYPE_TRANSFER,
+};
+
+/**
+ * struct pvr_context_queue_fence_ctx - Queue fence context
+ *
+ * Used to implement dma_fence_ops. The fence context and the queue are kept
+ * separate even though there's a 1:1 mapping between them. We do that so
+ * we can release the queue and its resources even if some external users
+ * have references to dma_fence objects allocated here. In that case, all fences
+ * should have been signaled by the driver before the queue is destroyed, and the
+ * fence context will fade away as soon as all references to queue fences allocated
+ * by this context are released.
+ */
+struct pvr_context_queue_fence_ctx {
+	/**
+	 * @refcount: Tracks the number of active users of this fence context.
+	 *
+	 * The queue owns a ref, as well as any dma_fence object allocated from this
+	 * context. This allows us to get a module reference and avoids any module
+	 * removal while external users still own a ref to the fence, which would
+	 * cause access to freed memory since the dma_fence_ops are part of this
+	 * module, even though the fence object is allocated dynamically.
+	 */
+	struct kref refcount;
+
+	/** @type: Queue type. Used to implement get_timeline_name(). */
+	enum pvr_context_queue_type type;
+
+	/** @id: Context ID allocated with dma_fence_context_alloc(). */
+	u64 id;
+
+	/** @seqno: Sequence number incremented each time a fence is created. */
+	atomic_t seqno;
+
+	/** @lock: Lock used to synchronize access to fences allocated by this context. */
+	spinlock_t lock;
+
+	/** @timeline_ufo: Timeline UFO for the context queue. */
+	struct {
+		 /** @fw_obj: FW object representing the UFO value. */
+		struct pvr_fw_object *fw_obj;
+
+		/** @value: CPU mapping of the UFO value. */
+		u32 *value;
+	} timeline_ufo;
+};
+
+/**
+ * struct pvr_context_queue_fence - Queue fence object
+ *
+ * Allocated anytime a job is created. This fence object will be signaled when the
+ * underlying timeline UFO reaches the sequence number specified in this object. No
+ * direct access to the UFO object here, we do software signaling instead, which
+ * simplifies things a bit.
+ *
+ * If there's any need to add native UFO support, we can extend the
+ * pvr_context_queue_fence_ctx object to point to the queue, so we get access to
+ * the UFO, and can queue UFO waits directly at the CCCB level, thus killing some
+ * CPU waits done in pvr_job_deps_done(). Note that we will still need
+ * pvr_job_deps_done() to deal with non-UFO fences, so it may not be worth the
+ * extra complexity.
+ */
+struct pvr_context_queue_fence {
+	/** @base: Base dma_fence object. */
+	struct dma_fence base;
+
+	/** @ctx: Fence context that created this fence object. */
+	struct pvr_context_queue_fence_ctx *ctx;
+};
+
+struct pvr_context_queue_fence *to_pvr_context_queue_fence(struct dma_fence *f);
+
+/**
+ * struct pvr_context_queue - Queue object.
+ *
+ * This is where we keep track of jobs that are or will be submitted to the CCCB.
+ * We also have a timeline UFO per queue to determine when jobs are done.
+ */
+struct pvr_context_queue {
+	/** @fence_ctx: Internal fence context object. */
+	struct pvr_context_queue_fence_ctx *fence_ctx;
+
+	/** @jobs: List of jobs queued to this context queue and the lock protecting it. */
+	struct {
+		/** @lock: Lock protecting the in_flight and pending lists. */
+		spinlock_t lock;
+
+		/** @in_flight: List of in-flight jobs, waiting for signaling. */
+		struct list_head in_flight;
+
+		/**
+		 * @pending: List of pending jobs.
+		 *
+		 * Jobs in there are waiting for their dependencies to be signaled or for
+		 * some space in the CCCB to queue their commands.
+		 */
+		struct list_head pending;
+	} jobs;
+};
+
+/**
+ * struct pvr_context_geom - Geometry render context data
+ */
+struct pvr_context_geom {
+	/**
+	 * @ctx_state_obj: FW object representing context register state.
+	 */
+	struct pvr_fw_object *ctx_state_obj;
+
+	/** @queue: Geometry queue. */
+	struct pvr_context_queue queue;
+
+	/** @cccb: Client Circular Command Buffer. */
+	struct pvr_cccb cccb;
+};
+
+/**
+ * struct pvr_context_frag - Fragment render context data
+ */
+struct pvr_context_frag {
+	/**
+	 * @ctx_state_obj: FW object representing context register state.
+	 */
+	struct pvr_fw_object *ctx_state_obj;
+
+	/** @queue: Fragment queue. */
+	struct pvr_context_queue queue;
+
+	/** @cccb: Client Circular Command Buffer. */
+	struct pvr_cccb cccb;
+};
+
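+/**
+ * enum pvr_context_priority - Device-side context priority level
+ * @PVR_CTX_PRIORITY_LOW: Low priority.
+ * @PVR_CTX_PRIORITY_MEDIUM: Medium priority; the remapping of
+ *  %DRM_PVR_CTX_PRIORITY_NORMAL.
+ * @PVR_CTX_PRIORITY_HIGH: High priority; restricted to %CAP_SYS_NICE or DRM
+ *  master clients.
+ */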
+enum pvr_context_priority {
+	PVR_CTX_PRIORITY_LOW = 0,
+	PVR_CTX_PRIORITY_MEDIUM,
+	PVR_CTX_PRIORITY_HIGH,
+};
+
+/**
+ * struct pvr_context - Context data
+ */
+struct pvr_context {
+	/** @ref_count: Refcount for context. */
+	struct kref ref_count;
+
+	/** @pvr_dev: Pointer to owning device. */
+	struct pvr_device *pvr_dev;
+
+	/** @vm_ctx: Pointer to associated VM context. */
+	struct pvr_vm_context *vm_ctx;
+
+	/** @type: Type of context. */
+	enum drm_pvr_ctx_type type;
+
+	/** @flags: Context flags. */
+	u32 flags;
+
+	/** @priority: Context priority. */
+	enum pvr_context_priority priority;
+
+	/** @ctx_id: FW context ID. */
+	u32 ctx_id;
+
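+	/** @process_name: Name of the owning process, recorded for coredumps. */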
+	char process_name[PVR_COREDUMP_PROCESS_NAME_LEN];
+
+	/** @active_node: Used to queue the context to the active context list. */
+	struct list_head active_node;
+
+	/** @job_pending_work: Job pending worker, used to evaluate job dependencies. */
+	struct work_struct job_pending_work;
+
+	/** @destroyed: True when the context has been destroyed. */
+	atomic_t destroyed;
+};
+
+/**
+ * struct pvr_context_render - Render context data
+ */
+struct pvr_context_render {
+	/** @base: Base context structure. */
+	struct pvr_context base;
+
+	/** @ctx_geom: Geometry context data. */
+	struct pvr_context_geom ctx_geom;
+
+	/** @ctx_frag: Fragment context data. */
+	struct pvr_context_frag ctx_frag;
+
+	/** @fw_obj: FW object representing FW-side context data. */
+	struct pvr_fw_object *fw_obj;
+};
+
+/**
+ * struct pvr_context_compute - Compute context data
+ */
+struct pvr_context_compute {
+	/** @base: Base context structure. */
+	struct pvr_context base;
+
+	/** @fw_obj: FW object representing FW-side context data. */
+	struct pvr_fw_object *fw_obj;
+
+	/** @queue: Compute queue. */
+	struct pvr_context_queue queue;
+
+	/**
+	 * @ctx_state_obj: FW object representing context register state.
+	 */
+	struct pvr_fw_object *ctx_state_obj;
+
+	/** @cccb: Client Circular Command Buffer. */
+	struct pvr_cccb cccb;
+};
+
+/**
+ * struct pvr_context_transfer - Transfer context data
+ */
+struct pvr_context_transfer {
+	/** @base: Base context structure. */
+	struct pvr_context base;
+
+	/** @fw_obj: FW object representing FW-side context data. */
+	struct pvr_fw_object *fw_obj;
+
+	/** @queue: Transfer queue. */
+	struct pvr_context_queue queue;
+
+	/**
+	 * @ctx_state_obj: FW object representing context register state.
+	 */
+	struct pvr_fw_object *ctx_state_obj;
+
+	/** @cccb: Client Circular Command Buffer. */
+	struct pvr_cccb cccb;
+};
+
+struct pvr_context *
+pvr_create_render_context(struct pvr_file *pvr_file,
+			  struct drm_pvr_ioctl_create_context_args *args,
+			  u32 id);
+struct pvr_context *
+pvr_create_compute_context(struct pvr_file *pvr_file,
+			   struct drm_pvr_ioctl_create_context_args *args,
+			   u32 id);
+struct pvr_context *
+pvr_create_transfer_context(struct pvr_file *pvr_file,
+			    struct drm_pvr_ioctl_create_context_args *args,
+			    u32 id);
+
+static __always_inline struct pvr_context *
+from_pvr_context_render(struct pvr_context_render *ctx_render)
+{
+	return &ctx_render->base;
+}
+
+static __always_inline struct pvr_context_render *
+to_pvr_context_render(struct pvr_context *ctx)
+{
+	if (ctx->type != DRM_PVR_CTX_TYPE_RENDER)
+		return NULL;
+
+	return container_of(ctx, struct pvr_context_render, base);
+}
+
+static __always_inline struct pvr_context *
+from_pvr_context_compute(struct pvr_context_compute *ctx_compute)
+{
+	return &ctx_compute->base;
+}
+
+static __always_inline struct pvr_context_compute *
+to_pvr_context_compute(struct pvr_context *ctx)
+{
+	if (ctx->type != DRM_PVR_CTX_TYPE_COMPUTE)
+		return NULL;
+
+	return container_of(ctx, struct pvr_context_compute, base);
+}
+
+static __always_inline struct pvr_context *
+from_pvr_context_transfer(struct pvr_context_transfer *ctx_transfer)
+{
+	return &ctx_transfer->base;
+}
+
+static __always_inline struct pvr_context_transfer *
+to_pvr_context_transfer_frag(struct pvr_context *ctx)
+{
+	if (ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG)
+		return NULL;
+
+	return container_of(ctx, struct pvr_context_transfer, base);
+}
+
+/**
+ * pvr_context_get() - Take additional reference on context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ *  * The requested context on success, or
+ *  * %NULL if no context pointer passed.
+ */
+static __always_inline struct pvr_context *
+pvr_context_get(struct pvr_context *ctx)
+{
+	if (ctx)
+		kref_get(&ctx->ref_count);
+
+	return ctx;
+}
+
+/**
+ * pvr_context_lookup() - Lookup context pointer from handle and file.
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Context handle.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ *  * The requested context on success, or
+ *  * %NULL on failure (context does not exist, or does not belong to @pvr_file).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_context *ctx;
+
+	/* Take the array lock to protect against context removal.  */
+	xa_lock(&pvr_file->ctx_handles);
+	ctx = pvr_context_get(xa_load(&pvr_file->ctx_handles, handle));
+	xa_unlock(&pvr_file->ctx_handles);
+
+	return ctx;
+}
+
+/**
+ * pvr_context_lookup_id() - Lookup context pointer from ID.
+ * @pvr_dev: Device pointer.
+ * @id: FW context ID.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ *  * The requested context on success, or
+ *  * %NULL on failure (context does not exist).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+	struct pvr_context *ctx;
+
+	/* Take the array lock to protect against context removal.  */
+	xa_lock(&pvr_dev->ctx_ids);
+
+	/* Contexts are removed from the ctx_ids set in the context release path,
+	 * meaning the ref_count reached zero before they get removed. We need
+	 * to make sure we're not trying to acquire a context that's being
+	 * destroyed.
+	 */
+	ctx = xa_load(&pvr_dev->ctx_ids, id);
+	if (!ctx || !kref_get_unless_zero(&ctx->ref_count))
+		ctx = NULL;
+
+	xa_unlock(&pvr_dev->ctx_ids);
+
+	return ctx;
+}
+
+struct pvr_context_queue_fence_ctx *
+pvr_context_queue_fence_ctx_from_fence(struct dma_fence *fence);
+
+void pvr_context_put(struct pvr_context *ctx);
+
+int pvr_context_destroy(struct pvr_file *pvr_file, u32 handle);
+
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file);
+
+bool pvr_context_has_in_flight_jobs(struct pvr_context *ctx);
+
+struct dma_fence *pvr_context_queue_fence_create(struct pvr_context_queue *queue);
+
+void pvr_context_pending_job_event(struct pvr_context *ctx);
+
+void pvr_context_device_init(struct pvr_device *pvr_dev);
+
+#endif /* __PVR_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
new file mode 100644
index 000000000000..02e44c070861
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -0,0 +1,53 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_debugfs.h"
+
+#include "pvr_device.h"
+#include "pvr_fw_trace.h"
+#include "pvr_params.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+static const struct pvr_debugfs_entry pvr_debugfs_entries[] = {
+	{"pvr_params", pvr_params_debugfs_init},
+	{"pvr_fw", pvr_fw_trace_debugfs_init},
+};
+
+void
+pvr_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *drm_dev = minor->dev;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct dentry *root = minor->debugfs_root;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+		const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
+		struct dentry *dir;
+
+		dir = debugfs_create_dir(entry->name, root);
+		if (IS_ERR(dir)) {
+			drm_warn(drm_dev,
+				 "failed to create debugfs dir '%s' (err=%d)",
+				 entry->name, (int)PTR_ERR(dir));
+			continue;
+		}
+
+		entry->init(pvr_dev, dir);
+	}
+}
+
+/*
+ * Since all entries are created under &drm_minor->debugfs_root, there's no
+ * need for a pvr_debugfs_fini() as DRM will clean up everything under its root
+ * automatically.
+ */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.h b/drivers/gpu/drm/imagination/pvr_debugfs.h
new file mode 100644
index 000000000000..adf2f7d7a0a9
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.h
@@ -0,0 +1,29 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DEBUGFS_H__
+#define __PVR_DEBUGFS_H__
+
+/* Forward declaration from <drm/drm_drv.h>. */
+struct drm_minor;
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
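+/**
+ * struct pvr_debugfs_entry - Name/init pair for a driver debugfs directory
+ * @name: Name of the directory to create under the DRM debugfs root.
+ * @init: Callback used to populate the directory.
+ */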
+struct pvr_debugfs_entry {
+	const char *name;
+	void (*init)(struct pvr_device *pvr_dev, struct dentry *dir);
+};
+
+void pvr_debugfs_init(struct drm_minor *minor);
+#else /* defined(CONFIG_DEBUG_FS) */
+#include <linux/compiler_attributes.h>
+
+static __always_inline void pvr_debugfs_init(struct drm_minor *minor) {}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __PVR_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
new file mode 100644
index 000000000000..200c25f1499b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -0,0 +1,762 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_power.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_stream.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Major number for the supported version of the firmware. */
+#define PVR_FW_VERSION_MAJOR 1
+
+/**
+ * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's
+ * control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->regs.
+ *
+ * This method of mapping the device control registers into memory ensures that
+ * they are unmapped when the driver is detached (i.e. no explicit cleanup is
+ * required).
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by devm_platform_get_and_ioremap_resource().
+ */
+static int
+pvr_device_reg_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+	struct resource *regs_resource;
+	void __iomem *regs;
+	int err;
+
+	pvr_dev->regs_resource = NULL;
+	pvr_dev->regs = NULL;
+
+	regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, &regs_resource);
+	if (IS_ERR(regs)) {
+		err = PTR_ERR(regs);
+		drm_err(drm_dev, "failed to ioremap gpu registers (err=%d)\n",
+			err);
+		return err;
+	}
+
+	pvr_dev->regs = regs;
+	pvr_dev->regs_resource = regs_resource;
+
+	return 0;
+}
+
+/**
+ * pvr_device_reg_fini() - Deinitialize kernel access to a PowerVR device's
+ * control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This is essentially a no-op, since pvr_device_reg_init() already ensures that
+ * struct pvr_device->regs is unmapped when the device is detached. This
+ * function just sets struct pvr_device->regs to %NULL.
+ */
+static __always_inline void
+pvr_device_reg_fini(struct pvr_device *pvr_dev)
+{
+	pvr_dev->regs = NULL;
+}
+
+/**
+ * pvr_device_clk_init() - Initialize clocks required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and
+ * struct pvr_device->mem_clk.
+ *
+ * Three clocks are used by the PowerVR device: core, sys and mem. Only the
+ * core clock is mandatory; the sys and mem clocks are optional and will be
+ * %NULL if not provided. On return, this function guarantees that the clocks
+ * are in one of the following states:
+ *
+ *  * All successfully initialized,
+ *  * Core errored, sys and mem uninitialized,
+ *  * Core deinitialized, sys errored, mem uninitialized, or
+ *  * Core and sys deinitialized, mem errored.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error returned by devm_clk_get(), or
+ *  * Any error returned by clk_prepare().
+ */
+static int pvr_device_clk_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct clk *core_clk;
+	struct clk *sys_clk;
+	struct clk *mem_clk;
+	int err;
+
+	pvr_dev->core_clk = NULL;
+	pvr_dev->sys_clk = NULL;
+	pvr_dev->mem_clk = NULL;
+
+	core_clk = devm_clk_get(drm_dev->dev, "core_clk");
+	if (IS_ERR(core_clk)) {
+		err = PTR_ERR(core_clk);
+		drm_err(drm_dev, "failed to get core_clk (err=%d)\n", err);
+		goto err_out;
+	}
+
+	sys_clk = devm_clk_get(drm_dev->dev, "sys_clk");
+	if (IS_ERR(sys_clk))
+		sys_clk = NULL;
+
+	mem_clk = devm_clk_get(drm_dev->dev, "mem_clk");
+	if (IS_ERR(mem_clk))
+		mem_clk = NULL;
+
+	err = clk_prepare(core_clk);
+	if (err)
+		goto err_out;
+
+	if (sys_clk) {
+		err = clk_prepare(sys_clk);
+		if (err)
+			goto err_deinit_core_clk;
+	}
+
+	if (mem_clk) {
+		err = clk_prepare(mem_clk);
+		if (err)
+			goto err_deinit_sys_clk;
+	}
+
+	pvr_dev->core_clk = core_clk;
+	pvr_dev->sys_clk = sys_clk;
+	pvr_dev->mem_clk = mem_clk;
+
+	return 0;
+
+err_deinit_sys_clk:
+	if (sys_clk)
+		clk_unprepare(sys_clk);
+err_deinit_core_clk:
+	clk_unprepare(core_clk);
+err_out:
+	return err;
+}
+
+/**
+ * pvr_device_clk_fini() - Deinitialize clocks required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_clk_fini(struct pvr_device *pvr_dev)
+{
+	if (pvr_dev->mem_clk)
+		clk_unprepare(pvr_dev->mem_clk);
+	if (pvr_dev->sys_clk)
+		clk_unprepare(pvr_dev->sys_clk);
+	clk_unprepare(pvr_dev->core_clk);
+
+	pvr_dev->core_clk = NULL;
+	pvr_dev->sys_clk = NULL;
+	pvr_dev->mem_clk = NULL;
+}
+
+/**
+ * pvr_device_regulator_init() - Initialise regulator required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * The regulator is not a required devicetree property. If it is not present then this function will
+ * succeed, but &pvr_device->regulator will be %NULL.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by devm_regulator_get().
+ */
+static int
+pvr_device_regulator_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct regulator *regulator;
+	int err;
+
+	regulator = devm_regulator_get(drm_dev->dev, "power");
+	if (IS_ERR(regulator)) {
+		err = PTR_ERR(regulator);
+		/* Regulator is not required, so ENODEV is allowed here. */
+		if (err != -ENODEV)
+			goto err_out;
+		regulator = NULL;
+	}
+
+	pvr_dev->regulator = regulator;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_device_clk_core_get_freq - Get current PowerVR device core clock frequency
+ * @pvr_dev: Target PowerVR device.
+ * @freq_out: Pointer to location to store core clock frequency in Hz.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if frequency can not be determined.
+ */
+int
+pvr_device_clk_core_get_freq(struct pvr_device *pvr_dev, u32 *freq_out)
+{
+	u32 freq = clk_get_rate(pvr_dev->core_clk);
+
+	if (!freq)
+		return -EINVAL;
+
+	*freq_out = freq;
+	return 0;
+}
+
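+/**
+ * pvr_meta_irq_handler() - GPU interrupt handler
+ * @irq: IRQ number.
+ * @data: Pointer to the target PowerVR device.
+ *
+ * Acknowledges the interrupt and, if the firmware has booted, queues the
+ * FWCCB and context workers and wakes any threads waiting on KCCB returns.
+ *
+ * Returns:
+ *  * %IRQ_HANDLED if the interrupt originated from the GPU, or
+ *  * %IRQ_NONE if it was spurious.
+ */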
+static irqreturn_t pvr_meta_irq_handler(int irq, void *data)
+{
+	struct pvr_device *pvr_dev = data;
+
+	if (!pvr_dev->fw_dev.funcs->check_and_ack_irq(pvr_dev))
+		return IRQ_NONE; /* Spurious IRQ - ignore. */
+
+	/* Only process IRQ work if FW is currently running. */
+	if (pvr_dev->fw_dev.booted) {
+		queue_work(pvr_dev->irq_wq, &pvr_dev->fwccb_work);
+		wake_up(&pvr_dev->kccb_rtn_q);
+		queue_work(pvr_dev->irq_wq, &pvr_dev->context_work);
+		pvr_power_check_idle(pvr_dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * Any error returned by platform_get_irq_byname(), or
+ *  * Any error returned by request_irq().
+ */
+static int
+pvr_device_irq_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+	int err;
+
+	init_waitqueue_head(&pvr_dev->kccb_rtn_q);
+
+	pvr_dev->irq_wq = alloc_workqueue("powervr-irq", WQ_UNBOUND, 0);
+	if (!pvr_dev->irq_wq) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	pvr_dev->irq = platform_get_irq_byname(plat_dev, "gpu");
+	if (pvr_dev->irq < 0) {
+		err = pvr_dev->irq;
+		goto err_destroy_wq;
+	}
+
+	err = request_irq(pvr_dev->irq, pvr_meta_irq_handler, IRQF_SHARED, "powervr", pvr_dev);
+	if (err)
+		goto err_destroy_wq;
+
+	return 0;
+
+err_destroy_wq:
+	destroy_workqueue(pvr_dev->irq_wq);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_irq_fini(struct pvr_device *pvr_dev)
+{
+	free_irq(pvr_dev->irq, pvr_dev);
+	destroy_workqueue(pvr_dev->irq_wq);
+}
+
+/**
+ * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
+ * @pvr_dev: Target PowerVR device.
+ * @base: First part of the filename.
+ * @major: Major version number.
+ *
+ * A PowerVR firmware filename consists of three parts separated by underscores
+ * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
+ * of @base, the second part is the hardware version string derived from @pvr_dev
+ * and the final part is the firmware version number constructed from @major with
+ * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
+ *
+ * The returned string will have been slab allocated and must be freed with
+ * kfree().
+ *
+ * Return:
+ *  * The constructed filename on success, or
+ *  * %NULL on allocation failure.
+ */
+static char *
+pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base,
+			    u8 major)
+{
+	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+
+	return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b,
+			 gpu_id->v, gpu_id->n, gpu_id->c, major);
+}
+
+/**
+ * pvr_request_firmware() - Load firmware for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * See pvr_build_firmware_filename() for details on firmware file naming.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENOMEM if the firmware filename could not be allocated, or
+ *  * Any error returned by request_firmware().
+ */
+static int
+pvr_request_firmware(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = &pvr_dev->base;
+	char *filename;
+	const struct firmware *fw;
+	int err;
+
+	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
+					       PVR_FW_VERSION_MAJOR);
+	if (!filename)
+		return -ENOMEM;
+
+	/*
+	 * request_firmware() takes a copy of the filename string, so we can
+	 * free ours before returning.
+	 */
+	err = request_firmware(&fw, filename, pvr_dev->base.dev);
+	if (err) {
+		drm_err(drm_dev, "failed to load firmware %s (err=%d)\n",
+			filename, err);
+		goto err_free_filename;
+	}
+
+	drm_info(drm_dev, "loaded firmware %s\n", filename);
+	kfree(filename);
+
+	pvr_dev->fw_dev.firmware = fw;
+
+	return 0;
+
+err_free_filename:
+	kfree(filename);
+
+	return err;
+}
+
+/**
+ * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets struct pvr_device->gpu_id.
+ */
+static void
+pvr_load_gpu_id(struct pvr_device *pvr_dev)
+{
+	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+	u64 bvnc;
+
+	/*
+	 * Try reading the BVNC using the newer (cleaner) method first. If the
+	 * B value is zero, fall back to the older method.
+	 */
+	bvnc = PVR_CR_READ64(pvr_dev, CORE_ID__PBVNC);
+
+	gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID);
+	if (gpu_id->b != 0) {
+		gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID);
+		gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS);
+		gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID);
+	} else {
+		u32 core_rev = PVR_CR_READ32(pvr_dev, CORE_REVISION);
+		u32 core_id = PVR_CR_READ32(pvr_dev, CORE_ID);
+		u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG);
+
+		gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR);
+		gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR);
+		gpu_id->n = FIELD_GET(0xFF00, core_id_config);
+		gpu_id->c = FIELD_GET(0x00FF, core_id_config);
+	}
+}
+
+/**
+ * pvr_set_dma_info() - Set PowerVR device DMA information
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Sets the DMA mask and max segment size for the PowerVR device.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error returned by PVR_FEATURE_VALUE(), or
+ *  * Any error returned by dma_set_mask().
+ */
+static int
+pvr_set_dma_info(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	u16 phys_bus_width;
+	int err;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+	if (err) {
+		drm_err(drm_dev, "Failed to get device physical bus width\n");
+		return err;
+	}
+
+	/*
+	 * See the comment on &pvr_drm_driver.prime_fd_to_handle for an
+	 * explanation of the dma_set_mask function and dma_set_max_seg_size
+	 * calls below.
+	 */
+	err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width));
+	if (err) {
+		drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err);
+		return err;
+	}
+
+	dma_set_max_seg_size(drm_dev->dev, UINT_MAX);
+
+	return 0;
+}
+
+/**
+ * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * The following steps are taken to ensure the device is ready:
+ *
+ *  1. Read the hardware version information from control registers,
+ *  2. Initialise the hardware feature information,
+ *  3. Setup the device DMA information,
+ *  4. Setup the device-scoped memory context, and
+ *  5. Load firmware into the device.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENODEV if the GPU is not supported,
+ *  * -%EINVAL if the firmware processor type is not recognised,
+ *  * Any error returned by pvr_set_dma_info(),
+ *  * Any error returned by pvr_vm_create_context(),
+ *  * Any error returned by pvr_request_firmware(), or
+ *  * Any error returned by pvr_fw_init().
+ */
+static int
+pvr_device_gpu_init(struct pvr_device *pvr_dev)
+{
+	int err;
+
+	pvr_load_gpu_id(pvr_dev);
+
+	err = pvr_device_info_init(pvr_dev);
+	if (err)
+		goto err_out;
+
+	if (PVR_HAS_FEATURE(pvr_dev, meta)) {
+		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META;
+	} else if (PVR_HAS_FEATURE(pvr_dev, mips)) {
+		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS;
+	} else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor)) {
+		pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV;
+	} else {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	pvr_stream_create_musthave_masks(pvr_dev);
+
+	err = pvr_set_dma_info(pvr_dev);
+	if (err)
+		goto err_out;
+
+	pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false);
+	if (IS_ERR(pvr_dev->kernel_vm_ctx)) {
+		err = PTR_ERR(pvr_dev->kernel_vm_ctx);
+		goto err_out;
+	}
+
+	err = pvr_request_firmware(pvr_dev);
+	if (err)
+		goto err_vm_ctx_put;
+
+	err = pvr_fw_init(pvr_dev);
+	if (err)
+		goto err_release_firmware;
+
+	return 0;
+
+err_release_firmware:
+	release_firmware(pvr_dev->fw_dev.firmware);
+
+err_vm_ctx_put:
+	pvr_vm_context_put(pvr_dev->kernel_vm_ctx);
+	pvr_dev->kernel_vm_ctx = NULL;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_gpu_fini(struct pvr_device *pvr_dev)
+{
+	pvr_fw_fini(pvr_dev);
+	release_firmware(pvr_dev->fw_dev.firmware);
+	WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx));
+	pvr_dev->kernel_vm_ctx = NULL;
+}
+
+/**
+ * pvr_device_init() - Initialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * If this function returns successfully, the device will have been fully
+ * initialized. Otherwise, any parts of the device initialized before an error
+ * occurs will be de-initialized before returning.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error returned by pvr_device_params_init(),
+ *  * Any error returned by pvr_device_clk_init(),
+ *  * Any error returned by pvr_device_regulator_init(),
+ *  * Any error returned by pm_runtime_resume_and_get(),
+ *  * Any error returned by pvr_device_reg_init(),
+ *  * Any error returned by pvr_device_irq_init(), or
+ *  * Any error returned by pvr_device_gpu_init().
+ */
+int
+pvr_device_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct device *dev = drm_dev->dev;
+	int err;
+
+	/*
+	 * Setup device parameters. We do this first in case other steps
+	 * depend on them.
+	 */
+	err = pvr_device_params_init(&pvr_dev->params);
+	if (err)
+		return err;
+
+	/* Prepare the clocks required for the device to operate. */
+	err = pvr_device_clk_init(pvr_dev);
+	if (err)
+		goto err_out;
+
+	err = pvr_device_regulator_init(pvr_dev);
+	if (err)
+		goto err_device_clk_fini;
+
+	/* Explicitly power the GPU so we can access control registers before the FW is booted. */
+	err = pm_runtime_resume_and_get(dev);
+	if (err)
+		goto err_device_clk_fini;
+
+	/* Map the control registers into memory. */
+	err = pvr_device_reg_init(pvr_dev);
+	if (err)
+		goto err_pm_runtime_put;
+
+	err = pvr_device_irq_init(pvr_dev);
+	if (err)
+		goto err_device_reg_fini;
+
+	/* Perform GPU-specific initialization steps. */
+	err = pvr_device_gpu_init(pvr_dev);
+	if (err)
+		goto err_device_irq_fini;
+
+	pm_runtime_put_autosuspend(dev);
+
+	return 0;
+
+err_device_irq_fini:
+	pvr_device_irq_fini(pvr_dev);
+
+err_device_reg_fini:
+	pvr_device_reg_fini(pvr_dev);
+
+err_pm_runtime_put:
+	pm_runtime_put_sync_suspend(dev);
+
+err_device_clk_fini:
+	pvr_device_clk_fini(pvr_dev);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_device_fini() - Deinitialize a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_device_fini(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct device *dev = drm_dev->dev;
+
+	/*
+	 * Deinitialization stages are performed in reverse order compared to
+	 * the initialization stages in pvr_device_init().
+	 */
+	pm_runtime_get_sync(dev);
+	pvr_device_gpu_fini(pvr_dev);
+	pvr_device_irq_fini(pvr_dev);
+	pvr_device_reg_fini(pvr_dev);
+	pm_runtime_put_sync_suspend(dev);
+	pvr_device_clk_fini(pvr_dev);
+
+	/* TODO: Remaining deinitialization steps */
+}
+
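+/**
+ * pvr_device_has_uapi_quirk() - Check whether a UAPI-visible quirk is present
+ * @pvr_dev: Device pointer.
+ * @quirk: Quirk number to look up.
+ *
+ * Returns:
+ *  * %true if the quirk is present on this device, or
+ *  * %false if it is absent or not exposed via the UAPI.
+ */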
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk)
+{
+	switch (quirk) {
+	case 47217:
+		return PVR_HAS_QUIRK(pvr_dev, 47217);
+	case 48545:
+		return PVR_HAS_QUIRK(pvr_dev, 48545);
+	case 49927:
+		return PVR_HAS_QUIRK(pvr_dev, 49927);
+	case 51764:
+		return PVR_HAS_QUIRK(pvr_dev, 51764);
+	case 62269:
+		return PVR_HAS_QUIRK(pvr_dev, 62269);
+	default:
+		return false;
+	}
+}
+
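+/**
+ * pvr_device_has_uapi_enhancement() - Check whether a UAPI-visible enhancement
+ * is present
+ * @pvr_dev: Device pointer.
+ * @enhancement: Enhancement number to look up.
+ *
+ * Returns:
+ *  * %true if the enhancement is present on this device, or
+ *  * %false if it is absent or not exposed via the UAPI.
+ */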
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement)
+{
+	switch (enhancement) {
+	case 35421:
+		return PVR_HAS_ENHANCEMENT(pvr_dev, 35421);
+	case 42064:
+		return PVR_HAS_ENHANCEMENT(pvr_dev, 42064);
+	default:
+		return false;
+	}
+}
+
+/**
+ * pvr_device_has_feature() - Look up device feature based on feature definition
+ * @pvr_dev: Device pointer.
+ * @feature: Feature to look up. Should be one of %PVR_FEATURE_*.
+ *
+ * Returns:
+ *  * %true if feature is present on device, or
+ *  * %false if feature is not present on device.
+ */
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature)
+{
+	switch (feature) {
+	case PVR_FEATURE_CLUSTER_GROUPING:
+		return PVR_HAS_FEATURE(pvr_dev, cluster_grouping);
+
+	case PVR_FEATURE_COMPUTE_MORTON_CAPABLE:
+		return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable);
+
+	case PVR_FEATURE_FB_CDC_V4:
+		return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4);
+
+	case PVR_FEATURE_GPU_MULTICORE_SUPPORT:
+		return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support);
+
+	case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE:
+		return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode);
+
+	case PVR_FEATURE_S7_TOP_INFRASTRUCTURE:
+		return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure);
+
+	case PVR_FEATURE_TESSELLATION:
+		return PVR_HAS_FEATURE(pvr_dev, tessellation);
+
+	case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS:
+		return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers);
+
+	case PVR_FEATURE_VDM_DRAWINDIRECT:
+		return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect);
+
+	case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS:
+		return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls);
+
+	case PVR_FEATURE_ZLS_SUBTILE:
+		return PVR_HAS_FEATURE(pvr_dev, zls_subtile);
+
+	/* Derived features. */
+	case PVR_FEATURE_CDM_USER_MODE_QUEUE: {
+		u8 cdm_control_stream_format = 0;
+
+		PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format);
+		return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4);
+	}
+
+	case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP:
+		if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) {
+			u8 fbcdc_algorithm = 0;
+
+			PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm);
+			return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4));
+		}
+		return false;
+
+	default:
+		WARN(true, "Looking up undefined feature %u\n", feature);
+		return false;
+	}
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
new file mode 100644
index 000000000000..6d6f60bd5648
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -0,0 +1,760 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DEVICE_H__
+#define __PVR_DEVICE_H__
+
+#include "pvr_ccb.h"
+#include "pvr_device_info.h"
+#include "pvr_fw.h"
+#include "pvr_params.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+#include "pvr_vendor.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/xarray.h>
+
+/* Forward declaration from <linux/clk.h>. */
+struct clk;
+
+/* Forward declaration from <linux/firmware.h>. */
+struct firmware;
+
+/* Forward declaration from <linux/regulator/consumer.h>. */
+struct regulator;
+
+/* Forward declaration from "pvr_fw.h". */
+struct pvr_fw_funcs;
+
+/**
+ * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ */
+struct pvr_gpu_id {
+	u16 b, v, n, c;
+};
+
+/**
+ * struct pvr_fw_version - Firmware version information
+ * @major: Major version number.
+ * @minor: Minor version number.
+ */
+struct pvr_fw_version {
+	u16 major, minor;
+};
+
+/**
+ * struct pvr_vendor - Vendor-specific data for &struct pvr_device
+ */
+struct pvr_vendor {
+	/**
+	 * @callbacks: Callback functions for vendor specific functionality.
+	 *
+	 * May be %NULL.
+	 */
+	const struct pvr_vendor_callbacks *callbacks;
+
+	/** @data: Vendor specific data. */
+	void *data;
+};
+
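+/**
+ * enum pvr_power_state - Power state of a PowerVR device
+ * @PVR_POWER_STATE_OFF: Device is powered off.
+ * @PVR_POWER_STATE_ON: Device is powered on.
+ */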
+enum pvr_power_state {
+	PVR_POWER_STATE_OFF = 0,
+	PVR_POWER_STATE_ON
+};
+
+/**
+ * struct pvr_device - powervr-specific wrapper for &struct drm_device
+ */
+struct pvr_device {
+	/**
+	 * @base: The underlying &struct drm_device.
+	 *
+	 * Do not access this member directly, instead call
+	 * from_pvr_device().
+	 */
+	struct drm_device base;
+
+	/** @gpu_id: GPU ID detected at runtime. */
+	struct pvr_gpu_id gpu_id;
+
+	/**
+	 * @features: Hardware feature information.
+	 *
+	 * Do not access this member directly, instead use PVR_HAS_FEATURE()
+	 * or PVR_FEATURE_VALUE() macros.
+	 */
+	struct pvr_device_features features;
+
+	/**
+	 * @quirks: Hardware quirk information.
+	 *
+	 * Do not access this member directly, instead use PVR_HAS_QUIRK().
+	 */
+	struct pvr_device_quirks quirks;
+
+	/**
+	 * @enhancements: Hardware enhancement information.
+	 *
+	 * Do not access this member directly, instead use
+	 * PVR_HAS_ENHANCEMENT().
+	 */
+	struct pvr_device_enhancements enhancements;
+
+	/** @fw_version: Firmware version detected at runtime. */
+	struct pvr_fw_version fw_version;
+
+	/** @regs_resource: Resource representing device control registers. */
+	struct resource *regs_resource;
+
+	/**
+	 * @regs: Device control registers.
+	 *
+	 * These are mapped into memory when the device is initialized; that
+	 * location is where this pointer points.
+	 */
+	void __iomem *regs;
+
+	/** @core_clk: General core clock. */
+	struct clk *core_clk;
+
+	/** @sys_clk: System bus clock. */
+	struct clk *sys_clk;
+
+	/** @mem_clk: Memory clock. */
+	struct clk *mem_clk;
+
+	/** @regulator: Power regulator. */
+	struct regulator *regulator;
+
+	/** @irq: IRQ number. */
+	int irq;
+
+	/** @irq_wq: Workqueue for actions triggered off the IRQ handler. */
+	struct workqueue_struct *irq_wq;
+
+	/** @kccb_rtn_q: Waitqueue for KCCB command return waiters. */
+	wait_queue_head_t kccb_rtn_q;
+
+	/** @vendor: Vendor specific device data. */
+	struct pvr_vendor vendor;
+
+	/** @kccb: Kernel CCB. */
+	struct pvr_ccb kccb;
+
+	/** @fwccb: Firmware CCB. */
+	struct pvr_ccb fwccb;
+
+	/** @fwccb_work: Work item for FWCCB processing. */
+	struct work_struct fwccb_work;
+
+	/** @delayed_idle_work: Delayed work item for idle checking. */
+	struct delayed_work delayed_idle_work;
+
+	/** @kccb_rtn_obj: Object representing KCCB return slots. */
+	struct pvr_fw_object *kccb_rtn_obj;
+
+	/**
+	 * @kccb_rtn: Pointer to CPU mapping of KCCB return slots. Must only be
+	 *            accessed using READ_ONCE()/WRITE_ONCE().
+	 */
+	u32 *kccb_rtn;
+
+	/**
+	 * @kernel_vm_ctx: Virtual memory context used for kernel mappings.
+	 *
+	 * This is used for mappings in the firmware address region when a META firmware processor
+	 * is in use.
+	 *
+	 * When a MIPS firmware processor is in use, this will be %NULL.
+	 */
+	struct pvr_vm_context *kernel_vm_ctx;
+
+	/** @fw_dev: Firmware related data. */
+	struct pvr_fw_device fw_dev;
+
+	/** @power_state: Current GPU power state. */
+	enum pvr_power_state power_state;
+
+	/** @power_lock: Mutex protecting power state. */
+	struct mutex power_lock;
+
+	/**
+	 * @params: Device-specific parameters.
+	 *
+	 *          The values of these parameters are initialized from the
+	 *          defaults specified as module parameters. They may be
+	 *          modified at runtime via debugfs (if enabled).
+	 */
+	struct pvr_device_params params;
+
+	/** @stream_musthave_quirks: Bit array of "must-have" quirks for stream commands. */
+	u32 stream_musthave_quirks[PVR_STREAM_TYPE_MAX][PVR_STREAM_EXTHDR_TYPE_MAX];
+
+	/**
+	 * @ctx_ids: Array of contexts belonging to this device. Array members
+	 *           are of type "struct pvr_context *".
+	 *
+	 * This array is used to allocate IDs used by the firmware.
+	 */
+	struct xarray ctx_ids;
+
+	/**
+	 * @free_list_ids: Array of free lists belonging to this device. Array members
+	 *                 are of type "struct pvr_free_list *".
+	 *
+	 * This array is used to allocate IDs used by the firmware.
+	 */
+	struct xarray free_list_ids;
+
+	/**
+	 * @job_ids: Array of jobs belonging to this device. Array members
+	 *           are of type "struct pvr_job *".
+	 */
+	struct xarray job_ids;
+
+	/**
+	 * @active_contexts: Active context list and the lock protecting it.
+	 *
+	 * Used to iterate over in-flight jobs and signal fences for done jobs.
+	 */
+	struct {
+		/** @list: Active context list. */
+		struct list_head list;
+
+		/** @lock: Lock protecting access to the active context list. */
+		spinlock_t lock;
+	} active_contexts;
+
+	/** @context_work: Work item for context processing. */
+	struct work_struct context_work;
+};
+
+/**
+ * struct pvr_file - powervr-specific data to be assigned to &struct
+ * drm_file.driver_priv
+ */
+struct pvr_file {
+	/**
+	 * @file: A reference to the parent &struct drm_file.
+	 *
+	 * Do not access this member directly, instead call from_pvr_file().
+	 */
+	struct drm_file *file;
+
+	/**
+	 * @pvr_dev: A reference to the powervr-specific wrapper for the
+	 *           associated device. Saves on repeated calls to
+	 *           to_pvr_device().
+	 */
+	struct pvr_device *pvr_dev;
+
+	/** @fw_mem_ctx_obj: Firmware object representing firmware memory context. */
+	struct pvr_fw_object *fw_mem_ctx_obj;
+
+	/**
+	 * @ctx_handles: Array of contexts belonging to this file. Array members
+	 *               are of type "struct pvr_context *".
+	 *
+	 * This array is used to allocate handles returned to userspace.
+	 */
+	struct xarray ctx_handles;
+
+	/**
+	 * @free_list_handles: Array of free lists belonging to this file. Array
+	 * members are of type "struct pvr_free_list *".
+	 *
+	 * This array is used to allocate handles returned to userspace.
+	 */
+	struct xarray free_list_handles;
+
+	/**
+	 * @hwrt_handles: Array of HWRT datasets belonging to this file. Array
+	 * members are of type "struct pvr_hwrt_dataset *".
+	 *
+	 * This array is used to allocate handles returned to userspace.
+	 */
+	struct xarray hwrt_handles;
+
+	/**
+	 * @vm_ctx_handles: Array of VM contexts belonging to this file. Array
+	 * members are of type "struct pvr_vm_context *".
+	 *
+	 * This array is used to allocate handles returned to userspace.
+	 */
+	struct xarray vm_ctx_handles;
+};
+
+/**
+ * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Hardware feature name.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix, which is applied by this macro.
+ *
+ * Return:
+ *  * true if the named feature is present in the hardware
+ *  * false if the named feature is not present in the hardware
+ */
+#define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature)
+
+/**
+ * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @feature: [IN] Feature name.
+ * @value_out: [OUT] Feature value.
+ *
+ * This macro will get a feature value for those features that have values.
+ *
+ * Feature names are derived from those found in &struct pvr_device_features by
+ * dropping the 'has_' prefix.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if the named feature is not present in the hardware
+ */
+#define PVR_FEATURE_VALUE(pvr_dev, feature, value_out)              \
+	({                                                          \
+		struct pvr_device *__pvr_dev = pvr_dev;             \
+		int __ret = -EINVAL;                                \
+		if (__pvr_dev->features.has_##feature) {            \
+			*(value_out) = __pvr_dev->features.feature; \
+			__ret = 0;                                  \
+		}                                                   \
+		__ret;                                              \
+	})
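+
+/*
+ * Example usage of PVR_FEATURE_VALUE() (an illustrative sketch, not part of
+ * the driver; assumes a valid pvr_dev pointer is in scope):
+ *
+ *	u16 phys_bus_width = 0;
+ *	int err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width);
+ *
+ * err is -EINVAL if the phys_bus_width feature is not present on the device.
+ */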
+
+/**
+ * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @quirk: [IN] Hardware quirk name.
+ *
+ * Quirk numbers are derived from those found in &struct pvr_device_quirks by
+ * dropping the 'has_brn' prefix, which is applied by this macro.
+ *
+ * Return:
+ *  * true if the quirk is present in the hardware, or
+ *  * false if the quirk is not present in the hardware.
+ */
+#define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk)
+
+/**
+ * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given
+ *                         enhancement
+ * @pvr_dev: [IN] Target PowerVR device.
+ * @enhancement: [IN] Hardware enhancement name.
+ *
+ * Enhancement numbers are derived from those found in
+ * &struct pvr_device_enhancements by dropping the 'has_ern' prefix, which is
+ * applied by this macro.
+ *
+ * Return:
+ *  * true if the enhancement is present in the hardware, or
+ *  * false if the enhancement is not present in the hardware.
+ */
+#define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement)
+
+static __always_inline struct drm_device *
+from_pvr_device(struct pvr_device *pvr_dev)
+{
+	return &pvr_dev->base;
+}
+
+static __always_inline struct pvr_device *
+to_pvr_device(struct drm_device *drm_dev)
+{
+	return container_of(drm_dev, struct pvr_device, base);
+}
+
+static __always_inline struct drm_file *
+from_pvr_file(struct pvr_file *pvr_file)
+{
+	return pvr_file->file;
+}
+
+static __always_inline struct pvr_file *
+to_pvr_file(struct drm_file *file)
+{
+	return file->driver_priv;
+}
+
+/**
+ * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer
+ * @b: Branch ID.
+ * @v: Version ID.
+ * @n: Number of scalable units.
+ * @c: Config ID.
+ *
+ * The packed layout is as follows:
+ *
+ *    +--------+--------+--------+-------+
+ *    | 63..48 | 47..32 | 31..16 | 15..0 |
+ *    +========+========+========+=======+
+ *    | B      | V      | N      | C     |
+ *    +--------+--------+--------+-------+
+ *
+ * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a
+ * &struct pvr_gpu_id is available in order to ensure proper type checking.
+ *
+ * Return: Packed BVNC.
+ */
+/* clang-format off */
+#define PVR_PACKED_BVNC(b, v, n, c) \
+	((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \
+	 (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \
+	 (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \
+	 (((u64)(c) & GENMASK_ULL(15, 0)) <<  0))
+/* clang-format on */
+
+/**
+ * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit
+ * unsigned integer
+ * @gpu_id: GPU ID.
+ *
+ * The packed layout is as follows:
+ *
+ *    +--------+--------+--------+-------+
+ *    | 63..48 | 47..32 | 31..16 | 15..0 |
+ *    +========+========+========+=======+
+ *    | B      | V      | N      | C     |
+ *    +--------+--------+--------+-------+
+ *
+ * This should be used in preference to PVR_PACKED_BVNC() when a &struct
+ * pvr_gpu_id is available in order to ensure proper type checking.
+ *
+ * Return: Packed BVNC.
+ */
+static __always_inline u64
+pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id)
+{
+	return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c);
+}
+
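+/**
+ * packed_bvnc_to_pvr_gpu_id() - Unpack a 64-bit BVNC into a &struct pvr_gpu_id
+ * @bvnc: Packed BVNC, laid out as described for PVR_PACKED_BVNC().
+ * @gpu_id: [OUT] Target GPU ID structure.
+ */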
+static __always_inline void
+packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id)
+{
+	gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48;
+	gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32;
+	gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16;
+	gpu_id->c = bvnc & GENMASK_ULL(15, 0);
+}
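+
+/*
+ * Example round trip (an illustrative sketch, not part of the driver):
+ *
+ *	struct pvr_gpu_id gpu_id = { .b = 4, .v = 40, .n = 2, .c = 51 };
+ *	u64 bvnc = pvr_gpu_id_to_packed_bvnc(&gpu_id);
+ *
+ *	packed_bvnc_to_pvr_gpu_id(bvnc, &gpu_id);
+ *
+ * gpu_id is left unchanged (4.40.2.51).
+ */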
+
+int pvr_device_init(struct pvr_device *pvr_dev);
+void pvr_device_fini(struct pvr_device *pvr_dev);
+
+int
+pvr_device_clk_core_get_freq(struct pvr_device *pvr_dev, u32 *freq_out);
+
+bool
+pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk);
+bool
+pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement);
+bool
+pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature);
+
+/**
+ * PVR_CR_READ32() - Read a 32-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * This macro is a wrapper around __pvr_cr_read32(). It applies the ROGUE_CR_
+ * prefix to the provided @reg name, making it behave comparably to the
+ * PVR_CR_FIELD_GET() macro.
+ *
+ * Return: The value of the requested register.
+ */
+#define PVR_CR_READ32(pvr_dev, reg) __pvr_cr_read32(pvr_dev, ROGUE_CR_##reg)
+
+/**
+ * PVR_CR_READ64() - Read a 64-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * This macro is a wrapper around __pvr_cr_read64(). It applies the ROGUE_CR_
+ * prefix to the provided @reg name, making it behave comparably to the
+ * PVR_CR_FIELD_GET() macro.
+ *
+ * Return: The value of the requested register.
+ */
+#define PVR_CR_READ64(pvr_dev, reg) __pvr_cr_read64(pvr_dev, ROGUE_CR_##reg)
+
+/**
+ * PVR_CR_WRITE32() - Write to a 32-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ *
+ * This macro is a wrapper around __pvr_cr_write32(). It applies the ROGUE_CR_
+ * prefix to the provided @reg name, making it behave comparably to the
+ * PVR_CR_FIELD_GET() macro.
+ */
+#define PVR_CR_WRITE32(pvr_dev, reg, val) \
+	__pvr_cr_write32(pvr_dev, ROGUE_CR_##reg, val)
+
+/**
+ * PVR_CR_WRITE64() - Write to a 64-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ *
+ * This macro is a wrapper around __pvr_cr_write64(). It applies the ROGUE_CR_
+ * prefix to the provided @reg name, making it behave comparably to the
+ * PVR_CR_FIELD_GET() macro.
+ */
+#define PVR_CR_WRITE64(pvr_dev, reg, val) \
+	__pvr_cr_write64(pvr_dev, ROGUE_CR_##reg, val)
+
+/**
+ * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register
+ * @val: Value of the target register.
+ * @field: Field specifier, as defined in "pvr_rogue_cr_defs.h".
+ *
+ * Return: The extracted field.
+ */
+#define PVR_CR_FIELD_GET(val, field) FIELD_GET(~ROGUE_CR_##field##_CLRMSK, val)
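+
+/*
+ * Example usage (an illustrative sketch, not part of the driver; CORE_ID and
+ * CORE_ID_BRANCH_ID stand in for register and field definitions from
+ * "pvr_rogue_cr_defs.h"):
+ *
+ *	u32 core_id = PVR_CR_READ32(pvr_dev, CORE_ID);
+ *	u32 branch_id = PVR_CR_FIELD_GET(core_id, CORE_ID_BRANCH_ID);
+ */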
+
+/**
+ * __pvr_cr_read32() - Read a 32-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Do not call this function directly; use the PVR_CR_READ32() macro instead.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u32
+__pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg)
+{
+	return ioread32(pvr_dev->regs + reg);
+}
+
+/**
+ * __pvr_cr_read64() - Read a 64-bit register from a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ *
+ * Do not call this function directly; use the PVR_CR_READ64() macro instead.
+ *
+ * Return: The value of the requested register.
+ */
+static __always_inline u64
+__pvr_cr_read64(struct pvr_device *pvr_dev, u32 reg)
+{
+	return ioread64(pvr_dev->regs + reg);
+}
+
+/**
+ * __pvr_cr_write32() - Write to a 32-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ *
+ * Do not call this function directly; use the PVR_CR_WRITE32() macro instead.
+ */
+static __always_inline void
+__pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val)
+{
+	iowrite32(val, pvr_dev->regs + reg);
+}
+
+/**
+ * __pvr_cr_write64() - Write to a 64-bit register in a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ * @reg: Target register.
+ * @val: Value to write.
+ *
+ * Do not call this function directly; use the PVR_CR_WRITE64() macro instead.
+ */
+static __always_inline void
+__pvr_cr_write64(struct pvr_device *pvr_dev, u32 reg, u64 val)
+{
+	iowrite64(val, pvr_dev->regs + reg);
+}
+
+/**
+ * pvr_cr_poll_reg32() - Wait for a 32-bit register to match a given value by
+ *                       polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg32(struct pvr_device *pvr_dev, u32 reg_addr, u32 reg_value,
+		  u32 reg_mask, u64 timeout_usec)
+{
+	u32 value;
+
+	return readl_poll_timeout(pvr_dev->regs + reg_addr, value,
+		(value & reg_mask) == reg_value, 0, timeout_usec);
+}
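+
+/*
+ * Example usage (an illustrative sketch, not part of the driver; the register
+ * name ROGUE_CR_SLC_IDLE stands in for a definition from
+ * "pvr_rogue_cr_defs.h", and "mask" is a caller-chosen bitmask):
+ *
+ *	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE, mask, mask, 1000);
+ *	if (err)
+ *		return err;
+ *
+ * err is -ETIMEDOUT if the masked register did not match within 1000 us.
+ */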
+
+/**
+ * pvr_cr_poll_reg64() - Wait for a 64-bit register to match a given value by
+ *                       polling
+ * @pvr_dev: Target PowerVR device.
+ * @reg_addr: Address of register.
+ * @reg_value: Expected register value (after masking).
+ * @reg_mask: Mask of bits valid for comparison with @reg_value.
+ * @timeout_usec: Timeout length, in us.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%ETIMEDOUT on timeout.
+ */
+static __always_inline int
+pvr_cr_poll_reg64(struct pvr_device *pvr_dev, u32 reg_addr, u64 reg_value,
+		  u64 reg_mask, u64 timeout_usec)
+{
+	u64 value;
+
+	return readq_poll_timeout(pvr_dev->regs + reg_addr, value,
+		(value & reg_mask) == reg_value, 0, timeout_usec);
+}
+
+/**
+ * pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline
+ *                                    aligned
+ * @pvr_dev: Target PowerVR device.
+ * @size: Initial size, in bytes.
+ *
+ * Returns:
+ *  * Size aligned to cacheline size.
+ */
+static __always_inline size_t
+pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size)
+{
+	u16 slc_cacheline_size_in_bits = 0;
+	u16 slc_cacheline_size_in_bytes;
+
+	WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_in_bits));
+	PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_in_bits,
+			  &slc_cacheline_size_in_bits);
+	slc_cacheline_size_in_bytes = slc_cacheline_size_in_bits / 8;
+
+	return round_up(size, slc_cacheline_size_in_bytes);
+}
+
+/**
+ * DOC: IOCTL validation helpers
+ *
+ * To validate the constraints imposed on IOCTL argument structs, a collection
+ * of macros and helper functions exist in ``pvr_device.h``.
+ *
+ * Of the current helpers, it should only be necessary to call
+ * PVR_IOCTL_UNION_PADDING_CHECK() directly. This macro should be used once in
+ * every code path which extracts a union member from a struct passed from
+ * userspace.
+ */
+
+/**
+ * pvr_ioctl_union_padding_check() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @instance: Pointer to the instance of the struct to validate.
+ * @union_offset: Offset into the type of @instance of the target union. Must
+ * be 64-bit aligned.
+ * @union_size: Size of the target union in the type of @instance. Must be
+ * 64-bit aligned.
+ * @member_size: Size of the target member in the target union specified by
+ * @union_offset and @union_size. It is assumed that the offset of the target
+ * member is zero relative to @union_offset. Must be 64-bit aligned.
+ *
+ * You probably want to use PVR_IOCTL_UNION_PADDING_CHECK() instead of calling
+ * this function directly, since that macro abstracts away much of the setup,
+ * and also provides some static validation. See its docs for details.
+ *
+ * Return:
+ *  * %true if every byte between the end of the used member of the union and
+ *    the end of that union is zeroed, or
+ *  * %false otherwise.
+ */
+static __always_inline bool
+pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
+			      size_t union_size, size_t member_size)
+{
+	/*
+	 * void pointer arithmetic is technically illegal - cast to a byte
+	 * pointer so this addition works safely.
+	 */
+	void *padding_start = ((u8 *)instance) + union_offset + member_size;
+	size_t padding_size = union_size - member_size;
+
+	return !memchr_inv(padding_start, 0, padding_size);
+}
+
+/**
+ * PVR_STATIC_ASSERT_64BIT_ALIGNED() - Inline assertion for 64-bit alignment.
+ * @static_expr_: Target expression to evaluate.
+ *
+ * If @static_expr_ does not evaluate to a constant integer which would be a
+ * 64-bit aligned address (i.e. a multiple of 8), compilation will fail.
+ *
+ * Return:
+ * The value of @static_expr_.
+ */
+#define PVR_STATIC_ASSERT_64BIT_ALIGNED(static_expr_)                     \
+	({                                                                \
+		static_assert(((static_expr_) & (sizeof(u64) - 1)) == 0); \
+		(static_expr_);                                           \
+	})
+
+/**
+ * PVR_IOCTL_UNION_PADDING_CHECK() - Validate that the implicit padding between
+ * the end of a union member and the end of the union itself is zeroed.
+ * @struct_instance_: An expression which evaluates to a pointer to a UAPI data
+ * struct.
+ * @union_: The name of the union member of @struct_instance_ to check. If the
+ * union member is nested within the type of @struct_instance_, this may
+ * contain the member access operator (".").
+ * @member_: The name of the member of @union_ to assess.
+ *
+ * This is a wrapper around pvr_ioctl_union_padding_check() which performs
+ * alignment checks and simplifies things for the caller.
+ *
+ * Return:
+ *  * %true if every byte in @struct_instance_ between the end of @member_ and
+ *    the end of @union_ is zeroed, or
+ *  * %false otherwise.
+ */
+#define PVR_IOCTL_UNION_PADDING_CHECK(struct_instance_, union_, member_)     \
+	({                                                                   \
+		typeof(struct_instance_) __instance = (struct_instance_);    \
+		size_t __union_offset = PVR_STATIC_ASSERT_64BIT_ALIGNED(     \
+			offsetof(typeof(*__instance), union_));              \
+		size_t __union_size = PVR_STATIC_ASSERT_64BIT_ALIGNED(       \
+			sizeof(__instance->union_));                         \
+		size_t __member_size = PVR_STATIC_ASSERT_64BIT_ALIGNED(      \
+			sizeof(__instance->union_.member_));                 \
+		pvr_ioctl_union_padding_check(__instance, __union_offset,    \
+					      __union_size, __member_size);  \
+	})
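+
+/*
+ * Example usage (an illustrative sketch, not part of the driver; "args" is a
+ * hypothetical UAPI struct containing a union named "data", from which the
+ * "geom" member is being extracted):
+ *
+ *	if (!PVR_IOCTL_UNION_PADDING_CHECK(args, data, geom))
+ *		return -EINVAL;
+ */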
+
+#define PVR_FW_PROCESSOR_TYPE_META  0
+#define PVR_FW_PROCESSOR_TYPE_MIPS  1
+#define PVR_FW_PROCESSOR_TYPE_RISCV 2
+
+#endif /* __PVR_DEVICE_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c
new file mode 100644
index 000000000000..c512e7733c7b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.c
@@ -0,0 +1,223 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_device_info.h"
+
+#include <drm/drm_print.h>
+
+#include <linux/types.h>
+
+const struct pvr_device_features pvr_device_4_V_2_51 = {
+	.has_cdm_control_stream_format = true,
+	.has_cluster_grouping = true,
+	.has_common_store_size_in_dwords = true,
+	.has_compute = true,
+	.has_compute_morton_capable = true,
+	.has_compute_overlap = true,
+	.has_fbcdc_algorithm = true,
+	.has_isp_max_tiles_in_flight = true,
+	.has_isp_samples_per_pixel = true,
+	.has_max_partitions = true,
+	.has_meta = true,
+	.has_meta_coremem_size = true,
+	.has_num_clusters = true,
+	.has_num_isp_ipp_pipes = true,
+	.has_num_raster_pipes = true,
+	.has_phys_bus_width = true,
+	.has_slc_cache_line_size_in_bits = true,
+	.has_tile_size_x = true,
+	.has_tile_size_y = true,
+	.has_usc_min_output_registers_per_pix = true,
+	.has_virtual_address_space_bits = true,
+	.has_xt_top_infrastructure = true,
+	.has_zls_subtile = true,
+
+	.cdm_control_stream_format = 1,
+	.common_store_size_in_dwords = 1280U * 4U * 4U,
+	.fbcdc_algorithm = 2,
+	.isp_max_tiles_in_flight = 4,
+	.isp_samples_per_pixel = 2,
+	.max_partitions = 8,
+	.meta = true,
+	.meta_coremem_size = 32,
+	.num_clusters = 2,
+	.num_isp_ipp_pipes = 8,
+	.num_raster_pipes = 1,
+	.phys_bus_width = 40,
+	.slc_cache_line_size_in_bits = 512,
+	.tile_size_x = 32,
+	.tile_size_y = 32,
+	.usc_min_output_registers_per_pix = 2,
+	.virtual_address_space_bits = 40,
+};
+
+const struct pvr_device_quirks pvr_device_quirks_4_40_2_51 = {
+	.has_brn44079 = true,
+	.has_brn48492 = true,
+	.has_brn48545 = true,
+	.has_brn49927 = true,
+	.has_brn51764 = true,
+	.has_brn52354 = true,
+	.has_brn62269 = true,
+	.has_brn63142 = true,
+	.has_brn66011 = true,
+};
+
+const struct pvr_device_enhancements pvr_device_enhancements_4_40_2_51 = {
+	.has_ern35421 = true,
+	.has_ern38020 = true,
+	.has_ern38748 = true,
+	.has_ern42064 = true,
+};
+
+const struct pvr_device_features pvr_device_33_V_11_3 = {
+	.has_cdm_control_stream_format = true,
+	.has_common_store_size_in_dwords = true,
+	.has_compute = true,
+	.has_isp_max_tiles_in_flight = true,
+	.has_isp_samples_per_pixel = true,
+	.has_max_partitions = true,
+	.has_mips = true,
+	.has_num_clusters = true,
+	.has_num_isp_ipp_pipes = true,
+	.has_num_raster_pipes = true,
+	.has_phys_bus_width = true,
+	.has_roguexe = true,
+	.has_simple_internal_parameter_format = true,
+	.has_slc_cache_line_size_in_bits = true,
+	.has_sys_bus_secure_reset = true,
+	.has_tile_size_x = true,
+	.has_tile_size_y = true,
+	.has_usc_min_output_registers_per_pix = true,
+	.has_virtual_address_space_bits = true,
+	.has_xe_memory_hierarchy = true,
+
+	.cdm_control_stream_format = 1,
+	.common_store_size_in_dwords = 512U * 4U * 4U,
+	.isp_max_tiles_in_flight = 1,
+	.isp_samples_per_pixel = 1,
+	.max_partitions = 4,
+	.mips = true,
+	.num_clusters = 1,
+	.num_isp_ipp_pipes = 1,
+	.num_raster_pipes = 1,
+	.phys_bus_width = 36,
+	.simple_internal_parameter_format = 2,
+	.slc_cache_line_size_in_bits = 512,
+	.tile_size_x = 16,
+	.tile_size_y = 16,
+	.usc_min_output_registers_per_pix = 1,
+	.virtual_address_space_bits = 40,
+};
+
+const struct pvr_device_quirks pvr_device_quirks_33_15_11_3 = {
+	.has_brn63553 = true,
+};
+
+const struct pvr_device_enhancements pvr_device_enhancements_33_15_11_3 = {
+	.has_ern35421 = true,
+	.has_ern38748 = true,
+};
+
+const struct pvr_device_features pvr_device_36_V_104_796 = {
+	.has_cdm_control_stream_format = true,
+	.has_common_store_size_in_dwords = true,
+	.has_compute = true,
+	.has_compute_overlap = true,
+	.has_fbcdc_algorithm = true,
+	.has_gpu_multicore_support = true,
+	.has_isp_max_tiles_in_flight = true,
+	.has_isp_samples_per_pixel = true,
+	.has_max_partitions = true,
+	.has_num_clusters = true,
+	.has_num_isp_ipp_pipes = true,
+	.has_num_raster_pipes = true,
+	.has_phys_bus_width = true,
+	.has_riscv_fw_processor = true,
+	.has_roguexe = true,
+	.has_simple_internal_parameter_format = true,
+	.has_slc_cache_line_size_in_bits = true,
+	.has_sys_bus_secure_reset = true,
+	.has_tile_size_x = true,
+	.has_tile_size_y = true,
+	.has_tpu_dm_global_registers = true,
+	.has_usc_min_output_registers_per_pix = true,
+	.has_virtual_address_space_bits = true,
+	.has_xe_memory_hierarchy = true,
+	.has_xpu_max_slaves = true,
+
+	.cdm_control_stream_format = 1,
+	.common_store_size_in_dwords = 1344U * 4U * 4U,
+	.fbcdc_algorithm = 50,
+	.isp_max_tiles_in_flight = 6,
+	.isp_samples_per_pixel = 4,
+	.max_partitions = 16,
+	.num_clusters = 1,
+	.num_isp_ipp_pipes = 6,
+	.num_raster_pipes = 1,
+	.phys_bus_width = 36,
+	.riscv_fw_processor = true,
+	.simple_internal_parameter_format = 2,
+	.slc_cache_line_size_in_bits = 512,
+	.tile_size_x = 16,
+	.tile_size_y = 16,
+	.usc_min_output_registers_per_pix = 2,
+	.virtual_address_space_bits = 40,
+	.xpu_max_slaves = 3,
+};
+
+const struct pvr_device_quirks pvr_device_quirks_36_53_104_796 = {
+	.has_brn44079 = true,
+};
+
+const struct pvr_device_enhancements pvr_device_enhancements_36_53_104_796 = {
+	.has_ern35421 = true,
+	.has_ern38748 = true,
+};
+
+/**
+ * pvr_device_info_init() - Initialize a PowerVR device's hardware features and quirks
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function relies on &pvr_dev.gpu_id having already been initialized. If
+ * the PowerVR device version is supported, this sets &pvr_dev.features,
+ * &pvr_dev.quirks and &pvr_dev.enhancements.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%ENODEV if the device is not supported.
+ */
+int
+pvr_device_info_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id;
+	const u64 bvnc = pvr_gpu_id_to_packed_bvnc(gpu_id);
+
+	/*
+	 * This macro results in a "Macros with multiple statements should be
+	 * enclosed in a do - while loop" checkpatch error. However, following
+	 * this advice would make the macro look a bit odd and isn't necessary
+	 * in this particular case, as the macro has a very specific use and a
+	 * very limited lifetime. The error can therefore be ignored.
+	 */
+#define CASE_PACKED_BVNC_DEVICE_INFO(b, v, n, c)                                       \
+	case PVR_PACKED_BVNC(b, v, n, c):                                              \
+		pvr_dev->features = pvr_device_##b##_V_##n##_##c;                      \
+		pvr_dev->quirks = pvr_device_quirks_##b##_##v##_##n##_##c;             \
+		pvr_dev->enhancements = pvr_device_enhancements_##b##_##v##_##n##_##c; \
+		return 0
+
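+	/*
+	 * As an illustration, CASE_PACKED_BVNC_DEVICE_INFO(4, 40, 2, 51)
+	 * expands to:
+	 *
+	 *	case PVR_PACKED_BVNC(4, 40, 2, 51):
+	 *		pvr_dev->features = pvr_device_4_V_2_51;
+	 *		pvr_dev->quirks = pvr_device_quirks_4_40_2_51;
+	 *		pvr_dev->enhancements = pvr_device_enhancements_4_40_2_51;
+	 *		return 0;
+	 */
+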
+	switch (bvnc) {
+		CASE_PACKED_BVNC_DEVICE_INFO(4, 40, 2, 51);
+		CASE_PACKED_BVNC_DEVICE_INFO(33, 15, 11, 3);
+		CASE_PACKED_BVNC_DEVICE_INFO(36, 53, 104, 796);
+	}
+
+#undef CASE_PACKED_BVNC_DEVICE_INFO
+
+	drm_warn(drm_dev, "Unsupported BVNC: %u.%u.%u.%u\n", gpu_id->b,
+		 gpu_id->v, gpu_id->n, gpu_id->c);
+
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.h b/drivers/gpu/drm/imagination/pvr_device_info.h
new file mode 100644
index 000000000000..fc8018ca2060
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.h
@@ -0,0 +1,133 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DEVICE_INFO_H__
+#define __PVR_DEVICE_INFO_H__
+
+#include <linux/types.h>
+
+struct pvr_device;
+
+/**
+ * struct pvr_device_features - Hardware feature information
+ */
+struct pvr_device_features {
+	bool has_cdm_control_stream_format : 1;
+	bool has_cluster_grouping : 1;
+	bool has_common_store_size_in_dwords : 1;
+	bool has_compute : 1;
+	bool has_compute_morton_capable : 1;
+	bool has_compute_overlap : 1;
+	bool has_fb_cdc_v4 : 1;
+	bool has_fbcdc_algorithm : 1;
+	bool has_gpu_multicore_support : 1;
+	bool has_isp_max_tiles_in_flight : 1;
+	bool has_isp_samples_per_pixel : 1;
+	bool has_isp_zls_d24_s8_packing_ogl_mode : 1;
+	bool has_max_partitions : 1;
+	bool has_meta : 1;
+	bool has_meta_coremem_size : 1;
+	bool has_mips : 1;
+	bool has_num_clusters : 1;
+	bool has_num_isp_ipp_pipes : 1;
+	bool has_num_raster_pipes : 1;
+	bool has_phys_bus_width : 1;
+	bool has_riscv_fw_processor : 1;
+	bool has_roguexe : 1;
+	bool has_s7_top_infrastructure : 1;
+	bool has_simple_internal_parameter_format : 1;
+	bool has_slc_cache_line_size_in_bits : 1;
+	bool has_sys_bus_secure_reset : 1;
+	bool has_tessellation : 1;
+	bool has_tile_size_x : 1;
+	bool has_tile_size_y : 1;
+	bool has_tpu_dm_global_registers : 1;
+	bool has_usc_min_output_registers_per_pix : 1;
+	bool has_vdm_drawindirect : 1;
+	bool has_vdm_object_level_lls : 1;
+	bool has_virtual_address_space_bits : 1;
+	bool has_xe_memory_hierarchy : 1;
+	bool has_xpu_max_slaves : 1;
+	bool has_xt_top_infrastructure : 1;
+	bool has_zls_subtile : 1;
+
+	u8 cdm_control_stream_format;
+	u32 common_store_size_in_dwords;
+	u8 fbcdc_algorithm;
+	u16 isp_max_tiles_in_flight;
+	u8 isp_samples_per_pixel;
+	u16 max_partitions;
+	bool meta;
+	u32 meta_coremem_size;
+	bool mips;
+	u16 num_clusters;
+	u8 num_isp_ipp_pipes;
+	u8 num_raster_pipes;
+	u16 phys_bus_width;
+	bool riscv_fw_processor;
+	u32 simple_internal_parameter_format;
+	u16 slc_cache_line_size_in_bits;
+	u16 tile_size_x;
+	u16 tile_size_y;
+	u16 usc_min_output_registers_per_pix;
+	u16 virtual_address_space_bits;
+	u8 xpu_max_slaves;
+};
+
+/**
+ * struct pvr_device_quirks - Hardware quirk information
+ */
+struct pvr_device_quirks {
+	bool has_brn44079 : 1;
+	bool has_brn47217 : 1;
+	bool has_brn48492 : 1;
+	bool has_brn48545 : 1;
+	bool has_brn49927 : 1;
+	bool has_brn51764 : 1;
+	bool has_brn52354 : 1;
+	bool has_brn62269 : 1;
+	bool has_brn63142 : 1;
+	bool has_brn63553 : 1;
+	bool has_brn66011 : 1;
+};
+
+/**
+ * struct pvr_device_enhancements - Hardware enhancement information
+ */
+struct pvr_device_enhancements {
+	bool has_ern35421 : 1;
+	bool has_ern38020 : 1;
+	bool has_ern38748 : 1;
+	bool has_ern42064 : 1;
+};
+
+int pvr_device_info_init(struct pvr_device *pvr_dev);
+
+/*
+ * Meta cores
+ *
+ * These are the values for the 'meta' feature when the feature is present
+ * (as per &struct pvr_device_features).
+ */
+#define PVR_META_MTP218 (1)
+#define PVR_META_MTP219 (2)
+#define PVR_META_LTP218 (3)
+#define PVR_META_LTP217 (4)
+
+enum {
+	PVR_FEATURE_CDM_USER_MODE_QUEUE,
+	PVR_FEATURE_CLUSTER_GROUPING,
+	PVR_FEATURE_COMPUTE_MORTON_CAPABLE,
+	PVR_FEATURE_FB_CDC_V4,
+	PVR_FEATURE_GPU_MULTICORE_SUPPORT,
+	PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+	PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP,
+	PVR_FEATURE_S7_TOP_INFRASTRUCTURE,
+	PVR_FEATURE_TESSELLATION,
+	PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+	PVR_FEATURE_VDM_DRAWINDIRECT,
+	PVR_FEATURE_VDM_OBJECT_LEVEL_LLS,
+	PVR_FEATURE_ZLS_SUBTILE,
+};
+
+#endif /* __PVR_DEVICE_INFO_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
new file mode 100644
index 000000000000..34d715d42d30
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -0,0 +1,1634 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_debugfs.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_fw.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_shared.h"
+#include "pvr_vm.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/xarray.h>
+
+/**
+ * DOC: PowerVR Graphics Driver
+ *
+ * This driver supports the following PowerVR graphics cores from Imagination
+ * Technologies:
+ *
+ * * GX6250 (found in MediaTek MT8173)
+ * * AXE-1-16M (found in Texas Instruments AM62)
+ */
+
+/**
+ * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
+ * @drm_dev: [IN] Target DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_bo_args.
+ * @file: [IN] DRM file-private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
+ *    or wider than &typedef size_t,
+ *  * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
+ *    reserved or undefined are set,
+ *  * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
+ *    zero,
+ *  * Any error encountered while creating the object (see
+ *    pvr_gem_object_create()), or
+ *  * Any error encountered while transferring ownership of the object into a
+ *    userspace-accessible handle (see pvr_gem_object_into_handle()).
+ */
+int
+pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
+		    struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	struct pvr_gem_object *pvr_obj;
+	size_t sanitized_size;
+	size_t real_size;
+
+	int err;
+
+	/* All padding fields must be zeroed. */
+	if (args->_padding_c != 0)
+		return -EINVAL;
+
+	/*
+	 * On 64-bit platforms (our primary target), size_t is a u64. However,
+	 * on other architectures we have to check for overflow when casting
+	 * down to size_t from u64.
+	 *
+	 * We also disallow zero-sized allocations, and reserved (kernel-only)
+	 * flags.
+	 */
+	if (args->size > SIZE_MAX || args->size == 0 ||
+	    args->flags & PVR_BO_RESERVED_MASK) {
+		return -EINVAL;
+	}
+
+	sanitized_size = (size_t)args->size;
+
+	/*
+	 * Create a buffer object and transfer ownership to a userspace-
+	 * accessible handle.
+	 */
+	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
+	if (IS_ERR(pvr_obj)) {
+		err = PTR_ERR(pvr_obj);
+		goto err_out;
+	}
+
+	/*
+	 * Store the actual size of the created buffer object. We can't fetch
+	 * this after this point because we will no longer have a reference to
+	 * &pvr_obj.
+	 */
+	real_size = pvr_gem_object_size(pvr_obj);
+
+	/* This function will not modify &args->handle unless it succeeds. */
+	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+	if (err)
+		goto err_destroy_obj;
+
+	/*
+	 * Now write the real size back to the args struct, after no further
+	 * errors can occur.
+	 */
+	args->size = real_size;
+
+	return 0;
+
+err_destroy_obj:
+	/*
+	 * GEM objects are refcounted, so there is no explicit destructor
+	 * function. Instead, we release the singular reference we currently
+	 * hold on the object and let GEM take care of the rest.
+	 */
+	pvr_gem_object_put(pvr_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
+ * used when calling mmap() from userspace to map the given GEM buffer object
+ * @drm_dev: [IN] DRM device (unused).
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
+ *
+ * This IOCTL does *not* perform an mmap. See the docs on
+ * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENOENT if the handle does not reference a valid GEM buffer object,
+ *  * -%EINVAL if any padding fields in &struct
+ *    drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
+ *  * Any error returned by drm_gem_create_mmap_offset().
+ */
+int
+pvr_ioctl_get_bo_mmap_offset(__always_unused struct drm_device *drm_dev,
+			     void *raw_args, struct drm_file *file)
+{
+	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	struct pvr_gem_object *pvr_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	/* All padding fields must be zeroed. */
+	if (args->_padding_4 != 0)
+		return -EINVAL;
+
+	/*
+	 * Obtain a kernel reference to the buffer object. This reference is
+	 * counted and must be manually dropped before returning. If a buffer
+	 * object cannot be found for the specified handle, return -%ENOENT (No
+	 * such file or directory).
+	 */
+	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+	if (!pvr_obj)
+		return -ENOENT;
+
+	gem_obj = from_pvr_gem_object(pvr_obj);
+
+	/*
+	 * Allocate a fake offset which can be used in userspace calls to mmap
+	 * on the DRM device file. If this fails, return the error code. This
+	 * operation is idempotent.
+	 */
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret != 0) {
+		/* Drop our reference to the buffer object. */
+		drm_gem_object_put(gem_obj);
+		return ret;
+	}
+
+	/*
+	 * Read out the fake offset allocated by the earlier call to
+	 * drm_gem_create_mmap_offset.
+	 */
+	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+
+	/* Drop our reference to the buffer object. */
+	pvr_gem_object_put(pvr_obj);
+
+	return 0;
+}
+
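+/**
+ * pvr_fw_version_packed() - Pack firmware version numbers into a u64
+ * @major: Major version number.
+ * @minor: Minor version number.
+ *
+ * Return: Packed version, with @major in the upper 32 bits and @minor in the
+ * lower 32 bits.
+ */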
+static __always_inline u64
+pvr_fw_version_packed(u32 major, u32 minor)
+{
+	return ((u64)major << 32) | minor;
+}
+
+static u32
+rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
+{
+	u32 max_partitions = 0;
+	u32 tile_size_x = 0;
+	u32 tile_size_y = 0;
+
+	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
+	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
+	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);
+
+	if (tile_size_x == 16 && tile_size_y == 16) {
+		u32 usc_min_output_registers_per_pix = 0;
+
+		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
+				  &usc_min_output_registers_per_pix);
+
+		return tile_size_x * tile_size_y * max_partitions *
+		       usc_min_output_registers_per_pix;
+	}
+
+	return max_partitions * 1024;
+}
+
+static u32
+rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
+{
+	u32 common_store_size_in_dwords = 512 * 4 * 4;
+	u32 alloc_region_size;
+
+	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);
+
+	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
+			    rogue_get_common_store_partition_space_size(pvr_dev);
+
+	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
+		u32 common_store_split_point = (768U * 4U * 4U);
+
+		return min(common_store_split_point - (256U * 4U), alloc_region_size);
+	}
+
+	return alloc_region_size;
+}
+
+static inline u32
+rogue_get_num_phantoms(struct pvr_device *pvr_dev)
+{
+	u32 num_clusters = 1;
+
+	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);
+
+	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
+}
+
+static inline u32
+rogue_get_max_coeffs(struct pvr_device *pvr_dev)
+{
+	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
+	u32 pending_allocation_shared_regs = 2U * 1024U;
+	u32 pending_allocation_coeff_regs = 0U;
+	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
+	u32 tiles_in_flight = 0;
+	u32 max_coeff_pixel_portion;
+
+	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
+	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
+	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;
+
+	/*
+	 * Compute tasks on cores with BRN48492 and without compute overlap may lock
+	 * up without two additional lines of coeffs.
+	 */
+	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
+		pending_allocation_coeff_regs = 2U * 1024U;
+
+	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
+		pending_allocation_shared_regs = 0;
+
+	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
+		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;
+
+	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
+		(max_coeff_pixel_portion + max_coeff_additional_portion +
+		 pending_allocation_shared_regs);
+}
+
+static inline u32
+rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
+{
+	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);
+
+	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
+	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
+		/* Driver must not use the 2 reserved lines. */
+		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
+	}
+
+	/*
+	 * The maximum amount of local memory available to a kernel is the minimum
+	 * of the total number of coefficient registers available and the max common
+	 * store allocation size which can be made by the CDM.
+	 *
+	 * If any coeff lines are reserved for tessellation or pixel then we need to
+	 * subtract those too.
+	 */
+	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
+}
+
+/**
+ * pvr_dev_query_gpu_info_get() - Copy GPU information to userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN/OUT] Device query arguments containing a pointer to a userspace
+ *        struct drm_pvr_dev_query_gpu_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ *  * 0 on success, or if size is requested using a NULL pointer, or
+ *  * -%E2BIG if the indicated length of the allocation is less than is
+ *    required to contain the copied data, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
+			   struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
+		return 0;
+	}
+
+	gpu_info.gpu_id =
+		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
+	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);
+
+	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
+	if (err < 0)
+		return err;
+
+	if (args->size > sizeof(gpu_info))
+		args->size = sizeof(gpu_info);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_runtime_info_get() - Copy runtime information to userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN/OUT] Device query arguments containing a pointer to a userspace
+ *        struct drm_pvr_dev_query_runtime_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ *  * 0 on success, or if size is requested using a NULL pointer, or
+ *  * -%E2BIG if the indicated length of the allocation is less than is
+ *    required to contain the copied data, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
+			       struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
+		return 0;
+	}
+
+	runtime_info.free_list_min_pages =
+		pvr_get_free_list_min_pages(pvr_dev);
+	runtime_info.free_list_max_pages =
+		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
+	runtime_info.common_store_alloc_region_size =
+		rogue_get_common_store_alloc_region_size(pvr_dev);
+	runtime_info.common_store_partition_space_size =
+		rogue_get_common_store_partition_space_size(pvr_dev);
+	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
+	runtime_info.cdm_max_local_mem_size_regs =
+		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);
+
+	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
+	if (err < 0)
+		return err;
+
+	if (args->size > sizeof(runtime_info))
+		args->size = sizeof(runtime_info);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_hwrt_info_get() - Copy HWRT information to userspace
+ * @pvr_dev: Device pointer.
+ * @args: [IN/OUT] Device query arguments containing a pointer to a userspace
+ *        struct drm_pvr_dev_query_hwrt_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ *  * 0 on success, or if size is requested using a NULL pointer, or
+ *  * -%E2BIG if the indicated length of the allocation is less than is
+ *    required to contain the copied data, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_hwrt_info_get(struct pvr_device *pvr_dev,
+			    struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_hwrt_info hwrt_info = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_hwrt_info);
+		return 0;
+	}
+
+	hwrt_info.num_geomdatas = ROGUE_FWIF_NUM_GEOMDATAS;
+	hwrt_info.num_rtdatas = ROGUE_FWIF_NUM_RTDATAS;
+	hwrt_info.num_freelists = ROGUE_FWIF_NUM_RTDATA_FREELISTS;
+
+	err = PVR_UOBJ_SET(args->pointer, args->size, hwrt_info);
+	if (err < 0)
+		return err;
+
+	if (args->size > sizeof(hwrt_info))
+		args->size = sizeof(hwrt_info);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given
+ * in a struct drm_pvr_dev_query_quirks, or get the amount of space required
+ * for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN/OUT] Device query arguments containing a pointer to a userspace
+ *        struct drm_pvr_dev_query_quirks.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of quirks copied or, if
+ * either pointer is NULL, the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ *  * 0 on success, or if size/count is requested using a NULL pointer, or
+ *  * -%EINVAL if args contained non-zero reserved fields, or
+ *  * -%E2BIG if the indicated length of the allocation is less than is
+ *    required to contain the copied data, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
+			 struct drm_pvr_ioctl_dev_query_args *args)
+{
+	/*
+	 * FIXME: Hardcoding these numbers here is intended as an intermediate
+	 * step so the UAPI can be fixed, but it requires a refactor in the
+	 * future to store them in a more appropriate location.
+	 */
+	static const u32 umd_quirks_musthave[] = {
+		47217,
+		49927,
+		62269,
+	};
+	static const u32 umd_quirks[] = {
+		48545,
+		51764,
+	};
+	struct drm_pvr_dev_query_quirks query;
+	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
+	size_t out_musthave_count = 0;
+	size_t out_count = 0;
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_quirks);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+	if (err < 0)
+		return err;
+	if (query._padding_c)
+		return -EINVAL;
+
+	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
+		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
+			out[out_count++] = umd_quirks_musthave[i];
+			out_musthave_count++;
+		}
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
+		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
+			out[out_count++] = umd_quirks[i];
+	}
+
+	if (!query.quirks)
+		goto copy_out;
+	if (query.count < out_count)
+		return -E2BIG;
+
+	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
+			 out_count * sizeof(u32))) {
+		return -EFAULT;
+	}
+
+	query.musthave_count = out_musthave_count;
+
+copy_out:
+	query.count = out_count;
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the
+ * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
+ * of space required for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN/OUT] Device query arguments containing a pointer to a userspace
+ *        struct drm_pvr_dev_query_enhancements.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of enhancements copied or, if
+ * either pointer is NULL, the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ *  * 0 on success, or if size/count is requested using a NULL pointer, or
+ *  * -%EINVAL if args contained non-zero reserved fields, or
+ *  * -%E2BIG if the indicated length of the allocation is less than is
+ *    required to contain the copied data, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
+			       struct drm_pvr_ioctl_dev_query_args *args)
+{
+	/*
+	 * FIXME: Hardcoding these numbers here is intended as an intermediate
+	 * step so the UAPI can be fixed, but it requires a refactor in the
+	 * future to store them in a more appropriate location.
+	 */
+	static const u32 umd_enhancements[] = {
+		35421,
+		42064,
+	};
+	struct drm_pvr_dev_query_enhancements query;
+	u32 out[ARRAY_SIZE(umd_enhancements)];
+	size_t out_idx = 0;
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+	if (err < 0)
+		return err;
+	if (query._padding_a)
+		return -EINVAL;
+	if (query._padding_c)
+		return -EINVAL;
+
+	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
+		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
+			out[out_idx++] = umd_enhancements[i];
+	}
+
+	if (!query.enhancements)
+		goto copy_out;
+	if (query.count < out_idx)
+		return -E2BIG;
+
+	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
+			 out_idx * sizeof(u32))) {
+		return -EFAULT;
+	}
+
+copy_out:
+	query.count = out_idx;
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_dev_query() - IOCTL to copy information about a device
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_dev_query_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
+ * If the given receiving struct pointer is NULL, or the indicated size is too
+ * small, the expected size of the struct type will be returned in the size
+ * argument field.
+ *
+ * Return:
+ *  * 0 on success or when fetching the size with args->pointer == NULL, or
+ *  * -%E2BIG if the indicated size of the receiving struct is less than is
+ *    required to contain the copied data, or
+ *  * -%EINVAL if the indicated struct type is unknown, or
+ *  * -%ENOMEM if local memory could not be allocated, or
+ *  * -%EFAULT if local memory could not be copied to userspace.
+ */
+int
+pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
+		    struct drm_file *file)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct drm_pvr_ioctl_dev_query_args *args = raw_args;
+
+	switch ((enum drm_pvr_dev_query)args->type) {
+	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
+		return pvr_dev_query_gpu_info_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
+		return pvr_dev_query_runtime_info_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_HWRT_INFO_GET:
+		return pvr_dev_query_hwrt_info_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
+		return pvr_dev_query_quirks_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
+		return pvr_dev_query_enhancements_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
+		return pvr_heap_info_get(pvr_dev, args);
+
+	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
+		return pvr_static_data_areas_get(pvr_dev, args);
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * pvr_ioctl_create_context() - IOCTL to create a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_create_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if provided arguments are invalid, or
+ *  * -%EFAULT if arguments can't be copied from userspace, or
+ *  * Any error returned by pvr_create_render_context().
+ */
+int
+pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
+			 struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_context_args *args = raw_args;
+	struct pvr_file *pvr_file = file->driver_priv;
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct pvr_context *ctx = NULL;
+	u32 handle;
+	void *old;
+	int err;
+	u32 id;
+
+	if (args->flags) {
+		/* Context creation flags are currently unused and must be zero. */
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * Allocate global ID for firmware. We will update this with the context once it is created.
+	 */
+	err = xa_alloc(&pvr_dev->ctx_ids, &id, NULL, xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_out;
+
+	/*
+	 * Allocate context handle for userspace. We will update this with the context once it
+	 * is created.
+	 */
+	err = xa_alloc(&pvr_file->ctx_handles, &handle, NULL, xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_id_xa_erase;
+
+	switch (args->type) {
+	case DRM_PVR_CTX_TYPE_RENDER: {
+		ctx = pvr_create_render_context(pvr_file, args, id);
+		break;
+	}
+
+	case DRM_PVR_CTX_TYPE_COMPUTE: {
+		ctx = pvr_create_compute_context(pvr_file, args, id);
+		break;
+	}
+
+	case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: {
+		ctx = pvr_create_transfer_context(pvr_file, args, id);
+		break;
+	}
+
+	default:
+		ctx = ERR_PTR(-EINVAL);
+		break;
+	}
+
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto err_handle_xa_erase;
+	}
+
+	old = xa_store(&pvr_dev->ctx_ids, id, ctx, GFP_KERNEL);
+	if (xa_is_err(old)) {
+		err = xa_err(old);
+		goto err_context_destroy;
+	}
+
+	old = xa_store(&pvr_file->ctx_handles, handle, ctx, GFP_KERNEL);
+	if (xa_is_err(old)) {
+		err = xa_err(old);
+		goto err_context_destroy;
+	}
+
+	args->handle = handle;
+
+	return 0;
+
+err_context_destroy:
+	pvr_context_destroy(pvr_file, handle);
+
+err_handle_xa_erase:
+	xa_erase(&pvr_file->ctx_handles, handle);
+
+err_id_xa_erase:
+	xa_erase(&pvr_dev->ctx_ids, id);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_ioctl_destroy_context() - IOCTL to destroy a context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_destroy_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if context not in context list.
+ */
+int
+pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
+			  struct drm_file *file)
+{
+	struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
+	struct pvr_file *pvr_file = file->driver_priv;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	return pvr_context_destroy(pvr_file, args->handle);
+}
+
+/**
+ * pvr_ioctl_create_free_list() - IOCTL to create a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_create_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_free_list_create().
+ */
+int
+pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
+			   struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_free_list *free_list;
+	int err;
+
+	free_list = pvr_free_list_create(pvr_file, args);
+	if (IS_ERR(free_list)) {
+		err = PTR_ERR(free_list);
+		goto err_out;
+	}
+
+	/* Allocate object handle for userspace. */
+	err = xa_alloc(&pvr_file->free_list_handles,
+		       &args->handle,
+		       free_list,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_cleanup;
+
+	return 0;
+
+err_cleanup:
+	pvr_free_list_put(free_list);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_destroy_free_list_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if free list not in object list.
+ */
+int
+pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
+			    struct drm_file *file)
+{
+	struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_free_list *free_list;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
+	if (!free_list)
+		return -EINVAL;
+
+	pvr_free_list_put(free_list);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_create_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_hwrt_dataset_create().
+ */
+int
+pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+			      struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_hwrt_dataset *hwrt;
+	int err;
+
+	hwrt = pvr_hwrt_dataset_create(pvr_file, args);
+	if (IS_ERR(hwrt)) {
+		err = PTR_ERR(hwrt);
+		goto err_out;
+	}
+
+	/* Allocate object handle for userspace. */
+	err = xa_alloc(&pvr_file->hwrt_handles,
+		       &args->handle,
+		       hwrt,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_cleanup;
+
+	return 0;
+
+err_cleanup:
+	pvr_hwrt_dataset_put(hwrt);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if HWRT dataset not in object list.
+ */
+int
+pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
+			       struct drm_file *file)
+{
+	struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_hwrt_dataset *hwrt;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
+	if (!hwrt)
+		return -EINVAL;
+
+	pvr_hwrt_dataset_put(hwrt);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ *                     &struct drm_pvr_ioctl_create_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_vm_create_context().
+ */
+int
+pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
+			    struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int err;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
+	if (IS_ERR(vm_ctx))
+		return PTR_ERR(vm_ctx);
+
+	/* Allocate object handle for userspace. */
+	err = xa_alloc(&pvr_file->vm_ctx_handles,
+		       &args->handle,
+		       vm_ctx,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_cleanup;
+
+	return 0;
+
+err_cleanup:
+	pvr_vm_context_put(vm_ctx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_destroy_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if object not in object list.
+ */
+int
+pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
+			     struct drm_file *file)
+{
+	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	pvr_vm_context_put(vm_ctx);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_vm_map_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if &drm_pvr_ioctl_vm_map_args.flags is not zero,
+ *  * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_map_args.offset
+ *    and &drm_pvr_ioctl_vm_map_args.size are not valid or do not fall
+ *    within the buffer object specified by
+ *    &drm_pvr_ioctl_vm_map_args.handle,
+ *  * -%EINVAL if the bounds specified by
+ *    &drm_pvr_ioctl_vm_map_args.device_addr and
+ *    &drm_pvr_ioctl_vm_map_args.size do not form a valid device-virtual
+ *    address range which falls entirely within a single heap, or
+ *  * -%ENOENT if &drm_pvr_ioctl_vm_map_args.handle does not refer to a
+ *    valid PowerVR buffer object.
+ */
+static int
+pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
+		 struct drm_file *file)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	struct pvr_gem_object *pvr_obj;
+	size_t pvr_obj_size;
+
+	u64 offset_plus_size;
+	int err;
+
+	/* Initial validation of args. */
+	if (args->_padding_14)
+		return -EINVAL;
+
+	if (args->flags != 0 ||
+	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
+	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
+		return -EINVAL;
+	}
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+	if (!pvr_obj) {
+		err = -ENOENT;
+		goto err_put_vm_context;
+	}
+
+	pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+	/*
+	 * Validate offset and size args. The alignment of these will be
+	 * checked when mapping; for now just check that they're within valid
+	 * bounds
+	 */
+	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
+		err = -EINVAL;
+		goto err_put_pvr_object;
+	}
+
+	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
+			 args->device_addr, args->size);
+	if (err)
+		goto err_put_pvr_object;
+
+	/*
+	 * In order to set up the mapping, we needed a reference to &pvr_obj.
+	 * However, pvr_vm_map() obtains and stores its own reference, so we
+	 * must release ours before returning.
+	 */
+	err = 0;
+	goto err_put_pvr_object;
+
+err_put_pvr_object:
+	pvr_gem_object_put(pvr_obj);
+
+err_put_vm_context:
+	pvr_vm_context_put(vm_ctx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_vm_unmap_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if &drm_pvr_ioctl_vm_unmap_args.device_addr is not a valid
+ *    device page-aligned device-virtual address, or
+ *  * -%ENOENT if there is currently no PowerVR buffer object mapped at
+ *    &drm_pvr_ioctl_vm_unmap_args.device_addr.
+ */
+static int
+pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
+		   struct drm_file *file)
+{
+	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int err;
+
+	/* Initial validation of args. */
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	err = pvr_vm_unmap(vm_ctx, args->device_addr);
+
+	pvr_vm_context_put(vm_ctx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ *                 &struct drm_pvr_ioctl_submit_jobs_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EINVAL if arguments are invalid.
+ */
+int
+pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
+		      struct drm_file *file)
+{
+	struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	return pvr_submit_jobs(pvr_dev, pvr_file, args);
+}
+
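+/*
+ * Helper for copying a variably-sized userspace object into a fixed-size
+ * kernel object. copy_struct_from_user() zero-fills the kernel object when
+ * the user stride is short and rejects non-zero trailing bytes when it is
+ * long, so version mismatches between userspace and kernel fail cleanly
+ * rather than silently.
+ */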
+int
+pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
+{
+	if (usr_stride < min_stride)
+		return -EINVAL;
+
+	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
+}
+
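+/*
+ * Mirror of pvr_get_uobj(): copies min(usr_stride, obj_size) bytes out to
+ * userspace, then zeroes any trailing user bytes beyond the kernel object
+ * so a larger userspace struct never sees stale data.
+ */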
+int
+pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
+{
+	if (usr_stride < min_stride)
+		return -EINVAL;
+
+	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
+		return -EFAULT;
+
+	if (usr_stride > obj_size &&
+	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
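+/*
+ * Copy an array of variably-sized userspace objects into a freshly
+ * allocated kernel array. When the user stride matches the kernel object
+ * size the whole array is copied in one go; otherwise each element goes
+ * through copy_struct_from_user() so the same padding rules apply.
+ */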
+int
+pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
+{
+	int ret = 0;
+	void *out_alloc;
+
+	if (in->stride < min_stride)
+		return -EINVAL;
+
+	if (!in->count)
+		return 0;
+
+	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
+	if (!out_alloc)
+		return -ENOMEM;
+
+	if (obj_size == in->stride) {
+		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array), obj_size * in->count))
+			ret = -EFAULT;
+	} else {
+		void __user *in_ptr = u64_to_user_ptr(in->array);
+		void *out_ptr = out_alloc;
+
+		for (u32 i = 0; i < in->count; i++) {
+			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
+			if (ret)
+				break;
+
+			out_ptr += obj_size;
+			in_ptr += in->stride;
+		}
+	}
+
+	if (ret) {
+		kvfree(out_alloc);
+		return ret;
+	}
+
+	*out = out_alloc;
+	return 0;
+}
+
+int
+pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+		   const void *in)
+{
+	if (out->stride < min_stride)
+		return -EINVAL;
+
+	if (!out->count)
+		return 0;
+
+	if (obj_size == out->stride) {
+		if (copy_to_user(u64_to_user_ptr(out->array), in, obj_size * out->count))
+			return -EFAULT;
+	} else {
+		u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
+		void __user *out_ptr = u64_to_user_ptr(out->array);
+		const void *in_ptr = in;
+
+		for (u32 i = 0; i < out->count; i++) {
+			if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
+				return -EFAULT;
+
+			out_ptr += out->stride;
+			in_ptr += obj_size;
+		}
+
+		if (out->stride > obj_size &&
+		    clear_user(u64_to_user_ptr(out->array + obj_size),
+			       out->stride - obj_size)) {
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+#define DRM_PVR_IOCTL(_name, _func, _flags) \
+	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)
+
+/* clang-format off */
+
+static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
+	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
+};
+
+/* clang-format on */
+
+#undef DRM_PVR_IOCTL
+
+/**
+ * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
+ * @drm_dev: [IN] DRM device.
+ * @file: [IN] DRM file private data.
+ *
+ * Allocates powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENOMEM if the allocation of a &struct pvr_file fails, or
+ *  * Any error returned by pvr_memory_context_init().
+ */
+static int
+pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file;
+
+	int err;
+
+	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
+	if (!pvr_file) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/*
+	 * Store reference to base DRM file private data for use by
+	 * from_pvr_file.
+	 */
+	pvr_file->file = file;
+
+	/*
+	 * Store reference to powervr-specific outer device struct in file
+	 * private data for convenient access.
+	 */
+	pvr_file->pvr_dev = pvr_dev;
+
+	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);
+
+	/*
+	 * Store reference to powervr-specific file private data in DRM file
+	 * private data.
+	 */
+	file->driver_priv = pvr_file;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
+ * drm_file is closed.
+ * @drm_dev: [IN] DRM device (unused).
+ * @file: [IN] DRM file private data.
+ *
+ * Frees powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ */
+static void
+pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
+			 struct drm_file *file)
+{
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	/* Kill remaining contexts. */
+	pvr_destroy_contexts_for_file(pvr_file);
+
+	/* Drop references on any remaining objects. */
+	pvr_destroy_free_lists_for_file(pvr_file);
+	pvr_destroy_hwrt_datasets_for_file(pvr_file);
+	pvr_destroy_vm_contexts_for_file(pvr_file);
+
+	kfree(pvr_file);
+	file->driver_priv = NULL;
+}
+
+DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);
+
+static struct drm_driver pvr_drm_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+	.open = pvr_drm_driver_open,
+	.postclose = pvr_drm_driver_postclose,
+	.ioctls = pvr_drm_driver_ioctls,
+	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
+	.fops = &pvr_drm_driver_fops,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = pvr_debugfs_init,
+#endif
+
+	.name = PVR_DRIVER_NAME,
+	.desc = PVR_DRIVER_DESC,
+	.date = PVR_DRIVER_DATE,
+	.major = PVR_DRIVER_MAJOR,
+	.minor = PVR_DRIVER_MINOR,
+	.patchlevel = PVR_DRIVER_PATCHLEVEL,
+
+	/*
+	 * These three (four) helper functions implement PRIME buffer sharing
+	 * for us. The last is set implicitly when not assigned here. The only
+	 * additional requirement to make PRIME work is to call dma_set_mask()
+	 * in pvr_probe() to tell DMA that we can read from more than the first
+	 * 4GB (32 bits) of memory address space. The subsequent call to
+	 * dma_set_max_seg_size() is not strictly required, but prevents some
+	 * warnings from appearing when CONFIG_DMA_API_DEBUG_SG is enabled.
+	 */
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.gem_prime_import_sg_table = __pvr_gem_prime_import_sg_table,
+	/* .gem_prime_import = drm_gem_prime_import, */
+};
+
+static int
+pvr_probe(struct platform_device *plat_dev)
+{
+	struct pvr_device *pvr_dev;
+	struct drm_device *drm_dev;
+	int err;
+
+	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
+				     struct pvr_device, base);
+	if (IS_ERR(pvr_dev)) {
+		err = PTR_ERR(pvr_dev);
+		goto err_out;
+	}
+	drm_dev = &pvr_dev->base;
+
+	platform_set_drvdata(plat_dev, drm_dev);
+
+	pvr_context_device_init(pvr_dev);
+
+	pm_runtime_enable(&plat_dev->dev);
+	pvr_power_init(pvr_dev);
+
+	pvr_dev->vendor.callbacks = of_device_get_match_data(&plat_dev->dev);
+
+	if (pvr_dev->vendor.callbacks && pvr_dev->vendor.callbacks->init) {
+		err = pvr_dev->vendor.callbacks->init(pvr_dev);
+		if (err)
+			goto err_pm_runtime_disable;
+	}
+
+	err = pvr_device_init(pvr_dev);
+	if (err)
+		goto err_vendor_fini;
+
+	err = drm_dev_register(drm_dev, 0);
+	if (err)
+		goto err_device_fini;
+
+	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);
+
+	return 0;
+
+err_device_fini:
+	pvr_device_fini(pvr_dev);
+
+err_vendor_fini:
+	if (pvr_dev->vendor.callbacks && pvr_dev->vendor.callbacks->fini)
+		pvr_dev->vendor.callbacks->fini(pvr_dev);
+
+err_pm_runtime_disable:
+	pm_runtime_disable(&plat_dev->dev);
+
+err_out:
+	return err;
+}
+
+static int
+pvr_remove(struct platform_device *plat_dev)
+{
+	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+
+	WARN_ON(!xa_empty(&pvr_dev->job_ids));
+	WARN_ON(!xa_empty(&pvr_dev->free_list_ids));
+	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
+
+	xa_destroy(&pvr_dev->job_ids);
+	xa_destroy(&pvr_dev->free_list_ids);
+	xa_destroy(&pvr_dev->ctx_ids);
+
+	drm_dev_unregister(drm_dev);
+	pvr_device_fini(pvr_dev);
+	if (pvr_dev->vendor.callbacks && pvr_dev->vendor.callbacks->fini)
+		pvr_dev->vendor.callbacks->fini(pvr_dev);
+	pm_runtime_disable(&plat_dev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "mediatek,mt8173-gpu", .data = &pvr_mt8173_callbacks },
+	{ .compatible = "ti,am62-gpu", .data = NULL },
+	{ .compatible = "img,powervr-series6xt", .data = NULL },
+	{ .compatible = "img,powervr-seriesaxe", .data = NULL },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int pvr_device_suspend(struct device *dev)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	int err = 0;
+
+	if (pvr_dev->vendor.callbacks &&
+	    pvr_dev->vendor.callbacks->power_disable) {
+		err = pvr_dev->vendor.callbacks->power_disable(pvr_dev);
+		if (err)
+			goto err_out;
+	}
+
+	clk_disable(pvr_dev->mem_clk);
+	clk_disable(pvr_dev->sys_clk);
+	clk_disable(pvr_dev->core_clk);
+
+	if (pvr_dev->regulator)
+		regulator_disable(pvr_dev->regulator);
+
+err_out:
+	return err;
+}
+
+static int pvr_device_resume(struct device *dev)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	int err;
+
+	if (pvr_dev->regulator) {
+		err = regulator_enable(pvr_dev->regulator);
+		if (err)
+			goto err_out;
+	}
+
+	clk_enable(pvr_dev->core_clk);
+	clk_enable(pvr_dev->sys_clk);
+	clk_enable(pvr_dev->mem_clk);
+
+	if (pvr_dev->vendor.callbacks &&
+	    pvr_dev->vendor.callbacks->power_enable) {
+		err = pvr_dev->vendor.callbacks->power_enable(pvr_dev);
+		if (err)
+			goto err_clk_disable;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable(pvr_dev->mem_clk);
+	clk_disable(pvr_dev->sys_clk);
+	clk_disable(pvr_dev->core_clk);
+
+err_out:
+	return err;
+}
+
+static const struct dev_pm_ops pvr_pm_ops = {
+	SET_RUNTIME_PM_OPS(pvr_device_suspend, pvr_device_resume, NULL)
+};
+
+static struct platform_driver pvr_driver = {
+	.probe = pvr_probe,
+	.remove = pvr_remove,
+	.driver = {
+		.name = PVR_DRIVER_NAME,
+		.pm = &pvr_pm_ops,
+		.of_match_table = dt_match,
+	},
+};
+module_platform_driver(pvr_driver);
+
+MODULE_AUTHOR("Imagination Technologies Ltd.");
+MODULE_DESCRIPTION(PVR_DRIVER_DESC);
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_FIRMWARE("powervr/rogue_4.40.2.51_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
new file mode 100644
index 000000000000..2b7ac5bafc4a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -0,0 +1,89 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DRV_H__
+#define __PVR_DRV_H__
+
+#include <linux/compiler_attributes.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_DRIVER_NAME "powervr"
+#define PVR_DRIVER_DESC "Imagination PowerVR Graphics"
+#define PVR_DRIVER_DATE "20220211"
+
+/*
+ * Driver interface version:
+ *  - 1.0: Initial interface
+ */
+#define PVR_DRIVER_MAJOR 1
+#define PVR_DRIVER_MINOR 0
+#define PVR_DRIVER_PATCHLEVEL 0
+
+int pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out);
+int pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in);
+int pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size,
+		       void **out);
+int pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+		       const void *in);
+
+#define PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+	(offsetof(_typename, _last_mandatory_field) + \
+	 sizeof(((_typename *)NULL)->_last_mandatory_field))
+
+#define PVR_UOBJ_DECL(_typename, _last_mandatory_field) \
+	, _typename : PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+
+/**
+ * DOC: PVR user objects
+ *
+ * Macros used to aid copying structured and array data to and from
+ * userspace. Objects can differ in size, provided the minimum size
+ * allowed is specified (using the last mandatory field in the struct).
+ * All types used with PVR_UOBJ_GET/SET macros must be listed here under
+ * PVR_UOBJ_MIN_SIZE, with the last mandatory struct field specified.
+ */
+#define PVR_UOBJ_MIN_SIZE(_obj_name) _Generic(_obj_name \
+	PVR_UOBJ_DECL(struct drm_pvr_job, hwrt) \
+	PVR_UOBJ_DECL(struct drm_pvr_sync_op, value) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_gpu_info, num_phantoms) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_runtime_info, cdm_max_local_mem_size_regs) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_hwrt_info, _padding_4) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_quirks, _padding_c) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_enhancements, _padding_c) \
+	PVR_UOBJ_DECL(struct drm_pvr_heap, page_size_log2) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_heap_info, heaps) \
+	PVR_UOBJ_DECL(struct drm_pvr_static_data_area, offset) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_static_data_areas, static_data_areas) \
+	)
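+
+/*
+ * For example (illustrative expansion), PVR_UOBJ_MIN_SIZE() applied to a
+ * struct drm_pvr_dev_query_quirks lvalue evaluates to:
+ *
+ *	offsetof(struct drm_pvr_dev_query_quirks, _padding_c) +
+ *	sizeof(((struct drm_pvr_dev_query_quirks *)NULL)->_padding_c)
+ *
+ * i.e. everything up to and including the last mandatory field.
+ */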
+
+/** PVR_UOBJ_GET() - Copies from _src_usr_ptr to &_dest_obj. */
+#define PVR_UOBJ_GET(_dest_obj, _usr_size, _src_usr_ptr) \
+	pvr_get_uobj(_src_usr_ptr, _usr_size, \
+		     PVR_UOBJ_MIN_SIZE(_dest_obj), \
+		     sizeof(_dest_obj), &_dest_obj)
+
+/** PVR_UOBJ_SET() - Copies from &_src_obj to _dest_usr_ptr. */
+#define PVR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+	pvr_set_uobj(_dest_usr_ptr, _usr_size, \
+		     PVR_UOBJ_MIN_SIZE(_src_obj), \
+		     sizeof(_src_obj), &_src_obj)
+
+/**
+ * PVR_UOBJ_GET_ARRAY() - Copies from _src_drm_pvr_obj_array.array to
+ * allocated memory and returns a pointer in _dest_array.
+ */
+#define PVR_UOBJ_GET_ARRAY(_dest_array, _src_drm_pvr_obj_array) \
+	pvr_get_uobj_array(_src_drm_pvr_obj_array, \
+			   PVR_UOBJ_MIN_SIZE(_dest_array[0]), \
+			   sizeof(_dest_array[0]), (void **)&_dest_array)
+
+/**
+ * PVR_UOBJ_SET_ARRAY() - Copies from _src_array to
+ * _dest_drm_pvr_obj_array.array.
+ */
+#define PVR_UOBJ_SET_ARRAY(_dest_drm_pvr_obj_array, _src_array) \
+	pvr_set_uobj_array(_dest_drm_pvr_obj_array, \
+			   PVR_UOBJ_MIN_SIZE(_src_array[0]), \
+			   sizeof(_src_array[0]), _src_array)
+
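+/*
+ * Usage sketch (illustrative; mirrors the dev_query handlers in pvr_drv.c):
+ *
+ *	struct drm_pvr_dev_query_quirks query;
+ *	int err;
+ *
+ *	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	// ... fill out &query ...
+ *
+ *	return PVR_UOBJ_SET(args->pointer, args->size, query);
+ */
+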
+#endif /* __PVR_DRV_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_dump.c b/drivers/gpu/drm/imagination/pvr_dump.c
new file mode 100644
index 000000000000..2a6b5bcb8120
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_dump.c
@@ -0,0 +1,353 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include <generated/utsrelease.h>
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_dump.h"
+#include "pvr_rogue_fwif.h"
+
+static const struct {
+	u32 offset;
+	u32 flags;
+} pvr_dump_registers[] = {
+	{ROGUE_CR_EVENT_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+	{ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+	{ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT},
+	{ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+	{ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT},
+	{ROGUE_CR_BIF_MMU_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+	{ROGUE_CR_BIF_MMU_ENTRY, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+	{ROGUE_CR_BIF_MMU_ENTRY_STATUS, PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT},
+	{ROGUE_CR_BIF_STATUS_MMU, PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT},
+};
+
+static void
+pvr_coredump_write_header(u32 size, u8 **p)
+{
+	struct pvr_coredump_header *header = (struct pvr_coredump_header *)*p;
+
+	header->magic = cpu_to_le32(PVR_COREDUMP_HEADER_MAGIC);
+	header->major_version = cpu_to_le32(PVR_COREDUMP_HEADER_VERSION_MAJ);
+	header->minor_version = cpu_to_le32(PVR_COREDUMP_HEADER_VERSION_MIN);
+	header->size = cpu_to_le32(size);
+
+	*p = (u8 *)(header + 1);
+}
+
+static void
+pvr_coredump_write_block_header(u32 block_type, u32 size, u8 **p)
+{
+	struct pvr_coredump_block_header *block_header;
+
+	block_header = (struct pvr_coredump_block_header *)*p;
+	block_header->type = cpu_to_le32(block_type);
+	block_header->size = cpu_to_le32(size);
+	*p = (u8 *)(block_header + 1);
+}
+
+static void
+pvr_coredump_write_devinfo(struct pvr_device *pvr_dev, u32 context_id, u8 **p)
+{
+	struct pvr_coredump_block_devinfo *devinfo;
+
+	pvr_coredump_write_block_header(PVR_COREDUMP_BLOCK_TYPE_DEVINFO, sizeof(*devinfo), p);
+
+	devinfo = (struct pvr_coredump_block_devinfo *)*p;
+	devinfo->gpu_id = cpu_to_le64(pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id));
+	devinfo->fw_version.major = cpu_to_le32(pvr_dev->fw_version.major);
+	devinfo->fw_version.minor = cpu_to_le32(pvr_dev->fw_version.minor);
+	if (context_id) {
+		struct pvr_context *ctx = pvr_context_lookup_id(pvr_dev, context_id);
+
+		if (ctx) {
+			strscpy(devinfo->process_name, ctx->process_name,
+				sizeof(devinfo->process_name));
+
+			pvr_context_put(ctx);
+		}
+	}
+	strscpy(devinfo->kernel_version, UTS_RELEASE, sizeof(devinfo->kernel_version));
+	*p = (u8 *)(devinfo + 1);
+}
+
+static void
+pvr_coredump_write_registers(struct pvr_device *pvr_dev, u8 **p)
+{
+	struct pvr_coredump_block_register *reg;
+	u32 i;
+
+	pvr_coredump_write_block_header(PVR_COREDUMP_BLOCK_TYPE_REGISTERS,
+					sizeof(*reg) * ARRAY_SIZE(pvr_dump_registers), p);
+
+	reg = (struct pvr_coredump_block_register *)*p;
+
+	for (i = 0; i < ARRAY_SIZE(pvr_dump_registers); i++) {
+		u32 offset = pvr_dump_registers[i].offset;
+		u32 flags = pvr_dump_registers[i].flags;
+
+		reg->offset = cpu_to_le32(offset);
+		reg->flags = cpu_to_le32(flags);
+
+		/*
+		 * Switch on the CPU-endian table values; reg->offset and
+		 * reg->flags are already little-endian at this point.
+		 */
+		switch (flags & PVR_COREDUMP_REGISTER_FLAG_SIZE_MASK) {
+		case PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT:
+			reg->value = cpu_to_le64(__pvr_cr_read32(pvr_dev, offset));
+			break;
+
+		case PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT:
+			reg->value = cpu_to_le64(__pvr_cr_read64(pvr_dev, offset));
+			break;
+			break;
+
+		default:
+			WARN_ON(1);
+			reg->value = 0;
+			break;
+		}
+
+		reg++;
+	}
+
+	*p = (u8 *)reg;
+}
+
+static void
+pvr_coredump_write_reset_data(struct rogue_fwif_fwccb_cmd_context_reset_data *fw_reset_data,
+			      u8 **p)
+{
+	struct pvr_coredump_block_reset_data *reset_data;
+
+	pvr_coredump_write_block_header(PVR_COREDUMP_BLOCK_TYPE_CONTEXT_RESET_DATA,
+					sizeof(*reset_data), p);
+
+	reset_data = (struct pvr_coredump_block_reset_data *)*p;
+	reset_data->context_id = cpu_to_le32(fw_reset_data->server_common_context_id);
+	reset_data->reset_reason = cpu_to_le32(fw_reset_data->reset_reason);
+	reset_data->dm = cpu_to_le32(fw_reset_data->dm);
+	reset_data->reset_job_ref = cpu_to_le32(fw_reset_data->reset_job_ref);
+	reset_data->flags = cpu_to_le32(fw_reset_data->flags);
+	reset_data->fault_address = cpu_to_le64(fw_reset_data->fault_address);
+	*p = (u8 *)(reset_data + 1);
+}
+
+static void
+pvr_coredump_write_hwrinfo(struct pvr_device *pvr_dev,
+			   u8 **p)
+{
+	struct rogue_fwif_hwrinfobuf *hwrinfobuf = pvr_dev->fw_dev.hwrinfobuf;
+	struct pvr_coredump_block_hwrinfo *hwrinfo;
+	struct rogue_hwrinfo *fw_hwrinfo;
+
+	pvr_coredump_write_block_header(PVR_COREDUMP_BLOCK_TYPE_HWRINFO, sizeof(*hwrinfo), p);
+
+	/* Record the most recent HWR. */
+	fw_hwrinfo = &hwrinfobuf->hwr_info[(hwrinfobuf->write_index - 1) &
+					   ROGUE_FWIF_HWINFO_LAST_INDEX];
+
+	hwrinfo = (struct pvr_coredump_block_hwrinfo *)*p;
+
+	hwrinfo->hwr_type = cpu_to_le32(fw_hwrinfo->hwr_type);
+	hwrinfo->dm = cpu_to_le32(fw_hwrinfo->dm);
+	hwrinfo->core_id = cpu_to_le32(fw_hwrinfo->core_id);
+	hwrinfo->event_status = cpu_to_le32(fw_hwrinfo->event_status);
+	hwrinfo->dm_state = cpu_to_le32(fw_hwrinfo->hwr_recovery_flags);
+	hwrinfo->active_hwrt_data = cpu_to_le32(fw_hwrinfo->active_hwrt_data);
+
+	/* Switch on the CPU-endian value; hwrinfo->hwr_type is already LE. */
+	switch (fw_hwrinfo->hwr_type) {
+	case ROGUE_HWRTYPE_BIF0FAULT:
+	case ROGUE_HWRTYPE_BIF1FAULT:
+	case ROGUE_HWRTYPE_TEXASBIF0FAULT:
+	case ROGUE_HWRTYPE_MMURISCVFAULT:
+		hwrinfo->hwr_data.bif_info.bif_req_status =
+			cpu_to_le64(fw_hwrinfo->hwr_data.bif_info.bif_req_status);
+		hwrinfo->hwr_data.bif_info.bif_mmu_status =
+			cpu_to_le64(fw_hwrinfo->hwr_data.bif_info.bif_mmu_status);
+		break;
+
+	case ROGUE_HWRTYPE_ECCFAULT:
+		hwrinfo->hwr_data.ecc_info.fault_gpu =
+			cpu_to_le32(fw_hwrinfo->hwr_data.ecc_info.fault_gpu);
+		break;
+
+	case ROGUE_HWRTYPE_MMUFAULT:
+	case ROGUE_HWRTYPE_MMUMETAFAULT:
+		hwrinfo->hwr_data.mmu_info.mmu_status[0] =
+			cpu_to_le64(fw_hwrinfo->hwr_data.mmu_info.mmu_status[0]);
+		hwrinfo->hwr_data.mmu_info.mmu_status[1] =
+			cpu_to_le64(fw_hwrinfo->hwr_data.mmu_info.mmu_status[1]);
+		break;
+
+	case ROGUE_HWRTYPE_POLLFAILURE:
+		hwrinfo->hwr_data.poll_info.thread_num =
+			cpu_to_le32(fw_hwrinfo->hwr_data.poll_info.thread_num);
+		hwrinfo->hwr_data.poll_info.cr_poll_addr =
+			cpu_to_le32(fw_hwrinfo->hwr_data.poll_info.cr_poll_addr);
+		hwrinfo->hwr_data.poll_info.cr_poll_mask =
+			cpu_to_le32(fw_hwrinfo->hwr_data.poll_info.cr_poll_mask);
+		hwrinfo->hwr_data.poll_info.cr_poll_last_value =
+			cpu_to_le32(fw_hwrinfo->hwr_data.poll_info.cr_poll_last_value);
+		break;
+
+	case ROGUE_HWRTYPE_MIPSTLBFAULT:
+		hwrinfo->hwr_data.tlb_info.bad_addr =
+			cpu_to_le32(fw_hwrinfo->hwr_data.tlb_info.bad_addr);
+		hwrinfo->hwr_data.tlb_info.entry_lo =
+			cpu_to_le32(fw_hwrinfo->hwr_data.tlb_info.entry_lo);
+		break;
+
+	case ROGUE_HWRTYPE_OVERRUN:
+	case ROGUE_HWRTYPE_UNKNOWNFAILURE:
+		/* Nothing to save. */
+	default:
+		break;
+	}
+
+	*p = (u8 *)(hwrinfo + 1);
+}
+
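+/*
+ * The devcoredump blob produced below is laid out as follows (each block
+ * prefixed by a struct pvr_coredump_block_header):
+ *
+ *	struct pvr_coredump_header
+ *	DEVINFO block
+ *	REGISTERS block (one entry per pvr_dump_registers[] element)
+ *	HWRINFO block
+ *	CONTEXT_RESET_DATA block (only when FW reset data is supplied)
+ */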
+static void
+pvr_coredump(struct pvr_device *pvr_dev,
+	     struct rogue_fwif_fwccb_cmd_context_reset_data *fw_reset_data,
+	     u32 context_id)
+{
+	u8 *data;
+	u32 size;
+	u8 *p;
+
+	size = sizeof(struct pvr_coredump_header);
+
+	size += sizeof(struct pvr_coredump_block_header) +
+		sizeof(struct pvr_coredump_block_devinfo);
+
+	size += sizeof(struct pvr_coredump_block_header) +
+		sizeof(struct pvr_coredump_block_register) * ARRAY_SIZE(pvr_dump_registers);
+
+	size += sizeof(struct pvr_coredump_block_header) +
+		sizeof(struct pvr_coredump_block_hwrinfo);
+
+	if (fw_reset_data) {
+		size += sizeof(struct pvr_coredump_block_header) +
+			sizeof(struct pvr_coredump_block_reset_data);
+	}
+
+	/*
+	 * Add flags to avoid triggering the OOM killer or error reporting;
+	 * we should just warn if we can't allocate the buffer.
+	 */
+	data = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY);
+	if (!data) {
+		drm_warn(from_pvr_device(pvr_dev), "Failed to allocate devcoredump buffer\n");
+		return;
+	}
+
+	p = data;
+
+	pvr_coredump_write_header(size, &p);
+	pvr_coredump_write_devinfo(pvr_dev, context_id, &p);
+	pvr_coredump_write_registers(pvr_dev, &p);
+	pvr_coredump_write_hwrinfo(pvr_dev, &p);
+
+	if (fw_reset_data)
+		pvr_coredump_write_reset_data(fw_reset_data, &p);
+
+	dev_coredumpv(from_pvr_device(pvr_dev)->dev, data, size, GFP_KERNEL);
+}
+
+static const char *
+get_reset_reason_desc(enum rogue_context_reset_reason reason)
+{
+	switch (reason) {
+	case ROGUE_CONTEXT_RESET_REASON_NONE:
+		return "None";
+	case ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP:
+		return "Guilty lockup";
+	case ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP:
+		return "Innocent lockup";
+	case ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING:
+		return "Guilty overrunning";
+	case ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING:
+		return "Innocent overrunning";
+	case ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH:
+		return "Hard context switch";
+	case ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG:
+		return "Firmware watchdog";
+	case ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT:
+		return "Firmware pagefault";
+	case ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR:
+		return "Firmware execution error";
+	case ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR:
+		return "Host watchdog";
+	case ROGUE_CONTEXT_GEOM_OOM_DISABLED:
+		return "Geometry OOM disabled";
+
+	default:
+		return "Unknown";
+	}
+}
+
+static const char *
+get_dm_name(u32 dm)
+{
+	switch (dm) {
+	case PVR_FWIF_DM_GP:
+		return "General purpose";
+	case PVR_FWIF_DM_2D:
+		return "2D";
+	case PVR_FWIF_DM_GEOM:
+		return "Geometry";
+	case PVR_FWIF_DM_FRAG:
+		return "Fragment";
+	case PVR_FWIF_DM_CDM:
+		return "Compute";
+	case PVR_FWIF_DM_RAY:
+		return "Raytracing";
+	case PVR_FWIF_DM_GEOM2:
+		return "Geometry 2";
+	case PVR_FWIF_DM_GEOM3:
+		return "Geometry 3";
+	case PVR_FWIF_DM_GEOM4:
+		return "Geometry 4";
+
+	default:
+		return "Unknown";
+	}
+}
+
+/**
+ * pvr_context_reset_notification() - Handle context reset notification from FW
+ * @pvr_dev: Device pointer.
+ * @data: Data provided by FW.
+ *
+ * This will decode the data structure provided by FW and print the results via drm_info().
+ */
+void
+pvr_context_reset_notification(struct pvr_device *pvr_dev,
+			       struct rogue_fwif_fwccb_cmd_context_reset_data *data)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+	if (data->flags & ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) {
+		drm_info(drm_dev, "Received context reset notification for all contexts\n");
+	} else {
+		drm_info(drm_dev, "Received context reset notification on context %u\n",
+			 data->server_common_context_id);
+	}
+
+	drm_info(drm_dev, "  Reset reason=%u (%s)\n", data->reset_reason,
+		 get_reset_reason_desc(data->reset_reason));
+	drm_info(drm_dev, "  Data Master=%u (%s)\n", data->dm, get_dm_name(data->dm));
+	drm_info(drm_dev, "  Job ref=%u\n", data->reset_job_ref);
+
+	if (data->flags & ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) {
+		drm_info(drm_dev, "  Page fault occurred, fault address=%llx\n",
+			 (unsigned long long)data->fault_address);
+	}
+
+	pvr_coredump(pvr_dev, data,
+		     (data->flags & ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) ? 0 :
+		      data->server_common_context_id);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_dump.h b/drivers/gpu/drm/imagination/pvr_dump.h
new file mode 100644
index 000000000000..75714aff0a5d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_dump.h
@@ -0,0 +1,17 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DUMP_H__
+#define __PVR_DUMP_H__
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_rogue_fwif.h. */
+struct rogue_fwif_fwccb_cmd_context_reset_data;
+
+void
+pvr_context_reset_notification(struct pvr_device *pvr_dev,
+			       struct rogue_fwif_fwccb_cmd_context_reset_data *data);
+
+#endif /* __PVR_DUMP_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
new file mode 100644
index 000000000000..1a01cabffb89
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -0,0 +1,559 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define FREE_LIST_ENTRY_SIZE sizeof(u32)
+
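+/*
+ * FREE_LIST_ALIGNMENT is a mask expressed in free list entries (one entry
+ * per PM physical page), not a byte count: page counts are validated
+ * against it, and ready-page counts are rounded down with it.
+ */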
+#define FREE_LIST_ALIGNMENT \
+	((ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE) - 1)
+
+#define FREE_LIST_MIN_PAGES 50
+#define FREE_LIST_MIN_PAGES_BRN66011 40
+#define FREE_LIST_MIN_PAGES_ROGUEXE 25
+
+/**
+ * pvr_get_free_list_min_pages() - Get minimum free list size for this device
+ * @pvr_dev: Device pointer.
+ *
+ * Return:
+ *  * Minimum free list size, in PM physical pages.
+ */
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev)
+{
+	u32 value;
+
+	if (PVR_HAS_FEATURE(pvr_dev, roguexe)) {
+		if (PVR_HAS_QUIRK(pvr_dev, 66011))
+			value = FREE_LIST_MIN_PAGES_BRN66011;
+		else
+			value = FREE_LIST_MIN_PAGES_ROGUEXE;
+	} else {
+		value = FREE_LIST_MIN_PAGES;
+	}
+
+	return value;
+}
+
+static int
+free_list_create_kernel_structure(struct pvr_file *pvr_file,
+				  struct drm_pvr_ioctl_create_free_list_args *args,
+				  struct pvr_free_list *free_list)
+{
+	struct pvr_gem_object *free_list_obj;
+	struct pvr_vm_context *vm_ctx;
+	u64 free_list_size;
+	int err;
+
+	if (args->grow_threshold > 100 ||
+	    args->initial_num_pages > args->max_num_pages ||
+	    args->grow_num_pages > args->max_num_pages ||
+	    args->max_num_pages == 0 ||
+	    (args->initial_num_pages < args->max_num_pages && !args->grow_num_pages) ||
+	    (args->initial_num_pages == args->max_num_pages && args->grow_num_pages)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+	if ((args->initial_num_pages & FREE_LIST_ALIGNMENT) ||
+	    (args->max_num_pages & FREE_LIST_ALIGNMENT) ||
+	    (args->grow_num_pages & FREE_LIST_ALIGNMENT)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	free_list_obj = pvr_vm_find_gem_object(vm_ctx, args->free_list_gpu_addr,
+					       NULL, &free_list_size);
+	if (!free_list_obj) {
+		err = -EINVAL;
+		goto err_put_vm_context;
+	}
+
+	if ((free_list_obj->flags & DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS) ||
+	    !(free_list_obj->flags & DRM_PVR_BO_DEVICE_PM_FW_PROTECT) ||
+	    free_list_size < (args->max_num_pages * FREE_LIST_ENTRY_SIZE)) {
+		err = -EINVAL;
+		goto err_put_free_list_obj;
+	}
+
+	free_list->pvr_dev = pvr_file->pvr_dev;
+	free_list->current_pages = 0;
+	free_list->max_pages = args->max_num_pages;
+	free_list->grow_pages = args->grow_num_pages;
+	free_list->grow_threshold = args->grow_threshold;
+	free_list->obj = free_list_obj;
+
+	err = pvr_gem_object_get_pages(free_list->obj);
+	if (err < 0)
+		goto err_put_free_list_obj;
+
+	pvr_vm_context_put(vm_ctx);
+
+	return 0;
+
+err_put_free_list_obj:
+	pvr_gem_object_put(free_list_obj);
+
+err_put_vm_context:
+	pvr_vm_context_put(vm_ctx);
+
+err_out:
+	return err;
+}
+
+static void
+free_list_destroy_kernel_structure(struct pvr_free_list *free_list)
+{
+	WARN_ON(!list_empty(&free_list->hwrt_list));
+
+	pvr_gem_object_put_pages(free_list->obj);
+	pvr_gem_object_put(free_list->obj);
+}
+
+/**
+ * calculate_free_list_ready_pages_locked() - Function to work out the number of free
+ *                                            list pages to reserve for growing within
+ *                                            the FW without having to wait for the
+ *                                            host to progress a grow request
+ * @free_list: Pointer to free list.
+ * @pages: Total pages currently in free list.
+ *
+ * If the threshold or grow size means less than the alignment size (4 pages on
+ * Rogue), then the feature is not used.
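+ *
+ * Worked example (illustrative numbers): with 110 pages, a grow threshold
+ * of 25% and a grow size of 64 pages, the reservation starts at 27 pages
+ * (110 * 25 / 100), is below the grow-size cap, and is rounded down to 24
+ * pages by the 4-page alignment mask.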
+ *
+ * Caller must hold &free_list->lock.
+ *
+ * Return: number of pages to reserve.
+ */
+static u32
+calculate_free_list_ready_pages_locked(struct pvr_free_list *free_list, u32 pages)
+{
+	u32 ready_pages;
+
+	lockdep_assert_held(&free_list->lock);
+
+	ready_pages = ((pages * free_list->grow_threshold) / 100);
+
+	/* The number of pages must not exceed the grow size. */
+	ready_pages = min(ready_pages, free_list->grow_pages);
+
+	/*
+	 * The number of pages must be a multiple of the free list align size.
+	 */
+	ready_pages &= ~FREE_LIST_ALIGNMENT;
+
+	return ready_pages;
+}
+
+static u32
+calculate_free_list_ready_pages(struct pvr_free_list *free_list, u32 pages)
+{
+	u32 ret;
+
+	mutex_lock(&free_list->lock);
+
+	ret = calculate_free_list_ready_pages_locked(free_list, pages);
+
+	mutex_unlock(&free_list->lock);
+
+	return ret;
+}
+
+static int
+free_list_create_fw_structure(struct pvr_file *pvr_file,
+			      struct drm_pvr_ioctl_create_free_list_args *args,
+			      struct pvr_free_list *free_list)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct rogue_fwif_freelist *fw_data;
+	u32 ready_pages;
+	int err;
+
+	/*
+	 * Create and map the FW structure so we can initialise it. This is not
+	 * accessed on the CPU side post-initialisation so the mapping lifetime
+	 * is only for this function.
+	 */
+	free_list->fw_data = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*free_list->fw_data),
+							      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							      DRM_PVR_BO_CREATE_ZEROED,
+							      &free_list->fw_obj);
+	if (IS_ERR(free_list->fw_data)) {
+		err = PTR_ERR(free_list->fw_data);
+		goto err_out;
+	}
+
+	/* Fill out FW structure */
+	ready_pages = calculate_free_list_ready_pages(free_list,
+						      args->initial_num_pages);
+
+	fw_data = free_list->fw_data;
+
+	fw_data->max_pages = free_list->max_pages;
+	fw_data->current_pages = args->initial_num_pages - ready_pages;
+	fw_data->grow_pages = free_list->grow_pages;
+	fw_data->ready_pages = ready_pages;
+	fw_data->freelist_id = free_list->fw_id;
+	fw_data->grow_pending = false;
+	fw_data->current_stack_top = fw_data->current_pages - 1;
+	fw_data->freelist_dev_addr = args->free_list_gpu_addr;
+	fw_data->current_dev_addr = (fw_data->freelist_dev_addr +
+				     ((fw_data->max_pages - fw_data->current_pages) *
+				      FREE_LIST_ENTRY_SIZE)) &
+				    ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static void
+free_list_destroy_fw_structure(struct pvr_free_list *free_list)
+{
+	pvr_fw_object_vunmap(free_list->fw_obj, false);
+	pvr_fw_object_release(free_list->fw_obj);
+}
+
+static int
+pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
+				  struct sg_table *sgt, u32 offset, u32 num_pages)
+{
+	struct sg_dma_page_iter dma_iter;
+	u32 *page_list;
+	int err;
+
+	lockdep_assert_held(&free_list->lock);
+
+	page_list = pvr_gem_object_vmap(free_list->obj, false);
+	if (IS_ERR(page_list)) {
+		err = PTR_ERR(page_list);
+		goto err_out;
+	}
+
+	offset /= FREE_LIST_ENTRY_SIZE;
+	/* clang-format off */
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
+		u64 dma_pfn = dma_addr >>
+			       ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+		u32 dma_addr_offset;
+
+		BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE);
+
+		for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE;
+		     dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) {
+			WARN_ON_ONCE(dma_pfn >> 32);
+
+			page_list[offset++] = (u32)dma_pfn;
+			dma_pfn++;
+
+			num_pages--;
+			if (!num_pages)
+				break;
+		}
+
+		if (!num_pages)
+			break;
+	}
+	/* clang-format on */
+
+	pvr_gem_object_vunmap(free_list->obj, true);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node)
+{
+	struct pvr_free_list *free_list = free_list_node->free_list;
+	u32 start_page;
+	u32 offset;
+	int err;
+
+	lockdep_assert_held(&free_list->lock);
+
+	start_page = free_list->max_pages - free_list->current_pages -
+		     free_list_node->num_pages;
+	offset = (start_page * FREE_LIST_ENTRY_SIZE) &
+		  ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1);
+
+	err = pvr_free_list_insert_pages_locked(free_list, free_list_node->mem_obj->sgt,
+						offset, free_list_node->num_pages);
+	if (!err)
+		free_list->current_pages += free_list_node->num_pages;
+
+	return err;
+}
+
+static int
+pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages)
+{
+	struct pvr_device *pvr_dev = free_list->pvr_dev;
+	struct pvr_free_list_node *free_list_node;
+	int err;
+
+	mutex_lock(&free_list->lock);
+
+	if (num_pages & FREE_LIST_ALIGNMENT) {
+		err = -EINVAL;
+		goto err_unlock;
+	}
+
+	free_list_node = kzalloc(sizeof(*free_list_node), GFP_KERNEL);
+	if (!free_list_node) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+	free_list_node->num_pages = num_pages;
+	free_list_node->free_list = free_list;
+
+	free_list_node->mem_obj = pvr_gem_object_create(pvr_dev,
+							num_pages <<
+							ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+							PVR_BO_FW_FLAGS_DEVICE_CACHED);
+	if (IS_ERR(free_list_node->mem_obj)) {
+		err = PTR_ERR(free_list_node->mem_obj);
+		goto err_free;
+	}
+
+	err = pvr_gem_object_get_pages(free_list_node->mem_obj);
+	if (err < 0)
+		goto err_destroy_gem_object;
+
+	err = pvr_free_list_insert_node_locked(free_list_node);
+	if (err)
+		goto err_put_pages;
+
+	list_add_tail(&free_list_node->node, &free_list->mem_block_list);
+
+	/*
+	 * Reserve a number of ready pages to allow the FW to process OOM
+	 * quickly and asynchronously request a grow.
+	 */
+	free_list->ready_pages =
+		calculate_free_list_ready_pages_locked(free_list,
+						       free_list->current_pages);
+	free_list->current_pages -= free_list->ready_pages;
+
+	mutex_unlock(&free_list->lock);
+
+	return 0;
+
+err_put_pages:
+	pvr_gem_object_put_pages(free_list_node->mem_obj);
+
+err_destroy_gem_object:
+	pvr_gem_object_put(free_list_node->mem_obj);
+
+err_free:
+	kfree(free_list_node);
+
+err_unlock:
+	mutex_unlock(&free_list->lock);
+
+	return err;
+}
+
+static void
+pvr_free_list_free_node(struct pvr_free_list_node *free_list_node)
+{
+	pvr_gem_object_put_pages(free_list_node->mem_obj);
+	pvr_gem_object_put(free_list_node->mem_obj);
+
+	kfree(free_list_node);
+}
+
+/**
+ * pvr_free_list_create() - Create a new free list and return an object pointer
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ *  * Pointer to new free_list, or
+ *  * ERR_PTR(-%ENOMEM) on out of memory, or
+ *  * ERR_PTR(-%EINVAL) if creation arguments are invalid.
+ */
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+		     struct drm_pvr_ioctl_create_free_list_args *args)
+{
+	struct pvr_free_list *free_list;
+	int err;
+
+	/* Create and fill out the kernel structure */
+	free_list = kzalloc(sizeof(*free_list), GFP_KERNEL);
+
+	if (!free_list)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&free_list->ref_count);
+	INIT_LIST_HEAD(&free_list->mem_block_list);
+	INIT_LIST_HEAD(&free_list->hwrt_list);
+	mutex_init(&free_list->lock);
+
+	err = free_list_create_kernel_structure(pvr_file, args, free_list);
+	if (err < 0)
+		goto err_free;
+
+	/* Allocate global object ID for firmware. */
+	err = xa_alloc(&pvr_file->pvr_dev->free_list_ids,
+		       &free_list->fw_id,
+		       free_list,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err)
+		goto err_free;
+
+	err = free_list_create_fw_structure(pvr_file, args, free_list);
+	if (err < 0)
+		goto err_free;
+
+	err = pvr_free_list_grow(free_list, args->initial_num_pages);
+	if (err < 0)
+		goto err_free;
+
+	return free_list;
+
+err_free:
+	pvr_free_list_put(free_list);
+
+	return ERR_PTR(err);
+}
+
+void
+pvr_free_list_release(struct kref *ref_count)
+{
+	struct pvr_free_list *free_list =
+		container_of(ref_count, struct pvr_free_list, ref_count);
+	struct list_head *pos, *n;
+
+	xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id);
+
+	WARN_ON(pvr_fw_structure_cleanup(free_list->pvr_dev, ROGUE_FWIF_CLEANUP_FREELIST,
+					 free_list->fw_obj, 0));
+
+	/* clang-format off */
+	list_for_each_safe(pos, n, &free_list->mem_block_list) {
+		struct pvr_free_list_node *free_list_node =
+			container_of(pos, struct pvr_free_list_node, node);
+
+		list_del(pos);
+		pvr_free_list_free_node(free_list_node);
+	}
+	/* clang-format on */
+
+	free_list_destroy_kernel_structure(free_list);
+	free_list_destroy_fw_structure(free_list);
+	mutex_destroy(&free_list->lock);
+	kfree(free_list);
+}
+
+/**
+ * pvr_destroy_free_lists_for_file() - Destroy any free lists associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all free lists associated with @pvr_file from the device free_list
+ * list and drops initial references. Free lists will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file)
+{
+	struct pvr_free_list *free_list;
+	unsigned long handle;
+
+	xa_for_each(&pvr_file->free_list_handles, handle, free_list) {
+		(void)free_list;
+		pvr_free_list_put(xa_erase(&pvr_file->free_list_handles, handle));
+	}
+}
+
+/**
+ * pvr_free_list_put() - Release reference on free list
+ * @free_list: Pointer to list to release reference on
+ */
+void
+pvr_free_list_put(struct pvr_free_list *free_list)
+{
+	if (free_list)
+		kref_put(&free_list->ref_count, pvr_free_list_release);
+}
+
+void pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+	mutex_lock(&free_list->lock);
+
+	list_add_tail(&hwrt_data->freelist_node, &free_list->hwrt_list);
+
+	mutex_unlock(&free_list->lock);
+}
+
+void pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data)
+{
+	mutex_lock(&free_list->lock);
+
+	list_del(&hwrt_data->freelist_node);
+
+	mutex_unlock(&free_list->lock);
+}
+
+void
+pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id)
+{
+	struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, freelist_id);
+	struct pvr_free_list_node *free_list_node;
+	struct rogue_fwif_freelist *fw_data;
+	struct pvr_hwrt_data *hwrt_data;
+
+	if (!free_list)
+		return;
+
+	mutex_lock(&free_list->lock);
+
+	/* Rebuild the free list based on the memory block list. */
+	free_list->current_pages = 0;
+
+	list_for_each_entry(free_list_node, &free_list->mem_block_list, node)
+		WARN_ON(pvr_free_list_insert_node_locked(free_list_node));
+
+	/*
+	 * Remove the ready pages, which are reserved to allow the FW to process OOM quickly and
+	 * asynchronously request a grow.
+	 */
+	free_list->current_pages -= free_list->ready_pages;
+
+	fw_data = free_list->fw_data;
+	fw_data->current_stack_top = fw_data->current_pages - 1;
+	fw_data->allocated_page_count = 0;
+	fw_data->allocated_mmu_page_count = 0;
+
+	/* Reset the state of any associated HWRTs. */
+	list_for_each_entry(hwrt_data, &free_list->hwrt_list, freelist_node) {
+		hwrt_data->fw_data->state = ROGUE_FWIF_RTDATA_STATE_HWR;
+		hwrt_data->fw_data->hwrt_data_flags &= ~HWRTDATA_HAS_LAST_GEOM;
+	}
+
+	mutex_unlock(&free_list->lock);
+
+	pvr_free_list_put(free_list);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.h b/drivers/gpu/drm/imagination/pvr_free_list.h
new file mode 100644
index 000000000000..69834206ec78
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.h
@@ -0,0 +1,185 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FREE_LIST_H__
+#define __PVR_FREE_LIST_H__
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+/* Forward declaration from pvr_hwrt.h. */
+struct pvr_hwrt_data;
+
+/**
+ * struct pvr_free_list_node - structure representing an allocation in the free
+ *                             list
+ */
+struct pvr_free_list_node {
+	/** @node: List node for &pvr_free_list.mem_block_list. */
+	struct list_head node;
+
+	/** @free_list: Pointer to owning free list. */
+	struct pvr_free_list *free_list;
+
+	/** @num_pages: Number of pages in this node. */
+	u32 num_pages;
+
+	/** @mem_obj: GEM object representing the pages in this node. */
+	struct pvr_gem_object *mem_obj;
+};
+
+/**
+ * struct pvr_free_list - structure representing a free list
+ */
+struct pvr_free_list {
+	/** @ref_count: Reference count of object. */
+	struct kref ref_count;
+
+	/** @pvr_dev: Pointer to device that owns this object. */
+	struct pvr_device *pvr_dev;
+
+	/** @obj: GEM object representing the free list. */
+	struct pvr_gem_object *obj;
+
+	/** @fw_obj: FW object representing the FW-side structure. */
+	struct pvr_fw_object *fw_obj;
+
+	/** @fw_data: Pointer to CPU mapping of the FW-side structure. */
+	struct rogue_fwif_freelist *fw_data;
+
+	/**
+	 * @lock: Mutex protecting modification of the free list. Must be held when accessing any
+	 *        of the members below.
+	 */
+	struct mutex lock;
+
+	/** @fw_id: Firmware ID for this object. */
+	u32 fw_id;
+
+	/** @current_pages: Current number of pages in free list. */
+	u32 current_pages;
+
+	/** @max_pages: Maximum number of pages in free list. */
+	u32 max_pages;
+
+	/** @grow_pages: Pages to grow free list by per request. */
+	u32 grow_pages;
+
+	/**
+	 * @grow_threshold: Percentage of FL memory used that should trigger a
+	 *                  new grow request.
+	 */
+	u32 grow_threshold;
+
+	/**
+	 * @ready_pages: Number of pages reserved for FW to use while a grow
+	 *               request is being processed.
+	 */
+	u32 ready_pages;
+
+	/** @mem_block_list: List of memory blocks in this free list. */
+	struct list_head mem_block_list;
+
+	/** @hwrt_list: List of HWRTs using this free list. */
+	struct list_head hwrt_list;
+};
+
+struct pvr_free_list *
+pvr_free_list_create(struct pvr_file *pvr_file,
+		     struct drm_pvr_ioctl_create_free_list_args *args);
+
+void
+pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file);
+
+u32
+pvr_get_free_list_min_pages(struct pvr_device *pvr_dev);
+
+static __always_inline struct pvr_free_list *
+pvr_free_list_get(struct pvr_free_list *free_list)
+{
+	if (free_list)
+		kref_get(&free_list->ref_count);
+
+	return free_list;
+}
+
+/**
+ * pvr_free_list_lookup() - Lookup free list pointer from handle and file
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ *  * The requested object on success, or
+ *  * %NULL on failure (object does not exist in list, is not a free list, or
+ *    does not belong to @pvr_file)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_free_list *free_list;
+
+	xa_lock(&pvr_file->free_list_handles);
+	free_list = pvr_free_list_get(xa_load(&pvr_file->free_list_handles, handle));
+	xa_unlock(&pvr_file->free_list_handles);
+
+	return free_list;
+}
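+
+/*
+ * Illustrative only (not part of this patch): a caller resolving a userspace
+ * handle, where handle names a hypothetical ioctl argument:
+ *
+ *	struct pvr_free_list *free_list;
+ *
+ *	free_list = pvr_free_list_lookup(pvr_file, handle);
+ *	if (!free_list)
+ *		return -EINVAL;
+ *	... use free_list ...
+ *	pvr_free_list_put(free_list);
+ */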
+
+/**
+ * pvr_free_list_lookup_id() - Lookup free list pointer from FW ID
+ * @pvr_dev: Device pointer.
+ * @id: FW object ID.
+ *
+ * Takes reference on free list object. Call pvr_free_list_put() to release.
+ *
+ * Returns:
+ *  * The requested object on success, or
+ *  * %NULL on failure (object does not exist in list, or is not a free list)
+ */
+static __always_inline struct pvr_free_list *
+pvr_free_list_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+	struct pvr_free_list *free_list;
+
+	xa_lock(&pvr_dev->free_list_ids);
+
+	/* Free lists are removed from the free_list_ids set in the free list
+	 * release path, meaning the ref_count may already have reached zero
+	 * by the time we look one up here. Use kref_get_unless_zero() so we
+	 * never acquire a free list that's being destroyed.
+	 */
+	free_list = xa_load(&pvr_dev->free_list_ids, id);
+	if (free_list && !kref_get_unless_zero(&free_list->ref_count))
+		free_list = NULL;
+	xa_unlock(&pvr_dev->free_list_ids);
+
+	return free_list;
+}
+
+void
+pvr_free_list_put(struct pvr_free_list *free_list);
+
+void
+pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+void
+pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data);
+
+void
+pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id);
+
+#endif /* __PVR_FREE_LIST_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
new file mode 100644
index 000000000000..78bdf0ed3eed
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -0,0 +1,1107 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_ccb.h"
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+#include "pvr_power.h"
+#include "pvr_rogue_heap_config.h"
+
+#include <drm/drm_mm.h>
+#include <linux/firmware.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+
+#define FW_MAX_SUPPORTED_MAJOR_VERSION 1
+
+#define FW_BOOT_TIMEOUT_USEC 5000000
+
+/* Config heap occupies top 192k of the firmware heap. */
+#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K
+#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+
+/* Main firmware allocations should come from the remainder of the heap. */
+#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE
+
+/* Offsets from start of configuration area of FW heap. */
+#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0
+#define PVR_ROGUE_FWIF_OSINIT_OFFSET \
+	(PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \
+	(PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY)
+
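+/*
+ * With the 64K granularity above, the three config structures occupy
+ * consecutive 64K slots (CONNECTION_CTL at +0K, OSINIT at +64K, SYSINIT at
+ * +128K), together accounting for the 192K PVR_ROGUE_FW_CONFIG_HEAP_SIZE.
+ */
+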
+#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K
+
+#define PVR_SYNC_OBJ_SIZE sizeof(u32)
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries,
+			 enum pvr_fw_section_id id)
+{
+	u32 entry;
+
+	for (entry = 0; entry < num_layout_entries; entry++) {
+		if (layout_entries[entry].id == id)
+			return &layout_entries[entry];
+	}
+
+	return NULL;
+}
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_private_data(const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries)
+{
+	u32 entry;
+
+	for (entry = 0; entry < num_layout_entries; entry++) {
+		if (layout_entries[entry].id == META_PRIVATE_DATA ||
+		    layout_entries[entry].id == MIPS_PRIVATE_DATA ||
+		    layout_entries[entry].id == RISCV_PRIVATE_DATA)
+			return &layout_entries[entry];
+	}
+
+	return NULL;
+}
+
+/**
+ * pvr_fw_validate() - Parse firmware header and check compatibility
+ * @pvr_dev: Device pointer.
+ * @header_out: Pointer to location to write firmware header pointer.
+ * @layout_entries_out: Pointer to location to write layout table pointer.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if firmware is incompatible.
+ */
+static int
+pvr_fw_validate(struct pvr_device *pvr_dev,
+		const struct pvr_fw_info_header **header_out,
+		const struct pvr_fw_layout_entry **layout_entries_out)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	const struct firmware *firmware = pvr_dev->fw_dev.firmware;
+	const u8 *fw = firmware->data;
+	u32 fw_offset = firmware->size - SZ_4K;
+	const struct pvr_fw_layout_entry *layout_entries;
+	const struct pvr_fw_info_header *header;
+	u32 layout_table_size;
+	u32 entry;
+	int err;
+
+	if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	header = (const struct pvr_fw_info_header *)&fw[fw_offset];
+
+	if (header->info_version != PVR_FW_INFO_VERSION) {
+		drm_err(drm_dev, "Unsupported fw info version %u\n",
+			header->info_version);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (header->header_len != sizeof(struct pvr_fw_info_header) ||
+	    header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) ||
+	    header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) {
+		drm_err(drm_dev, "FW info format mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ||
+	    header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION ||
+	    header->fw_version_major == 0) {
+		drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n",
+			header->fw_version_major, header->fw_version_minor,
+			header->fw_version_build,
+			(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : "");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) {
+		struct pvr_gpu_id fw_gpu_id;
+
+		packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id);
+		drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n",
+			fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c,
+			pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	fw_offset += header->header_len;
+	layout_table_size =
+		header->layout_entry_size * header->layout_entry_num;
+	if ((fw_offset + layout_table_size) > firmware->size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset];
+	for (entry = 0; entry < header->layout_entry_num; entry++) {
+		u32 start_addr = layout_entries[entry].base_addr;
+		u32 end_addr = start_addr + layout_entries[entry].alloc_size;
+
+		if (start_addr >= end_addr) {
+			err = -EINVAL;
+			goto err_out;
+		}
+	}
+
+	drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major,
+		 header->fw_version_minor, header->fw_version_build);
+
+	pvr_dev->fw_version.major = header->fw_version_major;
+	pvr_dev->fw_version.minor = header->fw_version_minor;
+
+	*header_out = header;
+	*layout_entries_out = layout_entries;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static void
+layout_get_sizes(const struct pvr_fw_layout_entry *layout_entries,
+		 u32 num_layout_entries, u32 *code_alloc_size,
+		 u32 *data_alloc_size, u32 *core_code_alloc_size,
+		 u32 *core_data_alloc_size)
+{
+	u32 entry;
+
+	*code_alloc_size = 0;
+	*data_alloc_size = 0;
+	*core_code_alloc_size = 0;
+	*core_data_alloc_size = 0;
+
+	/* Extract section sizes from FW layout table. */
+	for (entry = 0; entry < num_layout_entries; entry++) {
+		switch (layout_entries[entry].type) {
+		case FW_CODE:
+			(*code_alloc_size) += layout_entries[entry].alloc_size;
+			break;
+		case FW_DATA:
+			(*data_alloc_size) += layout_entries[entry].alloc_size;
+			break;
+		case FW_COREMEM_CODE:
+			(*core_code_alloc_size) +=
+				layout_entries[entry].alloc_size;
+			break;
+		case FW_COREMEM_DATA:
+			(*core_data_alloc_size) +=
+				layout_entries[entry].alloc_size;
+			break;
+		case NONE:
+			break;
+		}
+	}
+}
+
+int
+pvr_fw_find_mmu_segment(u32 addr, u32 size, const struct pvr_fw_layout_entry *layout_entries,
+			u32 num_layout_entries, void *fw_code_ptr, void *fw_data_ptr,
+			void *fw_core_code_ptr, void *fw_core_data_ptr,
+			void **host_addr_out)
+{
+	u32 end_addr = addr + size;
+	int entry = 0;
+	int err;
+
+	/* Ensure the requested range is non-zero and that addr + size does not overflow. */
+	if (end_addr <= addr) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	for (entry = 0; entry < num_layout_entries; entry++) {
+		u32 entry_start_addr = layout_entries[entry].base_addr;
+		u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size;
+
+		if (addr >= entry_start_addr && addr < entry_end_addr &&
+		    end_addr > entry_start_addr && end_addr <= entry_end_addr) {
+			switch (layout_entries[entry].type) {
+			case FW_CODE:
+				*host_addr_out = fw_code_ptr;
+				break;
+
+			case FW_DATA:
+				*host_addr_out = fw_data_ptr;
+				break;
+
+			case FW_COREMEM_CODE:
+				*host_addr_out = fw_core_code_ptr;
+				break;
+
+			case FW_COREMEM_DATA:
+				*host_addr_out = fw_core_data_ptr;
+				break;
+
+			default:
+				err = -EINVAL;
+				goto err_out;
+			}
+			/*
+			 * Translate the FW address into an offset within the
+			 * section's host mapping.
+			 */
+			addr -= layout_entries[entry].base_addr;
+			addr += layout_entries[entry].alloc_offset;
+
+			/* Advance the host pointer to the matching location. */
+			*(u8 **)host_addr_out += addr;
+			return 0;
+		}
+	}
+
+	err = -EINVAL;
+
+err_out:
+	return err;
+}
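+
+/*
+ * For example, the META loader (meta_ldr_cmd_loadmem() in pvr_fw_meta.c)
+ * resolves each LDR block address before copying:
+ *
+ *	err = pvr_fw_find_mmu_segment(offset, data_size, layout_entries,
+ *				      num_layout_entries, fw_code_ptr,
+ *				      fw_data_ptr, fw_core_code_ptr,
+ *				      fw_core_data_ptr, &write_addr);
+ *	if (!err)
+ *		memcpy(write_addr, l2_block->block_data, data_size);
+ */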
+
+static int
+pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	int err;
+
+	fw_dev->fwif_connection_ctl =
+		pvr_gem_create_and_map_fw_object_offset(pvr_dev,
+							fw_dev->fw_heap_info.config_offset +
+							PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET,
+							sizeof(*fw_dev->fwif_connection_ctl),
+							PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							DRM_PVR_BO_CREATE_ZEROED,
+							&fw_dev->mem.fwif_connection_ctl_obj);
+	if (IS_ERR(fw_dev->fwif_connection_ctl)) {
+		drm_err(drm_dev,
+			"Unable to allocate FWIF connection control memory\n");
+		err = PTR_ERR(fw_dev->fwif_connection_ctl);
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static void
+pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	pvr_fw_object_vunmap(fw_dev->mem.fwif_connection_ctl_obj, false);
+	pvr_fw_object_release(fw_dev->mem.fwif_connection_ctl_obj);
+}
+
+static int
+pvr_fw_create_os_structures(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+	struct rogue_fwif_osinit *fwif_osinit;
+	int err;
+
+	fw_dev->fwif_osinit =
+		pvr_gem_create_and_map_fw_object_offset(pvr_dev,
+							fw_dev->fw_heap_info.config_offset +
+							PVR_ROGUE_FWIF_OSINIT_OFFSET,
+							sizeof(*fw_dev->fwif_osinit),
+							PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							DRM_PVR_BO_CREATE_ZEROED,
+							&fw_mem->osinit_obj);
+	if (IS_ERR(fw_dev->fwif_osinit)) {
+		drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n");
+		err = PTR_ERR(fw_dev->fwif_osinit);
+		goto err_out;
+	}
+	fwif_osinit = fw_dev->fwif_osinit;
+
+	fw_dev->fwif_osdata = pvr_gem_create_and_map_fw_object(pvr_dev,
+							       sizeof(*fw_dev->fwif_osdata),
+							       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							       DRM_PVR_BO_CREATE_ZEROED,
+							       &fw_mem->osdata_obj);
+	if (IS_ERR(fw_dev->fwif_osdata)) {
+		drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n");
+		err = PTR_ERR(fw_dev->fwif_osdata);
+		goto err_release_osinit;
+	}
+
+	fw_dev->power_sync = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*fw_dev->power_sync),
+							      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							      DRM_PVR_BO_CREATE_ZEROED,
+							      &fw_mem->power_sync_obj);
+	if (IS_ERR(fw_dev->power_sync)) {
+		drm_err(drm_dev, "Unable to allocate FW power_sync structure\n");
+		err = PTR_ERR(fw_dev->power_sync);
+		goto err_release_osdata;
+	}
+
+	fw_dev->hwrinfobuf = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*fw_dev->hwrinfobuf),
+						      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						      DRM_PVR_BO_CREATE_ZEROED,
+						      &fw_mem->hwrinfobuf_obj);
+	if (IS_ERR(fw_dev->hwrinfobuf)) {
+		drm_err(drm_dev,
+			"Unable to allocate FW hwrinfobuf structure\n");
+		err = PTR_ERR(fw_dev->hwrinfobuf);
+		goto err_release_power_sync;
+	}
+
+	err = pvr_gem_create_fw_object(pvr_dev, PVR_SYNC_OBJ_SIZE,
+				       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+				       DRM_PVR_BO_CREATE_ZEROED,
+				       &fw_mem->mmucache_sync_obj);
+	if (err) {
+		drm_err(drm_dev,
+			"Unable to allocate MMU cache sync object\n");
+		goto err_release_hwrinfobuf;
+	}
+
+	pvr_gem_get_fw_addr(fw_mem->power_sync_obj, &fw_dev->fwif_osdata->power_sync_fw_addr);
+
+	fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ctrl_fw_addr;
+	fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb_fw_addr;
+	pvr_gem_get_fw_addr(pvr_dev->kccb_rtn_obj, &fwif_osinit->kernel_ccb_rtn_slots_fw_addr);
+
+	fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr;
+	fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr;
+
+	fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0;
+	fwif_osinit->work_est_firmware_ccb_fw_addr = 0;
+
+	pvr_gem_get_fw_addr(fw_mem->hwrinfobuf_obj,
+			    &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr);
+	pvr_gem_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr);
+
+	fwif_osinit->hwr_debug_dump_limit = 0;
+
+	ROGUE_FWIF_COMPCHECKS_BVNC_INIT(fwif_osinit->rogue_comp_checks.hw_bvnc);
+	ROGUE_FWIF_COMPCHECKS_BVNC_INIT(fwif_osinit->rogue_comp_checks.fw_bvnc);
+
+	return 0;
+
+err_release_hwrinfobuf:
+	pvr_fw_object_vunmap(fw_mem->hwrinfobuf_obj, false);
+	pvr_fw_object_release(fw_mem->hwrinfobuf_obj);
+
+err_release_power_sync:
+	pvr_fw_object_vunmap(fw_mem->power_sync_obj, false);
+	pvr_fw_object_release(fw_mem->power_sync_obj);
+
+err_release_osdata:
+	pvr_fw_object_vunmap(fw_mem->osdata_obj, false);
+	pvr_fw_object_release(fw_mem->osdata_obj);
+
+err_release_osinit:
+	pvr_fw_object_vunmap(fw_mem->osinit_obj, false);
+	pvr_fw_object_release(fw_mem->osinit_obj);
+
+err_out:
+	return err;
+}
+
+static void
+pvr_fw_destroy_os_structures(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+	pvr_fw_object_release(fw_mem->mmucache_sync_obj);
+	pvr_fw_object_vunmap(fw_mem->hwrinfobuf_obj, false);
+	pvr_fw_object_release(fw_mem->hwrinfobuf_obj);
+	pvr_fw_object_vunmap(fw_mem->power_sync_obj, false);
+	pvr_fw_object_release(fw_mem->power_sync_obj);
+	pvr_fw_object_vunmap(fw_mem->osdata_obj, false);
+	pvr_fw_object_release(fw_mem->osdata_obj);
+	pvr_fw_object_vunmap(fw_mem->osinit_obj, false);
+	pvr_fw_object_release(fw_mem->osinit_obj);
+}
+
+static int
+pvr_fw_create_dev_structures(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb;
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+	struct rogue_fwif_runtime_cfg *runtime_cfg;
+	struct rogue_fwif_sysinit *fwif_sysinit;
+	u32 clock_speed_hz;
+	u32 *fault_page;
+	dma_addr_t fault_dma_addr;
+	int i;
+	int err;
+
+	fw_dev->fwif_sysinit =
+		pvr_gem_create_and_map_fw_object_offset(pvr_dev,
+							fw_dev->fw_heap_info.config_offset +
+							PVR_ROGUE_FWIF_SYSINIT_OFFSET,
+							sizeof(*fw_dev->fwif_sysinit),
+							PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							DRM_PVR_BO_CREATE_ZEROED,
+							&fw_mem->sysinit_obj);
+	if (IS_ERR(fw_dev->fwif_sysinit)) {
+		drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n");
+		err = PTR_ERR(fw_dev->fwif_sysinit);
+		goto err_out;
+	}
+	fwif_sysinit = fw_dev->fwif_sysinit;
+
+	fw_dev->fwif_sysdata = pvr_gem_create_and_map_fw_object(pvr_dev,
+								sizeof(*fw_dev->fwif_sysdata),
+								PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+								DRM_PVR_BO_CREATE_ZEROED,
+								&fw_mem->sysdata_obj);
+	if (IS_ERR(fw_dev->fwif_sysdata)) {
+		drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n");
+		err = PTR_ERR(fw_dev->fwif_sysdata);
+		goto err_release_sysinit;
+	}
+	fw_dev->fwif_sysdata->config_flags = 0;
+	fw_dev->fwif_sysdata->config_flags_ext = 0;
+
+	fault_page = pvr_gem_create_and_map_fw_object(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE,
+						      PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+						      &fw_mem->fault_page_obj);
+	if (IS_ERR(fault_page)) {
+		drm_err(drm_dev, "Unable to allocate FW fault page\n");
+		err = PTR_ERR(fault_page);
+		goto err_release_sysdata;
+	}
+	for (i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++)
+		fault_page[i] = 0xdeadbee0;
+	pvr_fw_object_vunmap(fw_mem->fault_page_obj, false);
+
+	gpu_util_fwcb = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*gpu_util_fwcb),
+							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							 DRM_PVR_BO_CREATE_ZEROED,
+							 &fw_mem->gpu_util_fwcb_obj);
+	if (IS_ERR(gpu_util_fwcb)) {
+		drm_err(drm_dev, "Unable to allocate GPU util FWCB\n");
+		err = PTR_ERR(gpu_util_fwcb);
+		goto err_release_fault_page;
+	}
+	/* TODO: Add timestamp. */
+	gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE;
+	pvr_fw_object_vunmap(fw_mem->gpu_util_fwcb_obj, false);
+
+	err = pvr_device_clk_core_get_freq(pvr_dev, &clock_speed_hz);
+	if (err) {
+		drm_err(drm_dev, "Unable to determine core clock frequency\n");
+		goto err_release_gpu_util_fwcb;
+	}
+
+	runtime_cfg = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*runtime_cfg),
+						       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						       DRM_PVR_BO_CREATE_ZEROED,
+						       &fw_mem->runtime_cfg_obj);
+	if (IS_ERR(runtime_cfg)) {
+		drm_err(drm_dev, "Unable to allocate FW runtime config\n");
+		err = PTR_ERR(runtime_cfg);
+		goto err_release_gpu_util_fwcb;
+	}
+	runtime_cfg->core_clock_speed = clock_speed_hz;
+	runtime_cfg->active_pm_latency_ms = 0;
+	runtime_cfg->active_pm_latency_persistant = true;
+	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters,
+				  &runtime_cfg->default_dusts_num_init) != 0);
+	pvr_fw_object_vunmap(fw_mem->runtime_cfg_obj, false);
+
+	err = pvr_fw_trace_init(pvr_dev);
+	if (err)
+		goto err_release_runtime_cfg;
+
+	err = pvr_fw_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr);
+	if (err) {
+		drm_err(drm_dev,
+			"Unable to get FW fault page physical address\n");
+		goto err_trace_fini;
+	}
+	fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr;
+
+	fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE;
+	fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE;
+
+	pvr_gem_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr);
+	pvr_gem_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj,
+			    &fwif_sysinit->trace_buf_ctl_fw_addr);
+	pvr_gem_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr);
+	pvr_gem_get_fw_addr(fw_mem->gpu_util_fwcb_obj, &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr);
+	if (fw_mem->core_data_obj) {
+		pvr_gem_get_fw_addr(fw_mem->core_data_obj,
+				    &fwif_sysinit->coremem_data_store.fw_addr);
+	}
+
+	/* Currently unsupported. */
+	fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0;
+	fwif_sysinit->counter_dump_ctl.size_in_dwords = 0;
+
+	/* Skip alignment checks. */
+	fwif_sysinit->align_checks = 0;
+
+	fwif_sysinit->filter_flags = 0;
+	fwif_sysinit->hw_perf_filter = 0;
+	fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE;
+	fwif_sysinit->initial_core_clock_speed = clock_speed_hz;
+	fwif_sysinit->active_pm_latency_ms = 0;
+	fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF;
+	fwif_sysinit->firmware_started = false;
+	fwif_sysinit->marker_val = 1;
+
+	memset(&fwif_sysinit->bvnc_km_feature_flags, 0,
+	       sizeof(fwif_sysinit->bvnc_km_feature_flags));
+
+	return 0;
+
+err_trace_fini:
+	pvr_fw_trace_fini(pvr_dev);
+
+err_release_runtime_cfg:
+	pvr_fw_object_release(fw_mem->runtime_cfg_obj);
+
+err_release_gpu_util_fwcb:
+	pvr_fw_object_release(fw_mem->gpu_util_fwcb_obj);
+
+err_release_fault_page:
+	pvr_fw_object_release(fw_mem->fault_page_obj);
+
+err_release_sysdata:
+	pvr_fw_object_vunmap(fw_mem->sysdata_obj, false);
+	pvr_fw_object_release(fw_mem->sysdata_obj);
+
+err_release_sysinit:
+	pvr_fw_object_vunmap(fw_mem->sysinit_obj, false);
+	pvr_fw_object_release(fw_mem->sysinit_obj);
+
+err_out:
+	return err;
+}
+
+static void
+pvr_fw_destroy_dev_structures(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mem *fw_mem = &fw_dev->mem;
+
+	pvr_fw_trace_fini(pvr_dev);
+	pvr_fw_object_release(fw_mem->runtime_cfg_obj);
+	pvr_fw_object_release(fw_mem->gpu_util_fwcb_obj);
+	pvr_fw_object_release(fw_mem->fault_page_obj);
+	pvr_fw_object_vunmap(fw_mem->sysdata_obj, false);
+	pvr_fw_object_release(fw_mem->sysdata_obj);
+	pvr_fw_object_vunmap(fw_mem->sysinit_obj, false);
+	pvr_fw_object_release(fw_mem->sysinit_obj);
+}
+
+/**
+ * pvr_fw_process() - Process firmware image, allocate FW memory and create boot
+ *                    arguments
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_gem_create_and_map_fw_object_offset(), or
+ *  * Any error returned by pvr_gem_create_and_map_fw_object().
+ */
+static int
+pvr_fw_process(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+	const u8 *fw = pvr_dev->fw_dev.firmware->data;
+	const struct pvr_fw_info_header *header;
+	const struct pvr_fw_layout_entry *layout_entries;
+	const struct pvr_fw_layout_entry *private_data;
+	u32 code_alloc_size;
+	u32 data_alloc_size;
+	u32 core_code_alloc_size;
+	u32 core_data_alloc_size;
+	u8 *fw_code_ptr;
+	u8 *fw_data_ptr;
+	u8 *fw_core_code_ptr;
+	u8 *fw_core_data_ptr;
+	int err;
+
+	err = pvr_fw_validate(pvr_dev, &header, &layout_entries);
+	if (err)
+		goto err_out;
+
+	layout_get_sizes(layout_entries, header->layout_entry_num,
+			 &code_alloc_size, &data_alloc_size,
+			 &core_code_alloc_size, &core_data_alloc_size);
+
+	private_data = pvr_fw_find_private_data(layout_entries, header->layout_entry_num);
+	if (!private_data) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/* Allocate and map memory for firmware sections. */
+
+	/*
+	 * Code allocation must be at the start of the firmware heap, otherwise
+	 * firmware processor will be unable to boot.
+	 *
+	 * This has the useful side-effect that for every other object in the
+	 * driver, a firmware address of 0 is invalid.
+	 */
+	fw_code_ptr = pvr_gem_create_and_map_fw_object_offset(pvr_dev, 0, code_alloc_size,
+							      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							      DRM_PVR_BO_CREATE_ZEROED,
+							      &fw_mem->code_obj);
+	if (IS_ERR(fw_code_ptr)) {
+		drm_err(drm_dev, "Unable to allocate FW code memory\n");
+		err = PTR_ERR(fw_code_ptr);
+		goto err_out;
+	}
+
+	if (pvr_dev->fw_dev.funcs->has_fixed_data_addr()) {
+		u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask;
+
+		fw_data_ptr =
+			pvr_gem_create_and_map_fw_object_offset(pvr_dev, base_addr,
+								data_alloc_size,
+								PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+								DRM_PVR_BO_CREATE_ZEROED,
+								&fw_mem->data_obj);
+	} else {
+		fw_data_ptr = pvr_gem_create_and_map_fw_object(pvr_dev, data_alloc_size,
+							       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							       DRM_PVR_BO_CREATE_ZEROED,
+							       &fw_mem->data_obj);
+	}
+	if (IS_ERR(fw_data_ptr)) {
+		drm_err(drm_dev, "Unable to allocate FW data memory\n");
+		err = PTR_ERR(fw_data_ptr);
+		goto err_free_fw_code_obj;
+	}
+
+	/* Core code and data sections are optional. */
+	if (core_code_alloc_size) {
+		fw_core_code_ptr =
+			pvr_gem_create_and_map_fw_object(pvr_dev, core_code_alloc_size,
+							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							 DRM_PVR_BO_CREATE_ZEROED,
+							 &fw_mem->core_code_obj);
+		if (IS_ERR(fw_core_code_ptr)) {
+			drm_err(drm_dev,
+				"Unable to allocate FW core code memory\n");
+			err = PTR_ERR(fw_core_code_ptr);
+			goto err_free_fw_data_obj;
+		}
+	} else {
+		fw_core_code_ptr = NULL;
+	}
+
+	if (core_data_alloc_size) {
+		fw_core_data_ptr =
+			pvr_gem_create_and_map_fw_object(pvr_dev, core_data_alloc_size,
+							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							 DRM_PVR_BO_CREATE_ZEROED,
+							 &fw_mem->core_data_obj);
+		if (IS_ERR(fw_core_data_ptr)) {
+			drm_err(drm_dev,
+				"Unable to allocate FW core data memory\n");
+			err = PTR_ERR(fw_core_data_ptr);
+			goto err_free_fw_core_code_obj;
+		}
+	} else {
+		fw_core_data_ptr = NULL;
+	}
+
+	err = pvr_dev->fw_dev.funcs->fw_process(pvr_dev, fw, layout_entries,
+						header->layout_entry_num,
+						fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+						fw_core_data_ptr, core_code_alloc_size);
+
+	if (err)
+		goto err_free_fw_core_data_obj;
+
+	/* We're finished with the CPU mappings of the firmware sections; unmap them. */
+	if (fw_core_data_ptr)
+		pvr_fw_object_vunmap(fw_mem->core_data_obj, false);
+	if (fw_core_code_ptr)
+		pvr_fw_object_vunmap(fw_mem->core_code_obj, false);
+	pvr_fw_object_vunmap(fw_mem->data_obj, false);
+	fw_data_ptr = NULL;
+	pvr_fw_object_vunmap(fw_mem->code_obj, false);
+	fw_code_ptr = NULL;
+
+	err = pvr_fw_create_fwif_connection_ctl(pvr_dev);
+	if (err)
+		goto err_free_fw_core_data_obj;
+
+	return 0;
+
+err_free_fw_core_data_obj:
+	if (fw_core_data_ptr) {
+		pvr_fw_object_vunmap(fw_mem->core_data_obj, false);
+		pvr_fw_object_release(fw_mem->core_data_obj);
+	}
+
+err_free_fw_core_code_obj:
+	if (fw_core_code_ptr) {
+		pvr_fw_object_vunmap(fw_mem->core_code_obj, false);
+		pvr_fw_object_release(fw_mem->core_code_obj);
+	}
+
+err_free_fw_data_obj:
+	if (fw_data_ptr)
+		pvr_fw_object_vunmap(fw_mem->data_obj, false);
+	pvr_fw_object_release(fw_mem->data_obj);
+
+err_free_fw_code_obj:
+	if (fw_code_ptr)
+		pvr_fw_object_vunmap(fw_mem->code_obj, false);
+	pvr_fw_object_release(fw_mem->code_obj);
+
+err_out:
+	return err;
+}
+
+static void
+pvr_fw_cleanup(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem;
+
+	pvr_fw_fini_fwif_connection_ctl(pvr_dev);
+
+	if (fw_mem->core_code_obj)
+		pvr_fw_object_release(fw_mem->core_code_obj);
+	if (fw_mem->core_data_obj)
+		pvr_fw_object_release(fw_mem->core_data_obj);
+	pvr_fw_object_release(fw_mem->code_obj);
+	pvr_fw_object_release(fw_mem->data_obj);
+}
+
+/**
+ * pvr_wait_for_fw_boot() - Wait for firmware to finish booting
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%ETIMEDOUT if firmware fails to boot within timeout.
+ */
+int
+pvr_wait_for_fw_boot(struct pvr_device *pvr_dev)
+{
+	ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
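+	/*
+	 * Tight poll, bounded by FW_BOOT_TIMEOUT_USEC so a firmware that
+	 * never sets firmware_started cannot hang the caller.
+	 */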
+	while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) {
+		if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started))
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * pvr_fw_heap_info_init() - Calculate size and masks for FW heap
+ * @pvr_dev: Target PowerVR device.
+ * @log2_size: Log2 of raw heap size.
+ * @reserved_size: Size of reserved area of heap, in bytes. May be zero.
+ */
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE;
+	fw_dev->fw_heap_info.log2_size = log2_size;
+	fw_dev->fw_heap_info.reserved_size = reserved_size;
+	fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size;
+	fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1;
+	fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size -
+					     PVR_ROGUE_FW_CONFIG_HEAP_SIZE;
+	fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size -
+				    (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size);
+}
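+
+/*
+ * Worked example: a META heap uses log2_size = 25 (ROGUE_FW_HEAP_META_SHIFT),
+ * giving raw_size = 32M, offset_mask = 32M - 1, config_offset = 32M - 192K
+ * and, assuming no reserved area, size = 32M - 192K.
+ */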
+
+/**
+ * pvr_fw_init() - Initialise and boot firmware
+ * @pvr_dev: Target PowerVR device
+ *
+ * On successful completion of the function the PowerVR device will be
+ * initialised and ready to use.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EINVAL on invalid firmware image,
+ *  * -%ENOMEM on out of memory, or
+ *  * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout.
+ */
+int
+pvr_fw_init(struct pvr_device *pvr_dev)
+{
+	u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT;
+	u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb_rtn);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	int err;
+
+	if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META) {
+		fw_dev->funcs = &pvr_fw_funcs_meta;
+	} else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS) {
+		fw_dev->funcs = &pvr_fw_funcs_mips;
+	} else {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = fw_dev->funcs->init(pvr_dev);
+	if (err)
+		goto err_out;
+
+	drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size);
+	fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE;
+	spin_lock_init(&fw_dev->fw_mm_lock);
+
+	err = pvr_fw_process(pvr_dev);
+	if (err)
+		goto err_mm_takedown;
+
+	/* Initialise KCCB and FWCCB. */
+	err = pvr_kccb_init(pvr_dev);
+	if (err)
+		goto err_fw_cleanup;
+
+	err = pvr_fwccb_init(pvr_dev);
+	if (err)
+		goto err_kccb_fini;
+
+	/* Allocate memory for KCCB return slots. */
+	pvr_dev->kccb_rtn = pvr_gem_create_and_map_fw_object(pvr_dev, kccb_rtn_size,
+							     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							     DRM_PVR_BO_CREATE_ZEROED,
+							     &pvr_dev->kccb_rtn_obj);
+	if (IS_ERR(pvr_dev->kccb_rtn)) {
+		err = PTR_ERR(pvr_dev->kccb_rtn);
+		goto err_fwccb_fini;
+	}
+
+	err = pvr_fw_create_os_structures(pvr_dev);
+	if (err)
+		goto err_kccb_rtn_release;
+
+	err = pvr_fw_create_dev_structures(pvr_dev);
+	if (err)
+		goto err_destroy_os_structures;
+
+	pvr_power_lock(pvr_dev);
+
+	err = pvr_power_set_state(pvr_dev, PVR_POWER_STATE_ON);
+	if (err)
+		goto err_power_unlock;
+
+	fw_dev->booted = true;
+
+	pvr_power_unlock(pvr_dev);
+
+	return 0;
+
+err_power_unlock:
+	pvr_power_unlock(pvr_dev);
+
+	pvr_fw_destroy_dev_structures(pvr_dev);
+
+err_destroy_os_structures:
+	pvr_fw_destroy_os_structures(pvr_dev);
+
+err_kccb_rtn_release:
+	pvr_fw_object_vunmap(pvr_dev->kccb_rtn_obj, false);
+	pvr_fw_object_release(pvr_dev->kccb_rtn_obj);
+
+err_fwccb_fini:
+	pvr_ccb_fini(&pvr_dev->fwccb);
+
+err_kccb_fini:
+	pvr_ccb_fini(&pvr_dev->kccb);
+
+err_fw_cleanup:
+	pvr_fw_cleanup(pvr_dev);
+
+err_mm_takedown:
+	drm_mm_takedown(&fw_dev->fw_mm);
+
+	if (fw_dev->funcs->fini)
+		fw_dev->funcs->fini(pvr_dev);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fw_fini() - Shutdown firmware processor and free associated memory
+ * @pvr_dev: Target PowerVR device
+ */
+void
+pvr_fw_fini(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	pvr_power_lock(pvr_dev);
+
+	fw_dev->booted = false;
+	pvr_power_set_state(pvr_dev, PVR_POWER_STATE_OFF);
+
+	pvr_power_unlock(pvr_dev);
+
+	pvr_fw_destroy_dev_structures(pvr_dev);
+	pvr_fw_destroy_os_structures(pvr_dev);
+	pvr_fw_object_vunmap(pvr_dev->kccb_rtn_obj, false);
+	pvr_fw_object_release(pvr_dev->kccb_rtn_obj);
+	/*
+	 * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has
+	 * been unregistered at this point so no new work should be being submitted.
+	 */
+	flush_work(&pvr_dev->fwccb_work);
+	pvr_ccb_fini(&pvr_dev->fwccb);
+	pvr_ccb_fini(&pvr_dev->kccb);
+	pvr_fw_cleanup(pvr_dev);
+
+	drm_mm_takedown(&fw_dev->fw_mm);
+
+	if (fw_dev->funcs->fini)
+		fw_dev->funcs->fini(pvr_dev);
+}
+
+/**
+ * pvr_fw_mts_schedule() - Schedule work via an MTS kick
+ * @pvr_dev: Target PowerVR device
+ * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_*
+ */
+void
+pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val)
+{
+	/* Ensure memory is flushed before kicking MTS. */
+	wmb();
+
+	PVR_CR_WRITE32(pvr_dev, MTS_SCHEDULE, val);
+
+	/* Ensure the MTS kick goes through before continuing. */
+	mb();
+}
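+
+/*
+ * Illustrative only: after writing a command into the kernel CCB, the driver
+ * kicks the firmware with a mask built from the ROGUE_CR_MTS_SCHEDULE_*
+ * fields, e.g. pvr_fw_mts_schedule(pvr_dev, kick_mask).
+ */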
+
+/**
+ * pvr_fw_mem_context_create() - Create firmware memory context
+ * @pvr_dev: Target PowerVR device.
+ * @vm_ctx: VM context to be associated with the firmware memory context.
+ * @fw_mem_ctx_obj_out: Pointer to location to store the address of the firmware memory context
+ *                      object.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_gem_create_and_map_fw_object().
+ */
+int pvr_fw_mem_context_create(struct pvr_device *pvr_dev, struct pvr_vm_context *vm_ctx,
+			      struct pvr_fw_object **fw_mem_ctx_obj_out)
+{
+	struct rogue_fwif_fwmemcontext *fw_mem_ctx;
+	struct pvr_fw_object *fw_mem_ctx_obj;
+	int err;
+
+	fw_mem_ctx = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*fw_mem_ctx),
+						      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						      DRM_PVR_BO_CREATE_ZEROED,
+						      &fw_mem_ctx_obj);
+	if (IS_ERR(fw_mem_ctx)) {
+		err = PTR_ERR(fw_mem_ctx);
+		goto err_out;
+	}
+
+	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
+	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
+
+	pvr_fw_object_vunmap(fw_mem_ctx_obj, true);
+
+	*fw_mem_ctx_obj_out = fw_mem_ctx_obj;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_fw_mem_context_destroy() - Destroy firmware memory context
+ * @fw_mem_ctx_obj: Pointer to firmware object representing memory context.
+ */
+void pvr_fw_mem_context_destroy(struct pvr_fw_object *fw_mem_ctx_obj)
+{
+	pvr_fw_object_release(fw_mem_ctx_obj);
+}
+
+/**
+ * pvr_fw_structure_cleanup() - Send FW cleanup request for an object
+ * @pvr_dev: Target PowerVR device.
+ * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type.
+ * @fw_obj: Pointer to FW object containing object to cleanup.
+ * @offset: Offset within FW object of object to cleanup.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EBUSY if object is busy, or
+ *  * -%ETIMEDOUT on timeout.
+ */
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+			 u32 offset)
+{
+	struct rogue_fwif_kccb_cmd cmd;
+	int slot_nr;
+	int err;
+	u32 rtn;
+
+	struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data;
+
+	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP;
+	cmd.kccb_flags = 0;
+	cleanup_req->cleanup_type = type;
+
+	switch (type) {
+	case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT:
+		pvr_gem_get_fw_addr_offset(fw_obj, offset,
+					   &cleanup_req->cleanup_data.context_fw_addr);
+		break;
+	case ROGUE_FWIF_CLEANUP_HWRTDATA:
+		pvr_gem_get_fw_addr_offset(fw_obj, offset,
+					   &cleanup_req->cleanup_data.hwrt_data_fw_addr);
+		break;
+	case ROGUE_FWIF_CLEANUP_FREELIST:
+		pvr_gem_get_fw_addr_offset(fw_obj, offset,
+					   &cleanup_req->cleanup_data.freelist_fw_addr);
+		break;
+	default:
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr);
+	if (err)
+		goto err_out;
+
+	err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn);
+	if (err)
+		goto err_out;
+
+	if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
+		err = -EBUSY;
+
+err_out:
+	return err;
+}
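+
+/*
+ * For example, the free list release path issues
+ *
+ *	pvr_fw_structure_cleanup(pvr_dev, ROGUE_FWIF_CLEANUP_FREELIST,
+ *				 free_list->fw_obj, 0);
+ *
+ * before tearing down its FW-side structure (see pvr_free_list_release()).
+ */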
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
new file mode 100644
index 000000000000..aa7db6988287
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -0,0 +1,345 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_H__
+#define __PVR_FW_H__
+
+#include "pvr_fw_info.h"
+#include "pvr_fw_trace.h"
+
+#include <drm/drm_mm.h>
+
+#include <linux/types.h>
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h". */
+struct pvr_fw_object;
+
+/* Forward declaration from "pvr_vm.h". */
+struct pvr_vm_context;
+
+#define ROGUE_FWIF_FWCCB_NUMCMDS_LOG2 5
+
+#define ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT 7
+
+/**
+ * struct pvr_fw_funcs - FW processor function table
+ */
+struct pvr_fw_funcs {
+	/**
+	 * @init:
+	 *
+	 * FW processor specific initialisation.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function must call pvr_fw_heap_calculate() to initialise the firmware heap for this
+	 * FW processor.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * Any appropriate error on failure.
+	 */
+	int (*init)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @fini:
+	 *
+	 * FW processor specific finalisation.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function is optional.
+	 */
+	void (*fini)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @fw_process:
+	 *
+	 * Load and process firmware image.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw: Pointer to firmware image.
+	 * @layout_entries: Layout of firmware memory.
+	 * @num_layout_entries: Number of entries in @layout_entries.
+	 * @fw_code_ptr: Pointer to firmware code section.
+	 * @fw_data_ptr: Pointer to firmware data section.
+	 * @fw_core_code_ptr: Pointer to firmware core code section. May be %NULL.
+	 * @fw_core_data_ptr: Pointer to firmware core data section. May be %NULL.
+	 * @core_code_alloc_size: Total allocation size of core code section.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * Any appropriate error on failure.
+	 */
+	int (*fw_process)(struct pvr_device *pvr_dev, const u8 *fw,
+			  const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries,
+			  u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+			  u8 *fw_core_data_ptr, u32 core_code_alloc_size);
+
+	/**
+	 * @vm_map:
+	 *
+	 * Map FW object into FW processor address space.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw_obj: FW object to map.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * Any appropriate error on failure.
+	 */
+	int (*vm_map)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+	/**
+	 * @vm_unmap:
+	 *
+	 * Unmap FW object from FW processor address space.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw_obj: FW object to unmap.
+	 *
+	 * This function is mandatory.
+	 */
+	void (*vm_unmap)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+	/**
+	 * @get_fw_addr_with_offset:
+	 *
+	 * Called to get address of object in firmware address space, with offset.
+	 * @fw_obj: Pointer to object.
+	 * @offset: Desired offset from start of object.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * Address in firmware address space.
+	 */
+	u32 (*get_fw_addr_with_offset)(struct pvr_fw_object *fw_obj, u32 offset);
+
+	/**
+	 * @wrapper_init:
+	 *
+	 * Called to initialise FW wrapper.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * Any appropriate error on failure.
+	 */
+	int (*wrapper_init)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @check_and_ack_irq:
+	 *
+	 * Called to check if a GPU interrupt has occurred, and to acknowledge if it has.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * %true if an interrupt has occurred, or
+	 *  * %false if no interrupt has occurred.
+	 */
+	bool (*check_and_ack_irq)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @has_fixed_data_addr:
+	 *
+	 * Called to check if firmware fixed data must be loaded at the address given by the
+	 * firmware layout table.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 *  * %true if firmware fixed data must be loaded at the address given by the firmware
+	 *    layout table.
+	 *  * %false otherwise.
+	 */
+	bool (*has_fixed_data_addr)(void);
+};
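+
+/*
+ * Each FW processor backend provides one of these tables; the real instances
+ * are pvr_fw_funcs_meta (pvr_fw_meta.c) and pvr_fw_funcs_mips
+ * (pvr_fw_mips.c). A minimal sketch, with hypothetical function names:
+ *
+ *	const struct pvr_fw_funcs pvr_fw_funcs_foo = {
+ *		.init = pvr_foo_init,
+ *		.fw_process = pvr_foo_fw_process,
+ *		.vm_map = pvr_foo_vm_map,
+ *		.vm_unmap = pvr_foo_vm_unmap,
+ *		...
+ *	};
+ */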
+
+/**
+ * struct pvr_fw_mem - FW memory allocations
+ */
+struct pvr_fw_mem {
+	/** @code_obj: Object representing firmware code. */
+	struct pvr_fw_object *code_obj;
+
+	/** @data_obj: Object representing firmware data. */
+	struct pvr_fw_object *data_obj;
+
+	/**
+	 * @core_code_obj: Object representing firmware core code. May be
+	 *                 %NULL if firmware does not contain this section.
+	 */
+	struct pvr_fw_object *core_code_obj;
+
+	/**
+	 * @core_data_obj: Object representing firmware core data. May be
+	 *                 %NULL if firmware does not contain this section.
+	 */
+	struct pvr_fw_object *core_data_obj;
+
+	/**
+	 * @fwif_connection_ctl_obj: Object representing FWIF connection control
+	 *                           structure.
+	 */
+	struct pvr_fw_object *fwif_connection_ctl_obj;
+
+	/** @osinit_obj: Object representing FW OSINIT structure. */
+	struct pvr_fw_object *osinit_obj;
+
+	/** @sysinit_obj: Object representing FW SYSINIT structure. */
+	struct pvr_fw_object *sysinit_obj;
+
+	/** @osdata_obj: Object representing FW OSDATA structure. */
+	struct pvr_fw_object *osdata_obj;
+
+	/** @hwrinfobuf_obj: Object representing FW hwrinfobuf structure. */
+	struct pvr_fw_object *hwrinfobuf_obj;
+
+	/** @sysdata_obj: Object representing FW SYSDATA structure. */
+	struct pvr_fw_object *sysdata_obj;
+
+	/** @power_sync_obj: Object representing power sync state. */
+	struct pvr_fw_object *power_sync_obj;
+
+	/** @fault_page_obj: Object representing FW fault page. */
+	struct pvr_fw_object *fault_page_obj;
+
+	/** @gpu_util_fwcb_obj: Object representing FW GPU utilisation control structure. */
+	struct pvr_fw_object *gpu_util_fwcb_obj;
+
+	/** @runtime_cfg_obj: Object representing FW runtime config structure. */
+	struct pvr_fw_object *runtime_cfg_obj;
+
+	/** @mmucache_sync_obj: Object used as the sync parameter in an MMU cache operation. */
+	struct pvr_fw_object *mmucache_sync_obj;
+};
+
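+/**
+ * struct pvr_fw_device - Firmware-related device state
+ */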
+struct pvr_fw_device {
+	/** @firmware: Handle to the firmware loaded into the device. */
+	const struct firmware *firmware;
+
+	/** @mem: Structure containing objects representing firmware memory allocations. */
+	struct pvr_fw_mem mem;
+
+	/** @booted: %true if the firmware has been booted, %false otherwise. */
+	bool booted;
+
+	/**
+	 * @processor_type: FW processor type for this device. Must be one of
+	 *                  %PVR_FW_PROCESSOR_TYPE_*.
+	 */
+	u16 processor_type;
+
+	/** @funcs: Function table for the FW processor used by this device. */
+	const struct pvr_fw_funcs *funcs;
+
+	/** @processor_data: Pointer to data specific to FW processor. */
+	union {
+		/** @mips_data: Pointer to MIPS-specific data. */
+		struct pvr_fw_mips_data *mips_data;
+	} processor_data;
+
+	/** @fw_heap_info: Firmware heap information. */
+	struct {
+		/** @gpu_addr: Base address of firmware heap in GPU address space. */
+		u64 gpu_addr;
+
+		/** @size: Size of main area of heap. */
+		u32 size;
+
+		/** @offset_mask: Mask for offsets within FW heap. */
+		u32 offset_mask;
+
+		/** @raw_size: Raw size of heap, including reserved areas. */
+		u32 raw_size;
+
+		/** @log2_size: Log2 of raw size of heap. */
+		u32 log2_size;
+
+		/** @config_offset: Offset of config area within heap. */
+		u32 config_offset;
+
+		/** @reserved_size: Size of reserved area in heap. */
+		u32 reserved_size;
+	} fw_heap_info;
+
+	/** @fw_mm: Firmware address space allocator. */
+	struct drm_mm fw_mm;
+
+	/** @fw_mm_lock: Lock protecting access to @fw_mm. */
+	spinlock_t fw_mm_lock;
+
+	/** @fw_mm_base: Base address of address space managed by @fw_mm. */
+	u64 fw_mm_base;
+
+	/**
+	 * @fwif_connection_ctl: Pointer to CPU mapping of FWIF connection
+	 *                       control structure.
+	 */
+	struct rogue_fwif_connection_ctl *fwif_connection_ctl;
+
+	/** @fwif_sysinit: Pointer to CPU mapping of FW SYSINIT structure. */
+	struct rogue_fwif_sysinit *fwif_sysinit;
+
+	/** @fwif_sysdata: Pointer to CPU mapping of FW SYSDATA structure. */
+	struct rogue_fwif_sysdata *fwif_sysdata;
+
+	/** @fwif_osinit: Pointer to CPU mapping of FW OSINIT structure. */
+	struct rogue_fwif_osinit *fwif_osinit;
+
+	/** @fwif_osdata: Pointer to CPU mapping of FW OSDATA structure. */
+	struct rogue_fwif_osdata *fwif_osdata;
+
+	/** @power_sync: Pointer to CPU mapping of power sync state. */
+	u32 *power_sync;
+
+	/** @hwrinfobuf: Pointer to CPU mapping of FW HWR info buffer. */
+	struct rogue_fwif_hwrinfobuf *hwrinfobuf;
+
+	/** @fw_trace: Device firmware trace buffer state. */
+	struct pvr_fw_trace fw_trace;
+};
+
+extern const struct pvr_fw_funcs pvr_fw_funcs_meta;
+extern const struct pvr_fw_funcs pvr_fw_funcs_mips;
+
+int pvr_fw_init(struct pvr_device *pvr_dev);
+void pvr_fw_fini(struct pvr_device *pvr_dev);
+
+int pvr_wait_for_fw_boot(struct pvr_device *pvr_dev);
+
+void pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val);
+
+int
+pvr_fw_mem_context_create(struct pvr_device *pvr_dev, struct pvr_vm_context *vm_ctx,
+			  struct pvr_fw_object **fw_mem_ctx_obj_out);
+void
+pvr_fw_mem_context_destroy(struct pvr_fw_object *fw_mem_ctx_obj);
+
+void
+pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size);
+
+const struct pvr_fw_layout_entry *
+pvr_fw_find_layout_entry(const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries,
+			 enum pvr_fw_section_id id);
+int
+pvr_fw_find_mmu_segment(u32 addr, u32 size, const struct pvr_fw_layout_entry *layout_entries,
+			u32 num_layout_entries, void *fw_code_ptr, void *fw_data_ptr,
+			void *fw_core_code_ptr, void *fw_core_data_ptr,
+			void **host_addr_out);
+
+int
+pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj,
+			 u32 offset);
+
+#endif /* __PVR_FW_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
new file mode 100644
index 000000000000..17321fb70106
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -0,0 +1,115 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_INFO_H__
+#define __PVR_FW_INFO_H__
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE SZ_4K
+
+/* Maximum number of entries in firmware layout table. */
+#define PVR_FW_INFO_MAX_NUM_ENTRIES 8
+
+enum pvr_fw_section_id {
+	META_CODE = 0,
+	META_PRIVATE_DATA,
+	META_COREMEM_CODE,
+	META_COREMEM_DATA,
+	MIPS_CODE,
+	MIPS_EXCEPTIONS_CODE,
+	MIPS_BOOT_CODE,
+	MIPS_PRIVATE_DATA,
+	MIPS_BOOT_DATA,
+	MIPS_STACK,
+	RISCV_UNCACHED_CODE,
+	RISCV_CACHED_CODE,
+	RISCV_PRIVATE_DATA,
+	RISCV_COREMEM_CODE,
+	RISCV_COREMEM_DATA,
+};
+
+enum pvr_fw_section_type {
+	NONE = 0,
+	FW_CODE,
+	FW_DATA,
+	FW_COREMEM_CODE,
+	FW_COREMEM_DATA,
+};
+
+/*
+ * FW binary format with FW info attached:
+ *
+ *          Contents        Offset
+ *     +-----------------+
+ *     |                 |    0
+ *     |                 |
+ *     | Original binary |
+ *     |      file       |
+ *     |   (.ldr/.elf)   |
+ *     |                 |
+ *     |                 |
+ *     +-----------------+
+ *     | FW info header  |  FILE_SIZE - 4K
+ *     +-----------------+
+ *     |                 |
+ *     | FW layout table |
+ *     |                 |
+ *     +-----------------+
+ *                          FILE_SIZE
+ */
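+
+/*
+ * The FW info header therefore sits a fixed 4K before the end of the file;
+ * pvr_fw_validate() locates it as:
+ *
+ *	header = (const struct pvr_fw_info_header *)
+ *		 &fw->data[fw->size - SZ_4K];
+ */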
+
+#define PVR_FW_INFO_VERSION 2
+
+#define PVR_FW_FLAGS_OPEN_SOURCE BIT(0)
+
+/**
+ * struct pvr_fw_info_header - Firmware header
+ */
+struct pvr_fw_info_header {
+	/** @info_version: FW info header version. */
+	u32 info_version;
+	/** @header_len: Header length. */
+	u32 header_len;
+	/** @layout_entry_num: Number of entries in the layout table. */
+	u32 layout_entry_num;
+	/** @layout_entry_size: Size of an entry in the layout table. */
+	u32 layout_entry_size;
+	/** @bvnc: GPU ID supported by firmware. */
+	aligned_u64 bvnc;
+	/** @fw_page_size: Page size of processor on which firmware executes. */
+	u32 fw_page_size;
+	/** @flags: Compatibility flags. */
+	u32 flags;
+	/** @fw_version_major: Firmware major version number. */
+	u16 fw_version_major;
+	/** @fw_version_minor: Firmware minor version number. */
+	u16 fw_version_minor;
+	/** @fw_version_build: Firmware build number. */
+	u32 fw_version_build;
+};
+
+/**
+ * struct pvr_fw_layout_entry - Entry in firmware layout table, describing a
+ *                              section of the firmware image
+ */
+struct pvr_fw_layout_entry {
+	/** @id: Section ID. */
+	enum pvr_fw_section_id id;
+	/** @type: Section type. */
+	enum pvr_fw_section_type type;
+	/** @base_addr: Base address of section in FW address space. */
+	u32 base_addr;
+	/** @max_size: Maximum size of section, in bytes. */
+	u32 max_size;
+	/** @alloc_size: Allocation size of section, in bytes. */
+	u32 alloc_size;
+	/** @alloc_offset: Allocation offset of section. */
+	u32 alloc_offset;
+};
+
+#endif /* __PVR_FW_INFO_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
new file mode 100644
index 000000000000..f7fed85b04aa
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -0,0 +1,598 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */
+
+#define POLL_TIMEOUT_USEC 1000000
+
+/**
+ * pvr_meta_cr_read32() - Read a META register via the Slave Port
+ * @pvr_dev: Device pointer.
+ * @reg_addr: Address of register to read.
+ * @reg_value_out: Pointer to location to store register value.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out)
+{
+	int err;
+
+	/* Wait for Slave Port to be Ready. */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	/* Issue a Read. */
+	PVR_CR_WRITE32(pvr_dev, META_SP_MSLVCTRL0,
+		       reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN);
+	(void)PVR_CR_READ32(pvr_dev, META_SP_MSLVCTRL0); /* Fence write. */
+
+	/* Wait for Slave Port to be Ready. */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+					ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	*reg_value_out = PVR_CR_READ32(pvr_dev, META_SP_MSLVDATAX);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_meta_wrapper_init(struct pvr_device *pvr_dev)
+{
+	u64 garten_config;
+
+	/* Configure META to Master boot. */
+	PVR_CR_WRITE64(pvr_dev, META_BOOT, ROGUE_CR_META_BOOT_MODE_EN);
+
+	/* Set Garten IDLE to META idle and set the Garten Wrapper BIF fence address. */
+
+	/* Garten IDLE bit controlled by META. */
+	garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+	/* The fence addr is set during the fw init sequence. */
+
+	/* Set PC = 0 for fences. */
+	garten_config &=
+		ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+	garten_config |=
+		(u64)MMU_CONTEXT_MAPPING_FWPRIV
+		<< ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+	/* Set SLC DM=META. */
+	garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID)
+			 << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+
+	PVR_CR_WRITE64(pvr_dev, MTS_GARTEN_WRAPPER_CONFIG, garten_config);
+
+	return 0;
+}
+
+static __always_inline void
+add_boot_arg(u32 **boot_conf, u32 param, u32 data)
+{
+	*(*boot_conf)++ = param;
+	*(*boot_conf)++ = data;
+}
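+
+/*
+ * Boot arguments are emitted as consecutive (param, data) u32 pairs, so a
+ * sequence of add_boot_arg() calls builds a flat register write list for the
+ * bootloader to consume in order.
+ */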
+
+static int
+meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw,
+		     struct rogue_meta_ldr_l1_data_blk *l1_data,
+		     u32 coremem_size,
+		     const struct pvr_fw_layout_entry *layout_entries,
+		     u32 num_layout_entries, u8 *fw_code_ptr, u8 *fw_data_ptr,
+		     u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+		     const u32 fw_size)
+{
+	struct rogue_meta_ldr_l2_data_blk *l2_block =
+		(struct rogue_meta_ldr_l2_data_blk *)(fw +
+						      l1_data->cmd_data[1]);
+	u32 offset = l1_data->cmd_data[0];
+	u32 data_size;
+	void *write_addr;
+	int err;
+
+	/* Verify header is within bounds. */
+	if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	data_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+
+	/* Verify data is within bounds. */
+	if (((u8 *)l2_block->block_data - fw) >= fw_size ||
+	    ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) &&
+	    !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+		/* Global range is aliased to local range */
+		offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+	}
+
+	err = pvr_fw_find_mmu_segment(offset, data_size, layout_entries,
+				      num_layout_entries, fw_code_ptr, fw_data_ptr,
+				      fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+	if (err) {
+		drm_err(drm_dev,
+			"Addr 0x%x (size: %d) not found in any firmware segment",
+			offset, data_size);
+		goto err_out;
+	}
+
+	memcpy(write_addr, l2_block->block_data, data_size);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+meta_ldr_cmd_zeromem(struct drm_device *drm_dev,
+		     struct rogue_meta_ldr_l1_data_blk *l1_data,
+		     u32 coremem_size,
+		     const struct pvr_fw_layout_entry *layout_entries,
+		     u32 num_layout_entries, u8 *fw_code_ptr, u8 *fw_data_ptr,
+		     u8 *fw_core_code_ptr, u8 *fw_core_data_ptr)
+{
+	u32 offset = l1_data->cmd_data[0];
+	u32 byte_count = l1_data->cmd_data[1];
+	void *write_addr;
+	int err;
+
+	if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) {
+		/* Cannot zero coremem directly. */
+		return 0;
+	}
+
+	/* Global range is aliased to local range */
+	offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+	err = pvr_fw_find_mmu_segment(offset, byte_count, layout_entries,
+				      num_layout_entries, fw_code_ptr, fw_data_ptr,
+				      fw_core_code_ptr, fw_core_data_ptr, &write_addr);
+	if (err) {
+		drm_err(drm_dev,
+			"Addr 0x%x (size: %d) not found in any firmware segment",
+			offset, byte_count);
+		goto err_out;
+	}
+
+	memset(write_addr, 0, byte_count);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw,
+		    struct rogue_meta_ldr_l1_data_blk *l1_data,
+		    const u32 fw_size, u32 **boot_conf_ptr)
+{
+	struct rogue_meta_ldr_l2_data_blk *l2_block =
+		(struct rogue_meta_ldr_l2_data_blk *)(fw +
+						      l1_data->cmd_data[0]);
+	struct rogue_meta_ldr_cfg_blk *config_command;
+	u32 l2_block_size;
+	u32 curr_block_size = 0;
+	u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+	int err;
+
+	/* Verify block header is within bounds. */
+	if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+	config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data;
+
+	if (((u8 *)config_command - fw) >= fw_size ||
+	    ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
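+	/*
+	 * Each config command is three dwords (12 bytes): type, register
+	 * offset and value.
+	 */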
+	while (l2_block_size >= 12) {
+		if (config_command->type != ROGUE_META_LDR_CFG_WRITE) {
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		/*
+		 * Only write to bootloader if we got a valid pointer to the FW
+		 * code allocation.
+		 */
+		if (boot_conf) {
+			u32 register_offset = config_command->block_data[0];
+			u32 register_value = config_command->block_data[1];
+
+			/* Do register write */
+			add_boot_arg(&boot_conf, register_offset,
+				     register_value);
+		}
+
+		curr_block_size = 12;
+		l2_block_size -= curr_block_size;
+		config_command = (struct rogue_meta_ldr_cfg_blk
+					  *)((uintptr_t)config_command +
+					     curr_block_size);
+	}
+
+	if (boot_conf_ptr)
+		*boot_conf_ptr = boot_conf;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * process_ldr_command_stream() - Process LDR firmware image and populate
+ *                                firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @layout_entries: Pointer to layout table.
+ * @num_layout_entries: Number of entries in layout table.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ * @boot_conf_ptr: Pointer to boot config argument pointer.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -EINVAL on any error in LDR command stream.
+ */
+static int
+process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+			   const struct pvr_fw_layout_entry *layout_entries,
+			   u32 num_layout_entries, u8 *fw_code_ptr,
+			   u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+			   u8 *fw_core_data_ptr, u32 **boot_conf_ptr)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct rogue_meta_ldr_block_hdr *ldr_header =
+		(struct rogue_meta_ldr_block_hdr *)fw;
+	struct rogue_meta_ldr_l1_data_blk *l1_data =
+		(struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data);
+	const u32 fw_size = pvr_dev->fw_dev.firmware->size;
+	int err;
+
+	u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+	u32 coremem_size;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size);
+	if (err)
+		goto err_out;
+
+	coremem_size *= SZ_1K;
+
+	while (l1_data) {
+		/* Verify block header is within bounds. */
+		if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size) {
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) {
+			/* Don't process comment blocks */
+			goto next_block;
+		}
+
+		switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) {
+		case ROGUE_META_LDR_CMD_LOADMEM:
+			err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data,
+						   coremem_size, layout_entries,
+						   num_layout_entries,
+						   fw_code_ptr, fw_data_ptr,
+						   fw_core_code_ptr,
+						   fw_core_data_ptr, fw_size);
+			if (err)
+				goto err_out;
+			break;
+
+		case ROGUE_META_LDR_CMD_START_THREADS:
+			/* Don't process this block */
+			break;
+
+		case ROGUE_META_LDR_CMD_ZEROMEM:
+			err = meta_ldr_cmd_zeromem(drm_dev, l1_data,
+						   coremem_size, layout_entries,
+						   num_layout_entries,
+						   fw_code_ptr, fw_data_ptr,
+						   fw_core_code_ptr,
+						   fw_core_data_ptr);
+			if (err)
+				goto err_out;
+			break;
+
+		case ROGUE_META_LDR_CMD_CONFIG:
+			err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size,
+						  &boot_conf);
+			if (err)
+				goto err_out;
+			break;
+
+		default:
+			err = -EINVAL;
+			goto err_out;
+		}
+
+next_block:
+		if (l1_data->next == 0xFFFFFFFF)
+			break;
+
+		l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw +
+								l1_data->next);
+	}
+
+	if (boot_conf_ptr)
+		*boot_conf_ptr = boot_conf;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static void
+configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id,
+		 u32 **boot_conf_ptr)
+{
+	u32 seg_out_addr0 = lower_32_bits(seg_out_addr);
+	u32 seg_out_addr1 = upper_32_bits(seg_out_addr);
+	u32 *boot_conf = *boot_conf_ptr;
+
+	/* META segments have a minimum size. */
+	u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN);
+
+	/* The limit is an offset, therefore off = size - 1. */
+	limit_off -= 1;
+
+	seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE;
+
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1);
+
+	*boot_conf_ptr = boot_conf;
+}
+
+static void
+configure_seg_mmu(struct pvr_device *pvr_dev,
+		  const struct pvr_fw_layout_entry *layout_entries,
+		  u32 num_layout_entries, u32 **boot_conf_ptr)
+{
+	u64 seg_out_addr_top;
+	u32 i;
+
+	seg_out_addr_top =
+		ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
+						ROGUE_FW_SEGMMU_META_BIFDM_ID);
+
+	for (i = 0; i < num_layout_entries; i++) {
+		/*
+		 * FW code is using the bootloader segment which is already
+		 * configured on boot. FW coremem code and data don't use the
+		 * segment MMU. Only the FW data segment needs to be configured.
+		 */
+		if (layout_entries[i].type == FW_DATA) {
+			u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID;
+			u64 seg_out_addr;
+
+			WARN_ON(!pvr_gem_get_fw_gpu_addr(pvr_dev->fw_dev.mem.data_obj,
+							 &seg_out_addr));
+			seg_out_addr += layout_entries[i].alloc_offset;
+			seg_out_addr |= seg_out_addr_top;
+
+			/* Write the sequence to the bootldr. */
+			configure_seg_id(seg_out_addr,
+					 layout_entries[i].base_addr,
+					 layout_entries[i].alloc_size, seg_id,
+					 boot_conf_ptr);
+
+			break;
+		}
+	}
+}
+
+static void
+configure_meta_caches(u32 **boot_conf_ptr)
+{
+	u32 *boot_conf = *boot_conf_ptr;
+	u32 d_cache_t0, i_cache_t0;
+	u32 d_cache_t1, i_cache_t1;
+	u32 d_cache_t2, i_cache_t2;
+	u32 d_cache_t3, i_cache_t3;
+
+	/* Initialise I/Dcache settings */
+	d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+	i_cache_t0 = 0;
+	i_cache_t1 = 0;
+	i_cache_t2 = 0;
+	i_cache_t3 = 0;
+
+	d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+	i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+
+	/* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+	add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL,
+		     META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+			     META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+	/* Data cache partitioning thread 0 to 3 */
+	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0);
+	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1);
+	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2);
+	add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3);
+
+	/* Enable data cache hits */
+	add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL,
+		     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	/* Instruction cache partitioning thread 0 to 3 */
+	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0);
+	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1);
+	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2);
+	add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3);
+
+	/* Enable instruction cache hits */
+	add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL,
+		     META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+	add_boot_arg(&boot_conf, 0x040000C0, 0);
+
+	*boot_conf_ptr = boot_conf;
+}
+
+static int
+pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+		    const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries,
+		    u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+		    u32 core_code_alloc_size)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	u32 *boot_conf;
+	int err;
+
+	boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET;
+
+	/* Slave port and JTAG accesses are privileged. */
+	add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD,
+		     META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+	configure_seg_mmu(pvr_dev, layout_entries, num_layout_entries, &boot_conf);
+
+	/* Populate FW sections from LDR image. */
+	err = process_ldr_command_stream(pvr_dev, fw, layout_entries, num_layout_entries,
+					 fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+					 fw_core_data_ptr, &boot_conf);
+	if (err)
+		goto err_out;
+
+	configure_meta_caches(&boot_conf);
+
+	/* End argument list. */
+	add_boot_arg(&boot_conf, 0, 0);
+
+	if (fw_dev->mem.core_code_obj) {
+		u32 core_code_fw_addr;
+
+		pvr_gem_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr);
+		add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size);
+	} else {
+		add_boot_arg(&boot_conf, 0, 0);
+	}
+	/* None of the cores supported by this driver have META DMA. */
+	add_boot_arg(&boot_conf, 0, 0);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_meta_init(struct pvr_device *pvr_dev)
+{
+	pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0);
+
+	return 0;
+}
+
+static u32
+pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+	u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS;
+
+	/* META cacheability is determined by address. */
+	if (fw_obj->base.flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+		fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED |
+			   ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+
+	return fw_addr;
+}
+
+static int
+pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	struct pvr_gem_object *pvr_obj = from_pvr_fw_object(fw_obj);
+
+	return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+			  pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start);
+}
+
+static bool
+pvr_meta_check_and_ack_irq(struct pvr_device *pvr_dev)
+{
+	u32 irq_status = PVR_CR_READ32(pvr_dev, META_SP_MSLVIRQSTATUS);
+
+	if (!(irq_status & ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN))
+		return false; /* Spurious IRQ - ignore. */
+
+	/* Acknowledge IRQ. */
+	PVR_CR_WRITE32(pvr_dev, META_SP_MSLVIRQSTATUS,
+		       ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK);
+
+	return true;
+}
+
+static bool
+pvr_meta_has_fixed_data_addr(void)
+{
+	return false;
+}
+
+const struct pvr_fw_funcs pvr_fw_funcs_meta = {
+	.init = pvr_meta_init,
+	.fw_process = pvr_meta_fw_process,
+	.vm_map = pvr_meta_vm_map,
+	.vm_unmap = pvr_meta_vm_unmap,
+	.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
+	.wrapper_init = pvr_meta_wrapper_init,
+	.check_and_ack_irq = pvr_meta_check_and_ack_irq,
+	.has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.h b/drivers/gpu/drm/imagination/pvr_fw_meta.h
new file mode 100644
index 000000000000..5caeb8a4ff61
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.h
@@ -0,0 +1,14 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_META_H__
+#define __PVR_FW_META_H__
+
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h */
+struct pvr_device;
+
+int pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out);
+
+#endif /* __PVR_FW_META_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
new file mode 100644
index 000000000000..45c152fd8363
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -0,0 +1,276 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm_mips.h"
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_MIPS_BASE 0xC0000000
+#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
+#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
+
+/**
+ * process_elf_command_stream() - Process ELF firmware image and populate
+ *                                firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @layout_entries: Pointer to layout table.
+ * @num_layout_entries: Number of entries in layout table.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -EINVAL on any error in ELF command stream.
+ */
+static int
+process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw,
+			   const struct pvr_fw_layout_entry *layout_entries,
+			   u32 num_layout_entries, u8 *fw_code_ptr,
+			   u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+			   u8 *fw_core_data_ptr)
+{
+	struct elf32_hdr *header = (struct elf32_hdr *)fw;
+	struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff);
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	u32 entry;
+	int err;
+
+	for (entry = 0; entry < header->e_phnum; entry++, program_header++) {
+		void *write_addr;
+
+		/* Only consider loadable entries in the ELF segment table */
+		if (program_header->p_type != PT_LOAD)
+			continue;
+
+		err = pvr_fw_find_mmu_segment(program_header->p_vaddr, program_header->p_memsz,
+					      layout_entries, num_layout_entries, fw_code_ptr,
+					      fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr,
+					      &write_addr);
+		if (err) {
+			drm_err(drm_dev,
+				"Addr 0x%x (size: %d) not found in any firmware segment",
+				program_header->p_vaddr, program_header->p_memsz);
+			goto err_out;
+		}
+
+		/* Write to FW allocation only if available */
+		if (write_addr) {
+			memcpy(write_addr, fw + program_header->p_offset,
+			       program_header->p_filesz);
+
+			memset((u8 *)write_addr + program_header->p_filesz, 0,
+			       program_header->p_memsz - program_header->p_filesz);
+		}
+	}
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_mips_init(struct pvr_device *pvr_dev)
+{
+	pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_MIPS_SHIFT, ROGUE_FW_HEAP_MIPS_RESERVED_SIZE);
+
+	return pvr_vm_mips_init(pvr_dev);
+}
+
+static void
+pvr_mips_fini(struct pvr_device *pvr_dev)
+{
+	pvr_vm_mips_fini(pvr_dev);
+}
+
+static int
+pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw,
+		    const struct pvr_fw_layout_entry *layout_entries, u32 num_layout_entries,
+		    u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr,
+		    u32 core_code_alloc_size)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+	const struct pvr_fw_layout_entry *boot_code_entry;
+	const struct pvr_fw_layout_entry *boot_data_entry;
+	const struct pvr_fw_layout_entry *exception_code_entry;
+	const struct pvr_fw_layout_entry *stack_entry;
+	struct rogue_mipsfw_boot_data *boot_data;
+	dma_addr_t dma_addr;
+	u32 page_nr;
+	int err;
+
+	err = process_elf_command_stream(pvr_dev, fw, layout_entries, num_layout_entries,
+					 fw_code_ptr, fw_data_ptr, fw_core_code_ptr,
+					 fw_core_data_ptr);
+	if (err)
+		goto err_out;
+
+	boot_code_entry = pvr_fw_find_layout_entry(layout_entries, num_layout_entries,
+						   MIPS_BOOT_CODE);
+	boot_data_entry = pvr_fw_find_layout_entry(layout_entries, num_layout_entries,
+						   MIPS_BOOT_DATA);
+	exception_code_entry = pvr_fw_find_layout_entry(layout_entries, num_layout_entries,
+							MIPS_EXCEPTIONS_CODE);
+	if (!boot_code_entry || !boot_data_entry || !exception_code_entry) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	WARN_ON(pvr_gem_get_dma_addr(&fw_dev->mem.code_obj->base, boot_code_entry->alloc_offset,
+				     &mips_data->boot_code_dma_addr));
+	WARN_ON(pvr_gem_get_dma_addr(&fw_dev->mem.data_obj->base, boot_data_entry->alloc_offset,
+				     &mips_data->boot_data_dma_addr));
+	WARN_ON(pvr_gem_get_dma_addr(&fw_dev->mem.code_obj->base,
+				     exception_code_entry->alloc_offset,
+				     &mips_data->exception_code_dma_addr));
+
+	stack_entry = pvr_fw_find_layout_entry(layout_entries, num_layout_entries, MIPS_STACK);
+	if (!stack_entry) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	boot_data = (struct rogue_mipsfw_boot_data *)(fw_data_ptr + boot_data_entry->alloc_offset +
+						      ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET);
+
+	WARN_ON(pvr_fw_get_dma_addr(fw_dev->mem.data_obj, stack_entry->alloc_offset,
+				    &dma_addr));
+	boot_data->stack_phys_addr = dma_addr;
+
+	boot_data->reg_base = pvr_dev->regs_resource->start;
+
+	for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) {
+		WARN_ON(pvr_gem_get_dma_addr(mips_data->pt_obj,
+					     page_nr << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K, &dma_addr));
+
+		boot_data->pt_phys_addr[page_nr] = dma_addr;
+	}
+
+	boot_data->pt_log2_page_size = ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+	boot_data->pt_num_pages = ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES;
+	boot_data->reserved1 = 0;
+	boot_data->reserved2 = 0;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_mips_wrapper_init(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_mips_data *mips_data = pvr_dev->fw_dev.processor_data.mips_data;
+	u64 remap_settings = ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE;
+	u32 phys_bus_width;
+
+	WARN_ON(PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width));
+	/* Currently MIPS FW only supported with physical bus width > 32 bits. */
+	if (WARN_ON(phys_bus_width <= 32))
+		return -EINVAL;
+
+	PVR_CR_WRITE32(pvr_dev, MIPS_WRAPPER_CONFIG,
+		       (ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE >>
+			ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+		       ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+
+	/* Configure remap for boot code, boot data and exceptions code areas. */
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP1_CONFIG1,
+		       ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN |
+		       ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP1_CONFIG2,
+		       (mips_data->boot_code_dma_addr &
+			~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+	if (PVR_HAS_QUIRK(pvr_dev, 63553)) {
+		/*
+		 * WA always required on 36 bit cores, to avoid continuous unmapped memory accesses
+		 * to address 0x0.
+		 */
+		WARN_ON(phys_bus_width != 36);
+
+		PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP5_CONFIG1,
+			       ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN);
+		PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP5_CONFIG2,
+			       (mips_data->boot_code_dma_addr &
+				~ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK) |
+			       remap_settings);
+	}
+
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP2_CONFIG1,
+		       ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN |
+		       ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN);
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP2_CONFIG2,
+		       (mips_data->boot_data_dma_addr &
+			~ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP3_CONFIG1,
+		       ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN |
+		       ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN);
+	PVR_CR_WRITE64(pvr_dev, MIPS_ADDR_REMAP3_CONFIG2,
+		       (mips_data->exception_code_dma_addr &
+			~ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings);
+
+	/* Garten IDLE bit controlled by MIPS. */
+	PVR_CR_WRITE64(pvr_dev, MTS_GARTEN_WRAPPER_CONFIG,
+		       ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+	/* Turn on the EJTAG probe. */
+	PVR_CR_WRITE32(pvr_dev, MIPS_DEBUG_CONFIG, 0);
+
+	return 0;
+}
+
+static u32
+pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+	struct pvr_device *pvr_dev = fw_obj->base.pvr_dev;
+
+	/* MIPS cacheability is determined by page table. */
+	return ((fw_obj->fw_addr_offset + offset) & pvr_dev->fw_dev.fw_heap_info.offset_mask) |
+	       ROGUE_FW_HEAP_MIPS_BASE;
+}
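+
+/*
+ * For example (sketch, assuming the heap offset mask covers the full 16 MB
+ * heap): an object at heap offset 0x1000 yields firmware address
+ * 0xC0000000 | 0x1000 = 0xC0001000.
+ */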
+
+static bool
+pvr_mips_check_and_ack_irq(struct pvr_device *pvr_dev)
+{
+	u32 irq_status = PVR_CR_READ32(pvr_dev, MIPS_WRAPPER_IRQ_STATUS);
+
+	if (!(irq_status & ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN))
+		return false; /* Spurious IRQ - ignore. */
+
+	/* Acknowledge IRQ. */
+	PVR_CR_WRITE32(pvr_dev, MIPS_WRAPPER_IRQ_CLEAR,
+		       ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN);
+
+	return true;
+}
+
+static bool
+pvr_mips_has_fixed_data_addr(void)
+{
+	return true;
+}
+
+const struct pvr_fw_funcs pvr_fw_funcs_mips = {
+	.init = pvr_mips_init,
+	.fini = pvr_mips_fini,
+	.fw_process = pvr_mips_fw_process,
+	.vm_map = pvr_vm_mips_map,
+	.vm_unmap = pvr_vm_mips_unmap,
+	.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
+	.wrapper_init = pvr_mips_wrapper_init,
+	.check_and_ack_irq = pvr_mips_check_and_ack_irq,
+	.has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
new file mode 100644
index 000000000000..d5db00a9aacf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -0,0 +1,38 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_MIPS_H__
+#define __PVR_FW_MIPS_H__
+
+#include <linux/types.h>
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+/**
+ * struct pvr_fw_mips_data - MIPS-specific data
+ */
+struct pvr_fw_mips_data {
+	/** @pt_obj: Object representing MIPS pagetable. */
+	struct pvr_gem_object *pt_obj;
+
+	/** @pt: Pointer to CPU mapping of MIPS pagetable. */
+	u32 *pt;
+
+	/** @boot_code_dma_addr: DMA address of MIPS boot code. */
+	dma_addr_t boot_code_dma_addr;
+
+	/** @boot_data_dma_addr: DMA address of MIPS boot data. */
+	dma_addr_t boot_data_dma_addr;
+
+	/** @exception_code_dma_addr: DMA address of MIPS exception code. */
+	dma_addr_t exception_code_dma_addr;
+
+	/** @cache_policy: Cache policy for this processor. */
+	u32 cache_policy;
+
+	/** @pfn_mask: PFN mask for MIPS pagetable. */
+	u32 pfn_mask;
+};
+
+#endif /* __PVR_FW_MIPS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
new file mode 100644
index 000000000000..7a7f5a9ea61b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -0,0 +1,279 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_meta.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define POLL_TIMEOUT_USEC 1000000
+
+static void
+rogue_axi_ace_list_init(struct pvr_device *pvr_dev)
+{
+	/* Setup AXI-ACE config. Set everything to outer cache. */
+	u64 reg_val =
+		(3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+		(3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+		(2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+	PVR_CR_WRITE64(pvr_dev, AXI_ACE_LITE_CONFIGURATION, reg_val);
+}
+
+static void
+rogue_bif_init(struct pvr_device *pvr_dev)
+{
+	dma_addr_t pc_dma_addr;
+	u64 pc_addr;
+
+	/* Acquire the address of the Kernel Page Catalogue. */
+	pc_dma_addr = pvr_vm_get_page_table_root_addr(pvr_dev->kernel_vm_ctx);
+
+	/* Write the kernel catalogue base. */
+	pc_addr = ((((u64)pc_dma_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+		    << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) &
+		   ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
+
+	__pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV),
+			 pc_addr);
+}
+
+static int
+rogue_slc_init(struct pvr_device *pvr_dev)
+{
+	u16 slc_cache_line_size_in_bits;
+	u32 reg_val;
+	int err;
+
+	/*
+	 * SLC Misc control.
+	 *
+	 * Note: This is a 64bit register and we set only the lower 32bits
+	 *       leaving the top 32bits (ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS)
+	 *       unchanged from the HW default.
+	 */
+	reg_val = (PVR_CR_READ32(pvr_dev, SLC_CTRL_MISC) &
+		      ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+		     ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_in_bits, &slc_cache_line_size_in_bits);
+	if (err)
+		return err;
+
+	/* Bypass burst combiner if SLC line size is smaller than 1024 bits. */
+	if (slc_cache_line_size_in_bits < 1024)
+		reg_val |= ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+
+	PVR_CR_WRITE32(pvr_dev, SLC_CTRL_MISC, reg_val);
+
+	return 0;
+}
+
+/**
+ * pvr_fw_start() - Start FW processor and boot firmware
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by rogue_slc_init().
+ */
+int
+pvr_fw_start(struct pvr_device *pvr_dev)
+{
+	int err;
+
+	if (PVR_HAS_FEATURE(pvr_dev, sys_bus_secure_reset)) {
+		/*
+		 * Disable the default sys_bus_secure protection to perform
+		 * minimal setup.
+		 */
+		PVR_CR_WRITE32(pvr_dev, SYS_BUS_SECURE, 0);
+		(void)PVR_CR_READ32(pvr_dev, SYS_BUS_SECURE); /* Fence write */
+	}
+
+	/* Set Rogue in soft-reset. */
+	PVR_CR_WRITE64(pvr_dev, SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);
+
+	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline. */
+	(void)PVR_CR_READ64(pvr_dev, SOFT_RESET);
+
+	/* Take Rascal and Dust out of reset. */
+	PVR_CR_WRITE64(pvr_dev, SOFT_RESET,
+		       ROGUE_CR_SOFT_RESET_MASKFULL ^
+			       ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+	(void)PVR_CR_READ64(pvr_dev, SOFT_RESET);
+
+	/* Take everything out of reset but the FW processor. */
+	PVR_CR_WRITE64(pvr_dev, SOFT_RESET, ROGUE_CR_SOFT_RESET_GARTEN_EN);
+
+	(void)PVR_CR_READ64(pvr_dev, SOFT_RESET);
+
+	err = rogue_slc_init(pvr_dev);
+	if (err)
+		goto err_reset;
+
+	/* Initialise Firmware wrapper. */
+	pvr_dev->fw_dev.funcs->wrapper_init(pvr_dev);
+
+	/* We must init the AXI-ACE interface before first BIF transaction. */
+	rogue_axi_ace_list_init(pvr_dev);
+
+	/* Initialise BIF. */
+	rogue_bif_init(pvr_dev);
+
+	/* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */
+	udelay(3);
+
+	PVR_CR_WRITE64(pvr_dev, SOFT_RESET, 0x0);
+	(void)PVR_CR_READ64(pvr_dev, SOFT_RESET);
+
+	/* ... and afterwards. */
+	udelay(3);
+
+	return 0;
+
+err_reset:
+	/* Put everything back into soft-reset. */
+	PVR_CR_WRITE64(pvr_dev, SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);
+
+	return err;
+}
+
+/**
+ * pvr_fw_stop() - Stop FW processor
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_fw_stop(struct pvr_device *pvr_dev)
+{
+	const u32 sidekick_idle_mask = ROGUE_CR_SIDEKICK_IDLE_MASKFULL &
+				       ~(ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN |
+					 ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN |
+					 ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN);
+	bool skip_garten_idle = false;
+	u32 reg_value;
+	int err;
+
+	/*
+	 * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+	 * For cores with the LAYOUT_MARS feature, SIDEKICK would have been
+	 * powered down by the FW.
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+				sidekick_idle_mask, POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	/* Unset MTS DM association with threads. */
+	PVR_CR_WRITE32(pvr_dev, MTS_INTCTX_THREAD0_DM_ASSOC,
+		       ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL &
+		       ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+	PVR_CR_WRITE32(pvr_dev, MTS_BGCTX_THREAD0_DM_ASSOC,
+		       ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL &
+		       ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK);
+	PVR_CR_WRITE32(pvr_dev, MTS_INTCTX_THREAD1_DM_ASSOC,
+		       ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL &
+		       ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+	PVR_CR_WRITE32(pvr_dev, MTS_BGCTX_THREAD1_DM_ASSOC,
+		       ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL &
+		       ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK);
+
+	/* Extra Idle checks. */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_STATUS_MMU, 0,
+				ROGUE_CR_BIF_STATUS_MMU_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_STATUS_MMU, 0,
+				ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	if (!PVR_HAS_FEATURE(pvr_dev, xt_top_infrastructure)) {
+		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_READS_EXT_STATUS, 0,
+					ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL,
+					POLL_TIMEOUT_USEC);
+		if (err)
+			goto err_out;
+	}
+
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_READS_EXT_STATUS, 0,
+				ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	err = pvr_cr_poll_reg64(pvr_dev, ROGUE_CR_SLC_STATUS1, 0,
+				ROGUE_CR_SLC_STATUS1_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	/*
+	 * Wait for SLC to signal IDLE.
+	 * For cores with the LAYOUT_MARS feature, SLC would have been powered
+	 * down by the FW.
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE,
+				ROGUE_CR_SLC_IDLE_MASKFULL,
+				ROGUE_CR_SLC_IDLE_MASKFULL, POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	/*
+	 * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+	 * For cores with the LAYOUT_MARS feature, SIDEKICK would have been powered
+	 * down by the FW.
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+				sidekick_idle_mask, POLL_TIMEOUT_USEC);
+	if (err)
+		goto err_out;
+
+	if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_META) {
+		err = pvr_meta_cr_read32(pvr_dev, META_CR_TxVECINT_BHALT, &reg_value);
+		if (err)
+			goto err_out;
+
+		/*
+		 * Wait for Sidekick/Jones to signal IDLE including the Garten
+		 * Wrapper if there is no debugger attached (TxVECINT_BHALT =
+		 * 0x0).
+		 */
+		if (reg_value)
+			skip_garten_idle = true;
+	}
+
+	if (!skip_garten_idle) {
+		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE,
+					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+					POLL_TIMEOUT_USEC);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	return err;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.h b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
new file mode 100644
index 000000000000..3607789c9605
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
@@ -0,0 +1,13 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_STARTSTOP_H__
+#define __PVR_FW_STARTSTOP_H__
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+int pvr_fw_start(struct pvr_device *pvr_dev);
+int pvr_fw_stop(struct pvr_device *pvr_dev);
+
+#endif /* __PVR_FW_STARTSTOP_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
new file mode 100644
index 000000000000..42dcb10d6ba3
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -0,0 +1,505 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_sf.h"
+#include "pvr_fw_trace.h"
+
+#include <drm/drm_file.h>
+
+#include <linux/build_bug.h>
+#include <linux/dcache.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+/*
+ * The tuple pairs that will be generated using XMacros will be stored here.
+ * This macro definition must match the definition of rogue_fw_log_sfids in
+ * pvr_rogue_fwif_sf.h.
+ */
+static const struct rogue_km_stid_fmt stid_fmts[] = {
+#define X(a, b, c, d, e) { ROGUE_FW_LOG_CREATESFID(a, b, e), d },
+	ROGUE_FW_LOG_SFIDLIST
+#undef X
+};
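+
+/*
+ * For illustration: each X(a, b, c, d, e) entry in ROGUE_FW_LOG_SFIDLIST
+ * expands above to { ROGUE_FW_LOG_CREATESFID(a, b, e), d }, pairing a
+ * generated trace ID with its printf-style format string (d).
+ */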
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	u32 thread_nr;
+	int err;
+
+	fw_trace->tracebuf_ctrl =
+		pvr_gem_create_and_map_fw_object(pvr_dev,
+						 sizeof(*fw_trace->tracebuf_ctrl),
+						 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						 DRM_PVR_BO_CREATE_ZEROED,
+						 &fw_trace->tracebuf_ctrl_obj);
+	if (IS_ERR(fw_trace->tracebuf_ctrl)) {
+		drm_err(drm_dev, "Unable to allocate trace buffer control structure\n");
+		err = PTR_ERR(fw_trace->tracebuf_ctrl);
+		goto err_out;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) !=
+		     ARRAY_SIZE(fw_trace->buffers));
+
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+		struct rogue_fwif_tracebuf_space *tracebuf_space =
+			&fw_trace->tracebuf_ctrl->tracebuf[thread_nr];
+		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+		trace_buffer->buf =
+			pvr_gem_create_and_map_fw_object(pvr_dev,
+							 ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
+							 sizeof(*trace_buffer->buf),
+							 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							 DRM_PVR_BO_CREATE_ZEROED,
+							 &trace_buffer->buf_obj);
+		if (IS_ERR(trace_buffer->buf)) {
+			drm_err(drm_dev, "Unable to allocate trace buffer\n");
+			err = PTR_ERR(trace_buffer->buf);
+			trace_buffer->buf = NULL;
+			goto err_free_buf;
+		}
+		trace_buffer->tracebuf_space = tracebuf_space;
+
+		pvr_gem_get_fw_addr(trace_buffer->buf_obj, &tracebuf_space->trace_buffer_fw_addr);
+
+		tracebuf_space->trace_buffer = trace_buffer->buf;
+		tracebuf_space->trace_pointer = 0;
+	}
+
+	fw_trace->tracebuf_ctrl->tracebuf_size_in_dwords =
+		ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+	fw_trace->tracebuf_ctrl->tracebuf_flags = 0;
+
+	fw_trace->group_mask = pvr_dev->params.fw_trace_mask;
+	if (fw_trace->group_mask)
+		fw_trace->tracebuf_ctrl->log_type = fw_trace->group_mask |
+						    ROGUE_FWIF_LOG_TYPE_TRACE;
+	else
+		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+	return 0;
+
+err_free_buf:
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+		if (trace_buffer->buf) {
+			pvr_fw_object_vunmap(trace_buffer->buf_obj, false);
+			pvr_fw_object_release(trace_buffer->buf_obj);
+		}
+	}
+
+	pvr_fw_object_vunmap(fw_trace->tracebuf_ctrl_obj, false);
+	pvr_fw_object_release(fw_trace->tracebuf_ctrl_obj);
+
+err_out:
+	return err;
+}
+
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+	u32 thread_nr;
+
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+		pvr_fw_object_vunmap(trace_buffer->buf_obj, false);
+		pvr_fw_object_release(trace_buffer->buf_obj);
+	}
+	pvr_fw_object_vunmap(fw_trace->tracebuf_ctrl_obj, false);
+	pvr_fw_object_release(fw_trace->tracebuf_ctrl_obj);
+}
+
+/**
+ * update_logtype() - Send KCCB command to trigger FW to update logtype
+ * @pvr_dev: Target PowerVR device
+ * @group_mask: New log group mask.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_kccb_send_cmd().
+ */
+static int
+update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
+{
+	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+	struct rogue_fwif_kccb_cmd cmd;
+
+	if (group_mask)
+		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
+	else
+		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
+	cmd.kccb_flags = 0;
+
+	return pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int fw_trace_group_mask_show(struct seq_file *m, void *data)
+{
+	struct pvr_device *pvr_dev = m->private;
+
+	seq_printf(m, "%08x\n", pvr_dev->fw_dev.fw_trace.group_mask);
+
+	return 0;
+}
+
+static int fw_trace_group_mask_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, fw_trace_group_mask_show, inode->i_private);
+}
+
+static ssize_t fw_trace_group_mask_write(struct file *file, const char __user *ubuf, size_t len,
+					 loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct pvr_device *pvr_dev = m->private;
+	u32 new_group_mask;
+	int err;
+
+	err = kstrtouint_from_user(ubuf, len, 0, &new_group_mask);
+	if (err)
+		goto err_out;
+
+	err = update_logtype(pvr_dev, new_group_mask);
+	if (err)
+		goto err_out;
+
+	pvr_dev->fw_dev.fw_trace.group_mask = new_group_mask;
+
+err_out:
+	return err ? err : len;
+}
+
+static const struct file_operations pvr_fw_trace_group_mask_fops = {
+	.owner = THIS_MODULE,
+	.open = fw_trace_group_mask_open,
+	.read = seq_read,
+	.write = fw_trace_group_mask_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+struct pvr_fw_trace_seq_data {
+	/** @buffer: Pointer to copy of trace data. */
+	u32 *buffer;
+
+	/** @start_offset: Starting offset in trace data, as reported by FW. */
+	u32 start_offset;
+
+	/** @idx: Current index into trace data. */
+	u32 idx;
+
+	/** @assert_buf: Trace assert buffer, as reported by FW. */
+	struct rogue_fwif_file_info_buf assert_buf;
+};
+
+static u32 find_sfid(u32 id)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) {
+		if (stid_fmts[i].id == id)
+			return i;
+	}
+
+	return ROGUE_FW_SF_LAST;
+}
+
+static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset)
+{
+	u32 idx;
+
+	idx = trace_seq_data->idx + offset;
+	if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS)
+		return 0;
+
+	idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+	return trace_seq_data->buffer[idx];
+}
+
+/**
+ * fw_trace_get_next() - Advance trace index to next entry
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Returns:
+ *  * %true if trace index is now pointing to a valid entry, or
+ *  * %false if trace index is pointing to an invalid entry, or has hit the end
+ *    of the trace.
+ */
+static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+	u32 id, sf_id;
+
+	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+		id = read_fw_trace(trace_seq_data, 0);
+		trace_seq_data->idx++;
+		if (!ROGUE_FW_LOG_VALIDID(id))
+			continue;
+		if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+			/* Assertion failure marks the end of the trace. */
+			return false;
+		}
+
+		sf_id = find_sfid(id);
+		if (sf_id == ROGUE_FW_SF_FIRST)
+			continue;
+		if (sf_id == ROGUE_FW_SF_LAST) {
+			/*
+			 * Could not match with an ID in the SF table, trace is
+			 * most likely corrupt from this point.
+			 */
+			return false;
+		}
+
+		/* Skip over the timestamp, and any parameters. */
+		trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id);
+
+		/* Ensure index is now pointing to a valid trace entry. */
+		id = read_fw_trace(trace_seq_data, 0);
+		if (!ROGUE_FW_LOG_VALIDID(id))
+			continue;
+
+		return true;
+	}
+
+	/* Hit end of trace data. */
+	return false;
+}
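+
+/*
+ * Trace entry layout implied by the index arithmetic above (sketch):
+ *
+ *	dword 0:	message ID
+ *	dwords 1-2:	64-bit timestamp (low word first)
+ *	dwords 3+:	ROGUE_FW_SF_PARAMNUM(id) parameters
+ */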
+
+/**
+ * fw_trace_get_first() - Find first valid entry in trace
+ * @trace_seq_data: Trace sequence data.
+ *
+ * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries.
+ *
+ * If the trace has no valid entries, this function will exit with the trace
+ * index pointing to the end of the trace. trace_seq_show() will return an error
+ * in this state.
+ */
+static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data)
+{
+	trace_seq_data->idx = 0;
+
+	while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+		u32 id = read_fw_trace(trace_seq_data, 0);
+
+		if (ROGUE_FW_LOG_VALIDID(id)) {
+			u32 sf_id = find_sfid(id);
+
+			if (sf_id != ROGUE_FW_SF_FIRST)
+				break;
+		}
+		trace_seq_data->idx++;
+	}
+}
+
+static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+	u32 i;
+
+	/* Reset trace index, then advance to *pos. */
+	fw_trace_get_first(trace_seq_data);
+
+	for (i = 0; i < *pos; i++) {
+		if (!fw_trace_get_next(trace_seq_data))
+			return NULL;
+	}
+
+	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+
+	(*pos)++;
+	if (!fw_trace_get_next(trace_seq_data))
+		return NULL;
+
+	return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL;
+}
+
+static void fw_trace_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int fw_trace_seq_show(struct seq_file *s, void *v)
+{
+	struct pvr_fw_trace_seq_data *trace_seq_data = s->private;
+	u64 timestamp;
+	u32 id;
+	u32 sf_id;
+	int err;
+
+	if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	id = read_fw_trace(trace_seq_data, 0);
+	if (!ROGUE_FW_LOG_VALIDID(id)) {
+		/* Index is not pointing at a valid entry. */
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	sf_id = find_sfid(id);
+	if (sf_id == ROGUE_FW_SF_LAST) {
+		/* Index is not pointing at a valid entry. */
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	timestamp = read_fw_trace(trace_seq_data, 1) |
+		((u64)read_fw_trace(trace_seq_data, 2) << 32);
+	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
+		ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
+
+	seq_printf(s, "[%llu] : ", timestamp);
+	if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) {
+		seq_printf(s, "ASSERTION %s failed at %s:%u",
+			   trace_seq_data->assert_buf.info,
+			   trace_seq_data->assert_buf.path,
+			   trace_seq_data->assert_buf.line_num);
+	} else {
+		seq_printf(s, stid_fmts[sf_id].name,
+			   read_fw_trace(trace_seq_data, 3),
+			   read_fw_trace(trace_seq_data, 4),
+			   read_fw_trace(trace_seq_data, 5),
+			   read_fw_trace(trace_seq_data, 6),
+			   read_fw_trace(trace_seq_data, 7),
+			   read_fw_trace(trace_seq_data, 8),
+			   read_fw_trace(trace_seq_data, 9),
+			   read_fw_trace(trace_seq_data, 10),
+			   read_fw_trace(trace_seq_data, 11),
+			   read_fw_trace(trace_seq_data, 12),
+			   read_fw_trace(trace_seq_data, 13),
+			   read_fw_trace(trace_seq_data, 14),
+			   read_fw_trace(trace_seq_data, 15),
+			   read_fw_trace(trace_seq_data, 16),
+			   read_fw_trace(trace_seq_data, 17),
+			   read_fw_trace(trace_seq_data, 18),
+			   read_fw_trace(trace_seq_data, 19),
+			   read_fw_trace(trace_seq_data, 20),
+			   read_fw_trace(trace_seq_data, 21),
+			   read_fw_trace(trace_seq_data, 22));
+	}
+	seq_puts(s, "\n");
+	return 0;
+
+err_out:
+	return err;
+}
+
+static const struct seq_operations pvr_fw_trace_seq_ops = {
+	.start = fw_trace_seq_start,
+	.next = fw_trace_seq_next,
+	.stop = fw_trace_seq_stop,
+	.show = fw_trace_seq_show
+};
+
+static int fw_trace_open(struct inode *inode, struct file *file)
+{
+	struct pvr_fw_trace_buffer *trace_buffer = inode->i_private;
+	struct rogue_fwif_tracebuf_space *tracebuf_space =
+		trace_buffer->tracebuf_space;
+	struct pvr_fw_trace_seq_data *trace_seq_data;
+	int err;
+
+	trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL);
+	if (!trace_seq_data) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS,
+					 sizeof(*trace_seq_data->buffer), GFP_KERNEL);
+	if (!trace_seq_data->buffer) {
+		err = -ENOMEM;
+		goto err_free_data;
+	}
+
+	/*
+	 * Take a local copy of the trace buffer, as firmware may still be
+	 * writing to it. This will exist as long as this file is open.
+	 */
+	memcpy(trace_seq_data->buffer, trace_buffer->buf,
+	       ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32));
+	trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer);
+	trace_seq_data->assert_buf = tracebuf_space->assert_buf;
+	fw_trace_get_first(trace_seq_data);
+
+	err = seq_open(file, &pvr_fw_trace_seq_ops);
+	if (err)
+		goto err_free_buffer;
+
+	((struct seq_file *)file->private_data)->private = trace_seq_data;
+
+	return 0;
+
+err_free_buffer:
+	kfree(trace_seq_data->buffer);
+
+err_free_data:
+	kfree(trace_seq_data);
+
+err_out:
+	return err;
+}
+
+static int fw_trace_release(struct inode *inode, struct file *file)
+{
+	struct pvr_fw_trace_seq_data *trace_seq_data =
+		((struct seq_file *)file->private_data)->private;
+
+	seq_release(inode, file);
+	kfree(trace_seq_data->buffer);
+	kfree(trace_seq_data);
+
+	return 0;
+}
+
+static const struct file_operations pvr_fw_trace_fops = {
+	.owner = THIS_MODULE,
+	.open = fw_trace_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = fw_trace_release,
+};
+
+void
+pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask)
+{
+	if (old_mask != new_mask)
+		update_logtype(pvr_dev, new_mask);
+}
+
+void
+pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+	u32 thread_nr;
+
+	static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10,
+		      "The filename buffer is only large enough for a "
+		      "single-digit thread count");
+
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) {
+		char filename[8];
+
+		snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr);
+		debugfs_create_file(filename, 0400, dir,
+				    &fw_trace->buffers[thread_nr],
+				    &pvr_fw_trace_fops);
+	}
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h
new file mode 100644
index 000000000000..6f4781b0f84d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h
@@ -0,0 +1,78 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_FW_TRACE_H__
+#define __PVR_FW_TRACE_H__
+
+#include <drm/drm_file.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif.h"
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/* Forward declarations from pvr_rogue_fwif.h */
+struct rogue_fwif_tracebuf;
+struct rogue_fwif_tracebuf_space;
+
+/**
+ * struct pvr_fw_trace_buffer - Structure representing a trace buffer
+ */
+struct pvr_fw_trace_buffer {
+	/** @buf_obj: FW buffer object representing trace buffer. */
+	struct pvr_fw_object *buf_obj;
+
+	/** @buf: Pointer to CPU mapping of trace buffer. */
+	u32 *buf;
+
+	/**
+	 * @tracebuf_space: Pointer to FW tracebuf_space structure for this
+	 *                  trace buffer.
+	 */
+	struct rogue_fwif_tracebuf_space *tracebuf_space;
+};
+
+/**
+ * struct pvr_fw_trace - Device firmware trace data
+ */
+struct pvr_fw_trace {
+	/**
+	 * @tracebuf_ctrl_obj: Object representing FW trace buffer control
+	 *                     structure.
+	 */
+	struct pvr_fw_object *tracebuf_ctrl_obj;
+
+	/**
+	 * @tracebuf_ctrl: Pointer to CPU mapping of FW trace buffer control
+	 *                 structure.
+	 */
+	struct rogue_fwif_tracebuf *tracebuf_ctrl;
+
+	/**
+	 * @buffers: Array representing the actual trace buffers owned by this
+	 *           device.
+	 */
+	struct pvr_fw_trace_buffer buffers[ROGUE_FW_THREAD_MAX];
+
+	/** @group_mask: Mask of enabled trace groups. */
+	u32 group_mask;
+};
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev);
+void pvr_fw_trace_fini(struct pvr_device *pvr_dev);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask,
+			      u32 new_mask);
+
+void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __PVR_FW_TRACE_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
new file mode 100644
index 000000000000..7877d4a404c7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -0,0 +1,1122 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+#include "pvr_vm_mips.h"
+
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
+#include <linux/compiler.h>
+#include <linux/compiler_attributes.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/iosys-map.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+
+static vm_fault_t pvr_gem_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *gem_obj = vma->vm_private_data;
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+	loff_t num_pages = gem_obj->size >> PAGE_SHIFT;
+	pgoff_t page_offset;
+	struct page *page;
+	vm_fault_t ret;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+	if (page_offset >= num_pages || WARN_ON_ONCE(!pvr_obj->pages)) {
+		ret = VM_FAULT_SIGBUS;
+	} else {
+		page = pvr_obj->pages[page_offset];
+
+		ret = vmf_insert_page(vma, vmf->address, page);
+	}
+
+	return ret;
+}
+
+static void pvr_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj = vma->vm_private_data;
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+	int err;
+
+	WARN_ON(gem_obj->import_attach);
+
+	err = pvr_gem_object_get_pages(pvr_obj);
+	WARN_ON(err);
+
+	drm_gem_vm_open(vma);
+}
+
+static void pvr_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj = vma->vm_private_data;
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	pvr_gem_object_put_pages(pvr_obj);
+	drm_gem_vm_close(vma);
+}
+
+static const struct vm_operations_struct pvr_gem_vm_ops = {
+	.fault = pvr_gem_vm_fault,
+	.open = pvr_gem_vm_open,
+	.close = pvr_gem_vm_close,
+};
+
+static void pvr_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, pvr_obj->sgt);
+	drm_gem_object_release(gem_obj);
+	kfree(pvr_obj);
+}
+
+static struct sg_table *pvr_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	if (gem_obj->import_attach)
+		return ERR_PTR(-EINVAL);
+
+	return drm_prime_pages_to_sg(gem_obj->dev, pvr_obj->pages, gem_obj->size >> PAGE_SHIFT);
+}
+
+static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+	int err;
+
+	if (gem_obj->import_attach) {
+		/* Drop the reference drm_gem_mmap_obj() acquired. */
+		drm_gem_object_put(gem_obj);
+		vma->vm_private_data = NULL;
+
+		return dma_buf_mmap(gem_obj->dma_buf, vma, 0);
+	}
+
+	if (!(pvr_obj->flags & DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = pvr_gem_object_get_pages(pvr_obj);
+	if (err)
+		goto err_out;
+
+	vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	if (!(pvr_obj->flags & PVR_BO_CPU_CACHED))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int pvr_gem_pin(struct drm_gem_object *gem_obj)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	WARN_ON(gem_obj->import_attach);
+
+	return pvr_gem_object_get_pages(pvr_obj);
+}
+
+static void pvr_gem_unpin(struct drm_gem_object *gem_obj)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	WARN_ON(gem_obj->import_attach);
+
+	pvr_gem_object_put_pages(pvr_obj);
+}
+
+static int pvr_gem_vmap(struct drm_gem_object *gem_obj, struct iosys_map *map)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+	void *cpu_ptr;
+
+	cpu_ptr = pvr_gem_object_vmap(pvr_obj, true);
+	if (IS_ERR(cpu_ptr))
+		return PTR_ERR(cpu_ptr);
+
+	iosys_map_set_vaddr(map, cpu_ptr);
+
+	return 0;
+}
+
+static void pvr_gem_vunmap(struct drm_gem_object *gem_obj, struct iosys_map *map)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+
+	pvr_gem_object_vunmap(pvr_obj, true);
+}
+
+static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
+	.free = pvr_gem_free_object,
+	.get_sg_table = pvr_gem_get_sg_table,
+	.mmap = pvr_gem_mmap,
+	.pin = pvr_gem_pin,
+	.unpin = pvr_gem_unpin,
+	.vm_ops = &pvr_gem_vm_ops,
+	.vmap = pvr_gem_vmap,
+	.vunmap = pvr_gem_vunmap,
+};
+
+static void pvr_free_fw_object(struct drm_gem_object *gem_obj)
+{
+	struct pvr_gem_object *pvr_obj = to_pvr_gem_object(gem_obj);
+	struct pvr_fw_object *fw_obj = to_pvr_fw_object(pvr_obj);
+
+	WARN_ON(gem_obj->import_attach);
+	drm_gem_object_release(gem_obj);
+	kfree(fw_obj);
+}
+
+/* FW objects may not be mmap'ed or exported. */
+static const struct drm_gem_object_funcs pvr_gem_fw_object_funcs = {
+	.free = pvr_free_fw_object,
+};
+
+/**
+ * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM
+ * mapping and/or creation flags form a valid combination.
+ * @flags: PowerVR GEM mapping/creation flags to validate.
+ *
+ * This function explicitly allows kernel-only flags. All ioctl entrypoints
+ * should do their own validation as well as relying on this function.
+ *
+ * Return:
+ *  * %true if @flags contains valid mapping and/or creation flags, or
+ *  * %false otherwise.
+ */
+static bool
+pvr_gem_object_flags_validate(u64 flags)
+{
+	static const u64 invalid_combinations[] = {
+		/*
+		 * Memory flagged as PM/FW-protected cannot be mapped to
+		 * userspace. To make this explicit, we require that the two
+		 * flags allowing each of these respective features are never
+		 * specified together.
+		 */
+		(DRM_PVR_BO_DEVICE_PM_FW_PROTECT |
+		 DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS),
+	};
+
+	int i;
+
+	/*
+	 * Check for bits set in undefined regions. Reserved regions refer to
+	 * options that can only be set by the kernel. These are explicitly
+	 * allowed in most cases, and must be checked specifically in IOCTL
+	 * callback code.
+	 */
+	if ((flags & PVR_BO_UNDEFINED_MASK) != 0)
+		return false;
+
+	/*
+	 * Check for all combinations of flags marked as invalid in the array
+	 * above.
+	 */
+	for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
+		u64 combo = invalid_combinations[i];
+
+		if ((flags & combo) == combo)
+			return false;
+	}
+
+	return true;
+}
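+
+/*
+ * Illustrative sketch (not driver code): a flag combination requesting both
+ * PM/FW protection and userspace CPU access fails validation, since the
+ * former explicitly precludes the latter:
+ *
+ *     u64 bad = DRM_PVR_BO_DEVICE_PM_FW_PROTECT |
+ *               DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS;
+ *
+ *     pvr_gem_object_flags_validate(bad); // evaluates to false
+ */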
+
+/**
+ * pvr_gem_object_into_handle() - Convert a reference to an object into a
+ * userspace-accessible handle.
+ * @pvr_obj: [IN] Target PowerVR-specific object.
+ * @pvr_file: [IN] File to associate the handle with.
+ * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if
+ * an error is encountered.
+ *
+ * If an error is encountered, ownership of @pvr_obj will not have been
+ * transferred. If this function succeeds, however, further use of @pvr_obj is
+ * considered undefined behaviour unless another reference to it is explicitly
+ * held.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while attempting to allocate a handle on @pvr_file.
+ */
+int
+pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+			   struct pvr_file *pvr_file, u32 *handle)
+{
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+	struct drm_file *file = from_pvr_file(pvr_file);
+
+	u32 new_handle;
+	int err;
+
+	err = drm_gem_handle_create(file, gem_obj, &new_handle);
+	if (err)
+		goto err_out;
+
+	/*
+	 * Release our reference to @pvr_obj, effectively transferring
+	 * ownership to the handle.
+	 */
+	pvr_gem_object_put(pvr_obj);
+
+	/*
+	 * Do not store the new handle in @handle until no more errors can
+	 * occur.
+	 */
+	*handle = new_handle;
+
+	return 0;
+
+err_out:
+	return err;
+}
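+
+/*
+ * Illustrative usage sketch (hypothetical caller; error handling trimmed):
+ *
+ *     err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &handle);
+ *     if (err) {
+ *         pvr_gem_object_put(pvr_obj); // ownership was not transferred
+ *         return err;
+ *     }
+ *     // The handle now owns the reference; @pvr_obj must not be used
+ *     // again unless another reference is explicitly held.
+ */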
+
+/**
+ * pvr_gem_object_from_handle() - Obtain a reference to an object from a
+ * userspace handle.
+ * @pvr_file: PowerVR-specific file to which @handle is associated.
+ * @handle: Userspace handle referencing the target object.
+ *
+ * On return, @handle always maintains its reference to the requested object
+ * (if it had one in the first place). If this function succeeds, the returned
+ * object will hold an additional reference. When the caller is finished with
+ * the returned object, they should call pvr_gem_object_put() on it to release
+ * this reference.
+ *
+ * Return:
+ *  * A pointer to the requested PowerVR-specific object on success, or
+ *  * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
+{
+	struct drm_file *file = from_pvr_file(pvr_file);
+	struct drm_gem_object *gem_obj;
+
+	gem_obj = drm_gem_object_lookup(file, handle);
+	if (!gem_obj)
+		return NULL;
+
+	return to_pvr_gem_object(gem_obj);
+}
+
+static int
+pvr_gem_object_get_pages_locked(struct pvr_gem_object *pvr_obj)
+{
+	struct drm_gem_object *obj = from_pvr_gem_object(pvr_obj);
+	struct page **pages;
+	struct sg_table *sgt;
+	int err;
+
+	lockdep_assert_held(&pvr_obj->lock);
+
+	if (obj->import_attach) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if ((++pvr_obj->pages_ref_count) == 1) {
+		WARN_ON(pvr_obj->pages);
+
+		pages = drm_gem_get_pages(obj);
+		if (IS_ERR(pages)) {
+			err = PTR_ERR(pages);
+			goto err_dec_ref_count;
+		}
+
+		sgt = drm_prime_pages_to_sg(obj->dev, pages, obj->size >> PAGE_SHIFT);
+		if (IS_ERR(sgt)) {
+			err = PTR_ERR(sgt);
+			goto err_put_pages;
+		}
+
+		err = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+		if (err)
+			goto err_free_sgt;
+
+		pvr_obj->pages = pages;
+		pvr_obj->sgt = sgt;
+	} else {
+		WARN_ON(!pvr_obj->pages);
+	}
+
+	return 0;
+
+err_free_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+
+err_put_pages:
+	drm_gem_put_pages(obj, pages, false, false);
+
+err_dec_ref_count:
+	pvr_obj->pages_ref_count--;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_gem_object_get_pages() - Get pages associated with a &struct pvr_gem_object
+ * @pvr_obj: Target object
+ *
+ * This will fill out the pages array of the object. This must be called before
+ * the object is mapped to userspace.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by drm_gem_get_pages(), or
+ *  * Any error returned by drm_prime_pages_to_sg(), or
+ *  * Any error returned by dma_map_sgtable().
+ */
+int
+pvr_gem_object_get_pages(struct pvr_gem_object *pvr_obj)
+{
+	int err;
+
+	mutex_lock(&pvr_obj->lock);
+	err = pvr_gem_object_get_pages_locked(pvr_obj);
+	mutex_unlock(&pvr_obj->lock);
+
+	return err;
+}
+
+static void
+pvr_gem_object_put_pages_locked(struct pvr_gem_object *pvr_obj)
+{
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+
+	lockdep_assert_held(&pvr_obj->lock);
+
+	if (gem_obj->import_attach)
+		return;
+
+	if (pvr_obj->pages && (--pvr_obj->pages_ref_count) == 0) {
+		sg_free_table(pvr_obj->sgt);
+		kfree(pvr_obj->sgt);
+		drm_gem_put_pages(gem_obj, pvr_obj->pages, true, true);
+		pvr_obj->sgt = NULL;
+		pvr_obj->pages = NULL;
+	}
+}
+
+/**
+ * pvr_gem_object_put_pages() - Release pages associated with a &struct
+ *                              pvr_gem_object
+ * @pvr_obj: Target object
+ */
+void
+pvr_gem_object_put_pages(struct pvr_gem_object *pvr_obj)
+{
+	mutex_lock(&pvr_obj->lock);
+	pvr_gem_object_put_pages_locked(pvr_obj);
+	mutex_unlock(&pvr_obj->lock);
+}
+
+/**
+ * pvr_gem_object_vmap_prot() - Map a PowerVR GEM object into CPU virtual
+ * address space without using information from the object's flags.
+ * @pvr_obj: Target PowerVR GEM object.
+ * @sync_to_cpu: Specifies whether the buffer should be synced to the CPU
+ * immediately after mapping.
+ * @prot: Page protection options for the mapping.
+ *
+ * Once the caller is finished with the CPU mapping, they must call
+ * pvr_gem_object_vunmap() on @pvr_obj.
+ *
+ * Unlike pvr_gem_object_vmap(), this function does NOT use information from
+ * the flags on @pvr_obj to determine page protection options. You probably
+ * want to use pvr_gem_object_vmap() instead. If you really need to use this
+ * function, be absolutely sure that @prot is compatible with the flags on
+ * @pvr_obj. There are no safeguards!
+ *
+ * Return:
+ *  * A pointer to the CPU mapping on success,
+ *  * -%ENOMEM if the mapping fails, or
+ *  * Any error encountered while attempting to acquire a reference to the
+ *    backing pages for @pvr_obj.
+ */
+static void *
+pvr_gem_object_vmap_prot(struct pvr_gem_object *pvr_obj, bool sync_to_cpu,
+			 pgprot_t prot)
+{
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+	/* The size of @pvr_obj is always CPU page-aligned. */
+	size_t nr_pages = pvr_gem_object_size(pvr_obj) >> PAGE_SHIFT;
+
+	int err;
+
+	mutex_lock(&pvr_obj->lock);
+
+	if ((++pvr_obj->vmap_ref_count) == 1) {
+		if (gem_obj->import_attach) {
+			struct iosys_map map;
+
+			err = dma_buf_vmap(gem_obj->import_attach->dmabuf, &map);
+			if (err)
+				goto err_unlock;
+
+			pvr_obj->vmap_cpu_addr = map.vaddr;
+		} else {
+			err = pvr_gem_object_get_pages_locked(pvr_obj);
+			if (err)
+				goto err_unlock;
+
+			pvr_obj->vmap_cpu_addr = vmap(pvr_obj->pages, nr_pages, VM_MAP, prot);
+			if (!pvr_obj->vmap_cpu_addr) {
+				err = -ENOMEM;
+				goto err_put_pages;
+			}
+		}
+	}
+
+	/*
+	 * There's no need for sync operations on the CPU cache if we're not
+	 * using the CPU cache.
+	 */
+	if ((pvr_obj->flags & PVR_BO_CPU_CACHED) && sync_to_cpu) {
+		struct device *dev = gem_obj->dev->dev;
+
+		dma_sync_sgtable_for_cpu(dev, pvr_obj->sgt, DMA_BIDIRECTIONAL);
+	}
+
+	mutex_unlock(&pvr_obj->lock);
+
+	return pvr_obj->vmap_cpu_addr;
+
+err_put_pages:
+	pvr_gem_object_put_pages_locked(pvr_obj);
+
+err_unlock:
+	/* Roll back the reference taken at the top of this function. */
+	--pvr_obj->vmap_ref_count;
+	mutex_unlock(&pvr_obj->lock);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
+ * space.
+ * @pvr_obj: Target PowerVR GEM object.
+ * @sync_to_cpu: Specifies whether the buffer should be synced to the CPU
+ * immediately after mapping.
+ *
+ * Once the caller is finished with the CPU mapping, they must call
+ * pvr_gem_object_vunmap() on @pvr_obj.
+ *
+ * If @pvr_obj is not using the CPU cache, @sync_to_cpu is ignored.
+ *
+ * Return:
+ *  * A pointer to the CPU mapping on success,
+ *  * -%ENOMEM if the mapping fails, or
+ *  * Any error encountered while attempting to acquire a reference to the
+ *    backing pages for @pvr_obj.
+ */
+void *
+pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj, bool sync_to_cpu)
+{
+	pgprot_t prot;
+
+	/* Determine parameters from @pvr_obj CPU caching strategy. */
+	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+		prot = PAGE_KERNEL;
+	} else {
+		/* The default caching strategy is write-combined. */
+		prot = pgprot_writecombine(PAGE_KERNEL);
+	}
+
+	return pvr_gem_object_vmap_prot(pvr_obj, sync_to_cpu, prot);
+}
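+
+/*
+ * Illustrative pairing sketch (hypothetical caller): every successful call
+ * to pvr_gem_object_vmap() must be balanced by pvr_gem_object_vunmap(),
+ * which also drops the pages reference taken by the mapping:
+ *
+ *     void *cpu_ptr = pvr_gem_object_vmap(pvr_obj, true);
+ *
+ *     if (IS_ERR(cpu_ptr))
+ *         return PTR_ERR(cpu_ptr);
+ *     memcpy(cpu_ptr, data, len);           // CPU access via the mapping
+ *     pvr_gem_object_vunmap(pvr_obj, true); // sync back to the device
+ */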
+
+/**
+ * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
+ * address space.
+ * @pvr_obj: Target PowerVR GEM object.
+ * @sync_to_device: Specifies whether the buffer should be synced to the device
+ * immediately before unmapping from the CPU.
+ *
+ * If @pvr_obj is not using the CPU cache, @sync_to_device is ignored.
+ */
+void
+pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj, bool sync_to_device)
+{
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+
+	mutex_lock(&pvr_obj->lock);
+
+	if (WARN_ON(!pvr_obj->vmap_ref_count || !pvr_obj->vmap_cpu_addr)) {
+		mutex_unlock(&pvr_obj->lock);
+		return;
+	}
+
+	/*
+	 * There's no need for sync operations on the CPU cache if we're not
+	 * using the CPU cache.
+	 */
+	if ((pvr_obj->flags & PVR_BO_CPU_CACHED) && sync_to_device) {
+		struct device *dev = gem_obj->dev->dev;
+
+		dma_sync_sgtable_for_device(dev, pvr_obj->sgt,
+					    DMA_BIDIRECTIONAL);
+	}
+
+	if ((--pvr_obj->vmap_ref_count) == 0) {
+		if (gem_obj->import_attach) {
+			struct iosys_map map = IOSYS_MAP_INIT_VADDR(pvr_obj->vmap_cpu_addr);
+
+			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
+		} else {
+			vunmap(pvr_obj->vmap_cpu_addr);
+
+			pvr_gem_object_put_pages_locked(pvr_obj);
+		}
+
+		pvr_obj->vmap_cpu_addr = NULL;
+	}
+
+	mutex_unlock(&pvr_obj->lock);
+}
+
+/**
+ * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while attempting to map @pvr_obj to the CPU (see
+ *    pvr_gem_object_vmap_prot()).
+ */
+static int
+pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
+{
+	void *cpu_ptr;
+	int err;
+
+	/*
+	 * We always map writecombined here so there's no need to flush the
+	 * CPU cache afterwards.
+	 */
+	cpu_ptr = pvr_gem_object_vmap_prot(pvr_obj, false,
+					   pgprot_writecombine(PAGE_KERNEL));
+	if (IS_ERR(cpu_ptr)) {
+		err = PTR_ERR(cpu_ptr);
+		goto err_out;
+	}
+
+	memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));
+
+	pvr_gem_object_vunmap(pvr_obj, false);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_gem_object_init() - Initialises a PowerVR-specific buffer object.
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_obj: PowerVR buffer object to initialise
+ * @size: Size of the object to allocate in bytes. Must be greater than zero.
+ * Any value which is not an exact multiple of the system page size will be
+ * rounded up to satisfy this condition.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @funcs: Pointer to &struct drm_gem_object_funcs to assign to this object.
+ * @is_imported: True if buffer object represents an imported buffer.
+ *
+ * The created object may be larger than @size, but can never be smaller. To
+ * get the exact size, call pvr_gem_object_size() on the returned pointer.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @size is zero or @flags is not valid,
+ *  * -%ENOMEM if sufficient physical memory cannot be allocated,
+ *  * Any other error returned by drm_gem_object_init(), or
+ *  * Any other error returned by drm_gem_create_mmap_offset().
+ */
+static int
+pvr_gem_object_init(struct pvr_device *pvr_dev, struct pvr_gem_object *pvr_obj, size_t size,
+		    u64 flags, const struct drm_gem_object_funcs *funcs, bool is_imported)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct drm_gem_object *gem_obj;
+	int err;
+
+	/* Verify @size and @flags before continuing. */
+	if (size == 0 || !pvr_gem_object_flags_validate(flags)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	size = PAGE_ALIGN(size);
+
+	/* FIXME: Compute any kernel-only options and apply them to @flags. */
+
+	gem_obj = from_pvr_gem_object(pvr_obj);
+
+	if (is_imported) {
+		drm_gem_private_object_init(drm_dev, gem_obj, size);
+	} else {
+		err = drm_gem_object_init(drm_dev, gem_obj, size);
+		if (err)
+			goto err_out;
+
+		/*
+		 * Our buffers are kept pinned, so allocating them from the MOVABLE zone is a
+		 * really bad idea, and conflicts with CMA. See comments above new_inode() why this
+		 * is required _and_ expected if you're going to pin these pages.
+		 */
+		mapping_set_gfp_mask(gem_obj->filp->f_mapping, GFP_HIGHUSER |
+				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	}
+
+	err = drm_gem_create_mmap_offset(gem_obj);
+	if (err)
+		goto err_release;
+
+	pvr_obj->pvr_dev = pvr_dev;
+	mutex_init(&pvr_obj->lock);
+
+	/* Safe to cast away the const-qualifier during initialization. */
+	*(u64 *)&pvr_obj->flags = flags;
+
+	gem_obj->funcs = funcs;
+
+	/*
+	 * Do this last because pvr_gem_object_zero() requires a fully
+	 * configured instance of struct pvr_gem_object.
+	 */
+	if (flags & DRM_PVR_BO_CREATE_ZEROED)
+		pvr_gem_object_zero(pvr_obj);
+
+	return 0;
+
+err_release:
+	drm_gem_object_release(gem_obj);
+
+err_out:
+	return err;
+}
+
+static struct pvr_gem_object *
+pvr_gem_object_create_internal(struct pvr_device *pvr_dev, size_t size, u64 flags, bool is_imported)
+{
+	struct pvr_gem_object *pvr_obj;
+	int err;
+
+	/* Allocate a powervr-specific buffer object, which includes a &struct drm_gem_object. */
+	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
+	if (!pvr_obj) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = pvr_gem_object_init(pvr_dev, pvr_obj, size, flags, &pvr_gem_object_funcs,
+				  is_imported);
+	if (err)
+		goto err_kfree_pvr_obj;
+
+	return pvr_obj;
+
+err_kfree_pvr_obj:
+	kfree(pvr_obj);
+
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
+ * @pvr_dev: Target PowerVR device.
+ * @size: Size of the object to allocate in bytes. Must be greater than zero.
+ * Any value which is not an exact multiple of the system page size will be
+ * rounded up to satisfy this condition.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ *
+ * The created object may be larger than @size, but can never be smaller. To
+ * get the exact size, call pvr_gem_object_size() on the returned pointer.
+ *
+ * Return:
+ *  * The newly-minted PowerVR-specific buffer object on success,
+ *  * -%EINVAL if @size is zero or @flags is not valid,
+ *  * -%ENOMEM if sufficient physical memory cannot be allocated, or
+ *  * Any other error returned by drm_gem_create_mmap_offset().
+ */
+struct pvr_gem_object *
+pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
+{
+	return pvr_gem_object_create_internal(pvr_dev, size, flags, false);
+}
+
+/**
+ * pvr_gem_fw_vmap() - Map a FW object in firmware address space
+ * @pvr_dev: Device pointer.
+ * @fw_obj: FW object to map.
+ * @dev_addr: Desired address in device space, if a specific address is
+ *            required. 0 otherwise.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if @fw_obj is already mapped in the firmware address space, or
+ *  * Any error returned by the DRM MM allocator or the FW processor's
+ *    vm_map() callback.
+ */
+static int
+pvr_gem_fw_vmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj,
+		u64 dev_addr)
+{
+	struct pvr_gem_object *pvr_obj = from_pvr_fw_object(fw_obj);
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	int err;
+
+	err = pvr_gem_object_get_pages(pvr_obj);
+	if (err)
+		goto err_out;
+
+	spin_lock(&fw_dev->fw_mm_lock);
+
+	if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+		err = -EINVAL;
+		goto err_unlock;
+	}
+
+	if (!dev_addr) {
+		/*
+		 * Allocate from the main heap only (firmware heap minus
+		 * config space).
+		 */
+		err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node,
+						  gem_obj->size, 0, 0,
+						  fw_dev->fw_heap_info.gpu_addr,
+						  fw_dev->fw_heap_info.gpu_addr +
+						  fw_dev->fw_heap_info.size, 0);
+		if (err)
+			goto err_unlock;
+	} else {
+		fw_obj->fw_mm_node.start = dev_addr;
+		fw_obj->fw_mm_node.size = gem_obj->size;
+		err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node);
+		if (err)
+			goto err_unlock;
+	}
+
+	spin_unlock(&fw_dev->fw_mm_lock);
+
+	/* Map object on GPU. */
+	err = fw_dev->funcs->vm_map(pvr_dev, fw_obj);
+	if (err)
+		goto err_remove_node;
+
+	fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base);
+
+	return 0;
+
+err_remove_node:
+	spin_lock(&fw_dev->fw_mm_lock);
+	drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+err_unlock:
+	spin_unlock(&fw_dev->fw_mm_lock);
+
+	pvr_gem_object_put_pages(pvr_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_gem_fw_vunmap() - Unmap a previously mapped FW object
+ * @fw_obj: FW object to unmap.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if object is not currently mapped.
+ */
+static int
+pvr_gem_fw_vunmap(struct pvr_fw_object *fw_obj)
+{
+	struct pvr_gem_object *pvr_obj = from_pvr_fw_object(fw_obj);
+	struct drm_gem_object *gem_obj = from_pvr_gem_object(pvr_obj);
+	struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	int err;
+
+	fw_dev->funcs->vm_unmap(pvr_dev, fw_obj);
+
+	spin_lock(&fw_dev->fw_mm_lock);
+
+	if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) {
+		spin_unlock(&fw_dev->fw_mm_lock);
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	drm_mm_remove_node(&fw_obj->fw_mm_node);
+
+	spin_unlock(&fw_dev->fw_mm_lock);
+
+	pvr_gem_object_put_pages(pvr_obj);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_gem_create_fw_object_common(struct pvr_device *pvr_dev, size_t size,
+				u64 flags, u64 dev_addr,
+				struct pvr_fw_object **fw_obj_out)
+{
+	struct pvr_fw_object *fw_obj;
+	int err;
+
+	/* %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implicit for FW objects. */
+	flags |= DRM_PVR_BO_DEVICE_PM_FW_PROTECT;
+
+	fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL);
+	if (!fw_obj) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/*
+	 * All firmware objects use the same mapping flags. See
+	 * %PVR_BO_FW_FLAGS_* for details.
+	 */
+	err = pvr_gem_object_init(pvr_dev, &fw_obj->base, size, flags, &pvr_gem_fw_object_funcs,
+				  false);
+	if (err)
+		goto err_fw_obj_free;
+
+	err = pvr_gem_fw_vmap(pvr_dev, fw_obj, dev_addr);
+	if (err)
+		goto err_release_object;
+
+	*fw_obj_out = fw_obj;
+
+	return 0;
+
+err_release_object:
+	/*
+	 * The put releases @fw_obj via pvr_free_fw_object(), so it must not
+	 * be freed again here.
+	 */
+	pvr_gem_object_put(&fw_obj->base);
+	goto err_out;
+
+err_fw_obj_free:
+	kfree(fw_obj);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_gem_create_fw_object() - Create a FW object and map to firmware
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_gem_create_fw_object_common().
+ */
+int
+pvr_gem_create_fw_object(struct pvr_device *pvr_dev, size_t size, u64 flags,
+			 struct pvr_fw_object **fw_obj_out)
+{
+	return pvr_gem_create_fw_object_common(pvr_dev, size, flags, 0,
+					       fw_obj_out);
+}
+
+static void *
+pvr_gem_create_and_map_fw_common(struct pvr_device *pvr_dev, size_t size,
+				 u64 flags, u64 dev_addr,
+				 struct pvr_fw_object **fw_obj_out)
+{
+	struct pvr_fw_object *fw_obj;
+	void *cpu_ptr;
+	int err;
+
+	err = pvr_gem_create_fw_object_common(pvr_dev, size, flags, dev_addr,
+					      &fw_obj);
+	if (err)
+		goto err_out;
+
+	cpu_ptr = pvr_fw_object_vmap(fw_obj, true);
+	if (IS_ERR(cpu_ptr)) {
+		err = PTR_ERR(cpu_ptr);
+		goto err_put_object;
+	}
+
+	*fw_obj_out = fw_obj;
+
+	return cpu_ptr;
+
+err_put_object:
+	pvr_fw_object_release(fw_obj);
+
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_create_and_map_fw_object() - Create a FW object and map to firmware
+ *                                      and CPU
+ * @pvr_dev: PowerVR device pointer.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ *  * Pointer to CPU mapping of newly created object, or
+ *  * Any error returned by pvr_gem_create_fw_object(), or
+ *  * Any error returned by pvr_gem_object_vmap().
+ */
+void *
+pvr_gem_create_and_map_fw_object(struct pvr_device *pvr_dev, size_t size,
+				 u64 flags, struct pvr_fw_object **fw_obj_out)
+{
+	return pvr_gem_create_and_map_fw_common(pvr_dev, size, flags, 0,
+						fw_obj_out);
+}
+
+/**
+ * pvr_gem_create_and_map_fw_object_offset() - Create a FW object and map to
+ *                                             firmware at the provided offset
+ *                                             and to the CPU.
+ * @pvr_dev: PowerVR device pointer.
+ * @dev_offset: Base address of desired FW mapping, offset from start of FW heap.
+ * @size: Size of object, in bytes.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ * @fw_obj_out: Pointer to location to store created object pointer.
+ *
+ * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently,
+ * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS
+ * set.
+ *
+ * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU
+ * mapping.
+ *
+ * Returns:
+ *  * Pointer to CPU mapping of newly created object, or
+ *  * Any error returned by pvr_gem_create_fw_object(), or
+ *  * Any error returned by pvr_gem_object_vmap().
+ */
+void *
+pvr_gem_create_and_map_fw_object_offset(struct pvr_device *pvr_dev,
+					u32 dev_offset, size_t size, u64 flags,
+					struct pvr_fw_object **fw_obj_out)
+{
+	u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset;
+
+	return pvr_gem_create_and_map_fw_common(pvr_dev, size, flags, dev_addr,
+						fw_obj_out);
+}
+
+/**
+ * pvr_gem_get_dma_addr() - Get DMA address for given offset in object
+ * @pvr_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ *    range for this object.
+ */
+int
+pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+		     dma_addr_t *dma_addr_out)
+{
+	u32 accumulated_offset = 0;
+	struct scatterlist *sgl;
+	unsigned int sgt_idx;
+
+	if (!pvr_obj->sgt)
+		return -EINVAL;
+
+	for_each_sgtable_dma_sg(pvr_obj->sgt, sgl, sgt_idx) {
+		u32 new_offset = accumulated_offset + sg_dma_len(sgl);
+
+		if (offset >= accumulated_offset && offset < new_offset) {
+			*dma_addr_out = sg_dma_address(sgl) +
+					(offset - accumulated_offset);
+			return 0;
+		}
+
+		accumulated_offset = new_offset;
+	}
+
+	return -EINVAL;
+}
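+
+/*
+ * Illustrative sketch (hypothetical caller): translating an offset within a
+ * backed object into a bus address, e.g. when patching a device-visible
+ * structure:
+ *
+ *     dma_addr_t addr;
+ *
+ *     if (pvr_gem_get_dma_addr(pvr_obj, offset, &addr))
+ *         return -EINVAL; // object not backed, or offset out of range
+ *     // @addr is backed by the object's DMA mapping; keep the pages
+ *     // referenced for as long as it is in use.
+ */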
+
+/**
+ * pvr_fw_object_release() - Release FW object and unmap from FW space
+ * @fw_obj: Object to release.
+ */
+void pvr_fw_object_release(struct pvr_fw_object *fw_obj)
+{
+	WARN_ON(pvr_gem_fw_vunmap(fw_obj));
+	pvr_fw_object_put(fw_obj);
+}
+
+struct drm_gem_object *
+__pvr_gem_prime_import_sg_table(struct drm_device *drm_dev,
+				struct dma_buf_attachment *attach,
+				struct sg_table *sgt)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	size_t size = attach->dmabuf->size;
+	struct pvr_gem_object *pvr_obj;
+
+	pvr_obj = pvr_gem_object_create_internal(pvr_dev, size, 0, true);
+	if (IS_ERR(pvr_obj))
+		return ERR_CAST(pvr_obj);
+
+	pvr_obj->sgt = sgt;
+
+	return from_pvr_gem_object(pvr_obj);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h
new file mode 100644
index 000000000000..0c22e0d0e744
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_gem.h
@@ -0,0 +1,386 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_GEM_H__
+#define __PVR_GEM_H__
+
+#include "pvr_fw.h"
+#include "pvr_rogue_heap_config.h"
+#include "pvr_rogue_meta.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/compiler_attributes.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/**
+ * DOC: Flags for DRM_IOCTL_PVR_CREATE_BO (kernel-only)
+ *
+ * Kernel-only values allowed in &pvr_gem_object->flags. The majority of options
+ * for this field are specified in the UAPI header "pvr_drm.h" with a
+ * DRM_PVR_BO_ prefix. To distinguish these internal options (which must exist
+ * in ranges marked as "reserved" in the UAPI header), we drop the DRM prefix.
+ * The public options should be used directly, DRM prefix and all.
+ *
+ * To avoid potentially confusing gaps in the UAPI options, these kernel-only
+ * options are specified "in reverse", starting at bit 63.
+ *
+ * We use "reserved" to refer to bits defined here and not exposed in the UAPI.
+ * Bits not defined anywhere are "undefined".
+ *
+ * Creation options
+ *    These use the prefix PVR_BO_CREATE_.
+ *
+ *    *There are currently no kernel-only flags in this group.*
+ *
+ * Device mapping options
+ *    These use the prefix PVR_BO_DEVICE_.
+ *
+ *    *There are currently no kernel-only flags in this group.*
+ *
+ * CPU mapping options
+ *    These use the prefix PVR_BO_CPU_.
+ *
+ *    :CACHED: By default, all GEM objects are mapped write-combined on the
+ *       CPU. Set this flag to override this behaviour and map the object
+ *       cached.
+ */
+#define PVR_BO_CPU_CACHED BIT_ULL(63)
+
+/* Bits 62..4 are undefined. */
+/* Bits 3..0 are defined in the UAPI. */
+
+/* Other utilities. */
+#define PVR_BO_UNDEFINED_MASK GENMASK_ULL(62, 4)
+#define PVR_BO_RESERVED_MASK (PVR_BO_UNDEFINED_MASK | GENMASK_ULL(63, 63))
+
+/*
+ * All firmware-mapped memory uses (mostly) the same flags. Specifically,
+ * firmware-mapped memory should be:
+ *  * Read/write on the device,
+ *  * Read/write on the CPU, and
+ *  * Write-combined on the CPU.
+ *
+ * The only variation is in caching on the device.
+ */
+#define PVR_BO_FW_FLAGS_DEVICE_CACHED (ULL(0))
+#define PVR_BO_FW_FLAGS_DEVICE_UNCACHED DRM_PVR_BO_DEVICE_BYPASS_CACHE
+
+/**
+ * struct pvr_gem_object - powervr-specific wrapper for &struct drm_gem_object
+ */
+struct pvr_gem_object {
+	/**
+	 * @base: The underlying &struct drm_gem_object.
+	 *
+	 * Do not access this member directly, instead call
+	 * from_pvr_gem_object().
+	 */
+	struct drm_gem_object base;
+
+	/** @pvr_dev: Owning PowerVR device. */
+	struct pvr_device *pvr_dev;
+
+	/**
+	 * @lock: Mutex protecting @pages_ref_count, @vmap_ref_count and
+	 *        @vmap_cpu_addr, and writes to @pages and @sgt.
+	 */
+	struct mutex lock;
+
+	/**
+	 * @pages_ref_count: Reference count for @pages. @lock must be held when
+	 *                   accessing.
+	 */
+	int pages_ref_count;
+
+	/**
+	 * @pages: Array of page structures representing the memory backing
+	 *         this object. @lock must be held when writing.
+	 *         pvr_gem_object_get_pages() must be called before reading.
+	 */
+	struct page **pages;
+
+	/**
+	 * @sgt: Scatter-gather table representing the memory backing this
+	 *       object. @lock must be held when writing.
+	 *       pvr_gem_object_get_pages() must be called before reading.
+	 */
+	struct sg_table *sgt;
+
+	/**
+	 * @flags: Options set at creation-time. Some of these options apply to
+	 * the creation operation itself (which are stored here for reference)
+	 * with the remainder used for mapping options to both the device and
+	 * CPU. These are used every time this object is mapped, and may not
+	 * be changed after creation.
+	 *
+	 * Must be a combination of DRM_PVR_BO_* and/or PVR_BO_* flags.
+	 *
+	 * .. note::
+	 *
+	 *    This member is declared const to indicate that none of these
+	 *    options may change or be changed throughout the object's
+	 *    lifetime.
+	 */
+	const u64 flags;
+
+	/**
+	 * @vmap_ref_count: Reference count for @vmap_cpu_addr. @lock must be
+	 *                  held when accessing.
+	 */
+	int vmap_ref_count;
+
+	/**
+	 * @vmap_cpu_addr: CPU address of vmap mapping. Will be %NULL if object
+	 *                 is not mapped. @lock must be held when accessing.
+	 */
+	void *vmap_cpu_addr;
+};
+
+/**
+ * struct pvr_fw_object - container for firmware memory allocations
+ */
+struct pvr_fw_object {
+	/** @base: The underlying PVR GEM object backing this allocation. */
+	struct pvr_gem_object base;
+
+	/**
+	 * @fw_mm_node: Node representing mapping in FW address space.
+	 *              Protected by &pvr_fw_device.fw_mm_lock.
+	 */
+	struct drm_mm_node fw_mm_node;
+
+	/**
+	 * @fw_addr_offset: Virtual address offset of firmware mapping. Only
+	 *                  valid while the object is mapped in the firmware
+	 *                  address space (see pvr_gem_fw_vmap()).
+	 */
+	u32 fw_addr_offset;
+};
+
+static __always_inline struct drm_gem_object *
+from_pvr_gem_object(struct pvr_gem_object *pvr_obj)
+{
+	return &pvr_obj->base;
+}
+
+static __always_inline struct pvr_gem_object *
+to_pvr_gem_object(struct drm_gem_object *gem_obj)
+{
+	return container_of(gem_obj, struct pvr_gem_object, base);
+}
+
+static __always_inline struct pvr_gem_object *
+from_pvr_fw_object(struct pvr_fw_object *fw_obj)
+{
+	return &fw_obj->base;
+}
+
+static __always_inline struct pvr_fw_object *
+to_pvr_fw_object(struct pvr_gem_object *pvr_obj)
+{
+	return container_of(pvr_obj, struct pvr_fw_object, base);
+}
+
+/* Functions defined in pvr_gem.c */
+
+struct drm_gem_object *
+__pvr_gem_prime_import_sg_table(struct drm_device *drm_dev,
+				struct dma_buf_attachment *attach,
+				struct sg_table *sgt);
+
+struct pvr_gem_object *pvr_gem_object_create(struct pvr_device *pvr_dev,
+					     size_t size, u64 flags);
+
+int pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+			       struct pvr_file *pvr_file, u32 *handle);
+struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
+						  u32 handle);
+
+int pvr_gem_object_get_pages(struct pvr_gem_object *pvr_obj);
+void pvr_gem_object_put_pages(struct pvr_gem_object *pvr_obj);
+
+void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj, bool sync_to_cpu);
+void pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj, bool sync_to_device);
+
+int pvr_gem_create_fw_object(struct pvr_device *pvr_dev, size_t size, u64 flags,
+			     struct pvr_fw_object **fw_obj_out);
+
+void *pvr_gem_create_and_map_fw_object(struct pvr_device *pvr_dev, size_t size,
+				       u64 flags,
+				       struct pvr_fw_object **fw_obj_out);
+
+void *
+pvr_gem_create_and_map_fw_object_offset(struct pvr_device *pvr_dev,
+					u32 dev_offset, size_t size, u64 flags,
+					struct pvr_fw_object **fw_obj_out);
+
+int pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+			 dma_addr_t *dma_addr_out);
+
+/**
+ * pvr_gem_object_get() - Acquire reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to acquire reference on.
+ */
+static __always_inline void
+pvr_gem_object_get(struct pvr_gem_object *pvr_obj)
+{
+	drm_gem_object_get(from_pvr_gem_object(pvr_obj));
+}
+
+/**
+ * pvr_gem_object_put() - Release reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to release reference on.
+ */
+static __always_inline void
+pvr_gem_object_put(struct pvr_gem_object *pvr_obj)
+{
+	drm_gem_object_put(from_pvr_gem_object(pvr_obj));
+}
+
+static __always_inline size_t
+pvr_gem_object_size(struct pvr_gem_object *pvr_obj)
+{
+	return from_pvr_gem_object(pvr_obj)->size;
+}
+
+/**
+ * pvr_gem_object_is_imported() - Return whether an object is imported
+ * @pvr_obj: Pointer to object to test.
+ *
+ * Returns:
+ *  * %true if object is imported, or
+ *  * %false if object is not imported.
+ */
+static __always_inline bool
+pvr_gem_object_is_imported(struct pvr_gem_object *pvr_obj)
+{
+	return pvr_obj->base.import_attach;
+}
+
+void pvr_fw_object_release(struct pvr_fw_object *fw_obj);
+
+static __always_inline void *
+pvr_fw_object_vmap(struct pvr_fw_object *fw_obj, bool sync_to_cpu)
+{
+	return pvr_gem_object_vmap(from_pvr_fw_object(fw_obj), sync_to_cpu);
+}
+
+static __always_inline void
+pvr_fw_object_vunmap(struct pvr_fw_object *fw_obj, bool sync_to_device)
+{
+	pvr_gem_object_vunmap(from_pvr_fw_object(fw_obj), sync_to_device);
+}
+
+/**
+ * pvr_fw_object_get() - Acquire reference on pvr_fw_object
+ * @fw_obj: Pointer to object to acquire reference on.
+ */
+static __always_inline void
+pvr_fw_object_get(struct pvr_fw_object *fw_obj)
+{
+	pvr_gem_object_get(from_pvr_fw_object(fw_obj));
+}
+
+/**
+ * pvr_fw_object_put() - Release reference on pvr_fw_object
+ * @fw_obj: Pointer to object to release reference on.
+ *
+ * Note: This function must _not_ be used to release the reference obtained at
+ * creation time via pvr_gem_create_fw_object(). Use pvr_fw_object_release()
+ * instead.
+ */
+static __always_inline void
+pvr_fw_object_put(struct pvr_fw_object *fw_obj)
+{
+	pvr_gem_object_put(from_pvr_fw_object(fw_obj));
+}
+
+/**
+ * pvr_fw_get_dma_addr() - Get DMA address for given offset in firmware object
+ * @fw_obj: Pointer to object to lookup address in.
+ * @offset: Offset within object to lookup address at.
+ * @dma_addr_out: Pointer to location to store DMA address.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL if object is not currently backed, or if @offset is out of valid
+ *    range for this object.
+ */
+static __always_inline int
+pvr_fw_get_dma_addr(struct pvr_fw_object *fw_obj, u32 offset, dma_addr_t *dma_addr_out)
+{
+	return pvr_gem_get_dma_addr(from_pvr_fw_object(fw_obj), offset, dma_addr_out);
+}
+
+/**
+ * pvr_gem_get_fw_addr_offset() - Return address of object in firmware address space, with given
+ *                                offset.
+ * @fw_obj: Pointer to object.
+ * @offset: Desired offset from start of object.
+ * @fw_addr_out: Location to store address to.
+ */
+static __always_inline void
+pvr_gem_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
+{
+	struct pvr_gem_object *pvr_obj = from_pvr_fw_object(fw_obj);
+	struct pvr_device *pvr_dev = pvr_obj->pvr_dev;
+
+	*fw_addr_out = pvr_dev->fw_dev.funcs->get_fw_addr_with_offset(fw_obj, offset);
+}
+
+/**
+ * pvr_gem_get_fw_addr() - Return address of object in firmware address space
+ * @fw_obj: Pointer to object.
+ * @fw_addr_out: Location to store address to.
+ */
+static __always_inline void
+pvr_gem_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
+{
+	pvr_gem_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
+}
+
+/**
+ * pvr_gem_get_fw_gpu_addr() - Return address of object in GPU address space
+ * @fw_obj: Pointer to object.
+ * @gpu_addr_out: Location to store address to.
+ *
+ * Note that this function is not valid if the firmware processor is MIPS.
+ *
+ * Returns:
+ *  * %true on success, or
+ *  * %false if object is not mapped to firmware address space.
+ */
+static __always_inline bool
+pvr_gem_get_fw_gpu_addr(struct pvr_fw_object *fw_obj, u64 *gpu_addr_out)
+{
+	/* FIXME: Move to META-specific file */
+	struct pvr_gem_object *pvr_obj = from_pvr_fw_object(fw_obj);
+	struct pvr_device *pvr_dev = pvr_obj->pvr_dev;
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	if (fw_dev->processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) {
+		*gpu_addr_out = fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr;
+		return true;
+	}
+
+	return false;
+}
+
+#endif /* __PVR_GEM_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c
new file mode 100644
index 000000000000..a8bbf6d4b563
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.c
@@ -0,0 +1,551 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_hwrt.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs_client.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_gem.h>
+#include <linux/bitops.h>
+#include <linux/math.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+static_assert(ROGUE_FWIF_NUM_RTDATAS == 2);
+static_assert(ROGUE_FWIF_NUM_GEOMDATAS == 1);
+static_assert(ROGUE_FWIF_NUM_RTDATA_FREELISTS == 2);
+
+/**
+ * struct pvr_rt_mtile_info - Render target macrotile information
+ */
+struct pvr_rt_mtile_info {
+	/** @mtile_x: Macrotile sizes in X, in tiles, per macrotile level. */
+	u32 mtile_x[3];
+	/** @mtile_y: Macrotile sizes in Y, in tiles, per macrotile level. */
+	u32 mtile_y[3];
+	/** @tile_max_x: Maximum tile index in X. */
+	u32 tile_max_x;
+	/** @tile_max_y: Maximum tile index in Y. */
+	u32 tile_max_y;
+	/** @tile_size_x: Tile width, in pixels. */
+	u32 tile_size_x;
+	/** @tile_size_y: Tile height, in pixels. */
+	u32 tile_size_y;
+	/** @num_tiles_x: Number of tiles in X covering the render target. */
+	u32 num_tiles_x;
+	/** @num_tiles_y: Number of tiles in Y covering the render target. */
+	u32 num_tiles_y;
+};
+
+/* Size of Shadow Render Target Cache entry */
+#define SRTC_ENTRY_SIZE sizeof(u32)
+/* Size of Renders Accumulation Array entry */
+#define RAA_ENTRY_SIZE sizeof(u32)
+
+static int
+hwrt_init_kernel_structure(struct pvr_file *pvr_file,
+			   struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+			   struct pvr_hwrt_dataset *hwrt)
+{
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	int err;
+	int i;
+
+	hwrt->pvr_dev = pvr_dev;
+
+	/* Get pointers to the free lists */
+	for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+		hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]);
+		if (!hwrt->free_lists[i]) {
+			err = -EINVAL;
+			goto err_put_free_lists;
+		}
+	}
+
+	if (hwrt->free_lists[ROGUE_FW_LOCAL_FREELIST]->current_pages <
+	    pvr_get_free_list_min_pages(pvr_dev)) {
+		err = -EINVAL;
+		goto err_put_free_lists;
+	}
+
+	/* Record the layer count so cleanup knows if per-layer objects exist. */
+	hwrt->max_rts = args->layers;
+
+	return 0;
+
+err_put_free_lists:
+	for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+		pvr_free_list_put(hwrt->free_lists[i]);
+		hwrt->free_lists[i] = NULL;
+	}
+
+	return err;
+}
+
+static void
+hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) {
+		pvr_free_list_put(hwrt->free_lists[i]);
+		hwrt->free_lists[i] = NULL;
+	}
+}
+
+static void
+hwrt_fini_common_fw_structure(struct pvr_hwrt_dataset *hwrt)
+{
+	pvr_fw_object_release(hwrt->common_fw_obj);
+}
+
+static int
+get_cr_isp_mtile_size_val(struct pvr_device *pvr_dev, u32 samples,
+			  struct pvr_rt_mtile_info *info, u32 *value_out)
+{
+	u32 x = info->mtile_x[0];
+	u32 y = info->mtile_y[0];
+	u32 samples_per_pixel;
+	int err;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+	if (err)
+		goto err_out;
+
+	if (samples_per_pixel == 1) {
+		if (samples >= 4)
+			x <<= 1;
+		if (samples >= 2)
+			y <<= 1;
+	} else if (samples_per_pixel == 2) {
+		if (samples >= 8)
+			x <<= 1;
+		if (samples >= 4)
+			y <<= 1;
+	} else if (samples_per_pixel == 4) {
+		if (samples >= 8)
+			y <<= 1;
+	} else {
+		WARN(true, "Unsupported ISP samples per pixel value");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	*value_out = ((x << ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK) |
+		     ((y << ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK);
+
+err_out:
+	return err;
+}
+
+static int
+get_cr_multisamplectl_val(u32 samples, bool y_flip, u64 *value_out)
+{
+	static const struct {
+		u8 x[8];
+		u8 y[8];
+	} sample_positions[4] = {
+		/* 1 sample */
+		{
+			.x = { 8 },
+			.y = { 8 },
+		},
+		/* 2 samples */
+		{
+			.x = { 12, 4 },
+			.y = { 12, 4 },
+		},
+		/* 4 samples */
+		{
+			.x = { 6, 14, 2, 10 },
+			.y = { 2, 6, 10, 14 },
+		},
+		/* 8 samples */
+		{
+			.x = { 9, 7, 13, 5, 3, 1, 11, 15 },
+			.y = { 5, 11, 9, 3, 13, 7, 15, 1 },
+		},
+	};
+	int idx = fls(samples) - 1;
+	u64 value = 0;
+	u32 i;
+
+	if (idx < 0 || idx > 3)
+		return -EINVAL;
+
+	for (i = 0; i < 8; i++) {
+		value |= ((u64)sample_positions[idx].x[i]) << (i * 8);
+		if (y_flip)
+			value |= ((u64)((16 - sample_positions[idx].y[i]) & 0xf)) << (i * 8 + 4);
+		else
+			value |= ((u64)sample_positions[idx].y[i]) << (i * 8 + 4);
+	}
+
+	*value_out = value;
+
+	return 0;
+}
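+
+/*
+ * Packing sketch for the loop above: each sample occupies one byte of the
+ * 64-bit register value, with the 4-bit X position in the low nibble and
+ * the 4-bit Y position in the high nibble (units of 1/16th of a pixel).
+ * Using the 4-sample table above (x = {6, 14, 2, 10}, y = {2, 6, 10, 14})
+ * with y_flip disabled:
+ *
+ *     byte 0 = (2 << 4)  | 6  = 0x26
+ *     byte 1 = (6 << 4)  | 14 = 0x6e
+ *     byte 2 = (10 << 4) | 2  = 0xa2
+ *     byte 3 = (14 << 4) | 10 = 0xea
+ *
+ * so get_cr_multisamplectl_val(4, false, &v) stores v = 0xeaa26e26; unused
+ * sample slots are zero-initialised and contribute zero bytes.
+ */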
+
+static int
+get_cr_te_aa_val(struct pvr_device *pvr_dev, u32 samples, u32 *value_out)
+{
+	u32 samples_per_pixel;
+	u32 value = 0;
+	int err = 0;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel);
+	if (err)
+		goto err_out;
+
+	switch (samples_per_pixel) {
+	case 1:
+		if (samples >= 2)
+			value |= ROGUE_CR_TE_AA_Y_EN;
+		if (samples >= 4)
+			value |= ROGUE_CR_TE_AA_X_EN;
+		break;
+	case 2:
+		if (samples >= 2)
+			value |= ROGUE_CR_TE_AA_X2_EN;
+		if (samples >= 4)
+			value |= ROGUE_CR_TE_AA_Y_EN;
+		if (samples >= 8)
+			value |= ROGUE_CR_TE_AA_X_EN;
+		break;
+	case 4:
+		if (samples >= 2)
+			value |= ROGUE_CR_TE_AA_X2_EN;
+		if (samples >= 4)
+			value |= ROGUE_CR_TE_AA_Y2_EN;
+		if (samples >= 8)
+			value |= ROGUE_CR_TE_AA_Y_EN;
+		break;
+	default:
+		WARN(true, "Unsupported ISP samples per pixel value");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	*value_out = value;
+
+err_out:
+	return err;
+}
+
+static int
+hwrt_init_common_fw_structure(struct pvr_file *pvr_file,
+			      struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+			      struct pvr_hwrt_dataset *hwrt)
+{
+	struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct rogue_fwif_hwrtdata_common *hwrt_data_common_fw;
+	struct pvr_rt_mtile_info info;
+	int err;
+
+	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &info.tile_size_x);
+	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &info.tile_size_y);
+
+	info.num_tiles_x = DIV_ROUND_UP(args->width, info.tile_size_x);
+	info.num_tiles_y = DIV_ROUND_UP(args->height, info.tile_size_y);
+
+	if (PVR_HAS_FEATURE(pvr_dev, simple_internal_parameter_format)) {
+		u32 parameter_format;
+
+		PVR_FEATURE_VALUE(pvr_dev, simple_internal_parameter_format, &parameter_format);
+		WARN_ON(parameter_format != 2);
+
+		/*
+		 * Set up 16 macrotiles with a multiple of 2x2 tiles per macrotile, which is
+		 * aligned to a tile group.
+		 */
+		info.mtile_x[0] = DIV_ROUND_UP(info.num_tiles_x, 8) * 2;
+		info.mtile_y[0] = DIV_ROUND_UP(info.num_tiles_y, 8) * 2;
+		info.mtile_x[1] = 0;
+		info.mtile_y[1] = 0;
+		info.mtile_x[2] = 0;
+		info.mtile_y[2] = 0;
+		info.tile_max_x = round_up(info.num_tiles_x, 2) - 1;
+		info.tile_max_y = round_up(info.num_tiles_y, 2) - 1;
+	} else {
+		/* Set up 16 macrotiles with a multiple of 4x4 tiles per macrotile. */
+		info.mtile_x[0] = round_up(DIV_ROUND_UP(info.num_tiles_x, 4), 4);
+		info.mtile_y[0] = round_up(DIV_ROUND_UP(info.num_tiles_y, 4), 4);
+		info.mtile_x[1] = info.mtile_x[0] * 2;
+		info.mtile_y[1] = info.mtile_y[0] * 2;
+		info.mtile_x[2] = info.mtile_x[0] * 3;
+		info.mtile_y[2] = info.mtile_y[0] * 3;
+		info.tile_max_x = info.num_tiles_x - 1;
+		info.tile_max_y = info.num_tiles_y - 1;
+	}
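+
+	/*
+	 * Worked example for the non-SIPF path above (illustrative figures
+	 * only; real tile sizes come from the device feature values): with
+	 * 32x32 tiles and a 1920x1080 render target,
+	 *
+	 *     num_tiles_x = DIV_ROUND_UP(1920, 32) = 60
+	 *     num_tiles_y = DIV_ROUND_UP(1080, 32) = 34
+	 *     mtile_x[0]  = round_up(DIV_ROUND_UP(60, 4), 4) = 16
+	 *     mtile_y[0]  = round_up(DIV_ROUND_UP(34, 4), 4) = 12
+	 *
+	 * with mtile_x[1]/[2] and mtile_y[1]/[2] at 2x and 3x those values,
+	 * and tile_max_x/tile_max_y of 59 and 33 respectively.
+	 */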
+
+	/*
+	 * Create and map the FW structure so we can initialise it. This is not
+	 * accessed on the CPU side post-initialisation so the mapping lifetime
+	 * is only for this function.
+	 */
+	hwrt_data_common_fw =
+		pvr_gem_create_and_map_fw_object(pvr_dev,
+						 sizeof(*hwrt_data_common_fw),
+						 PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						 DRM_PVR_BO_CREATE_ZEROED, &hwrt->common_fw_obj);
+	if (IS_ERR(hwrt_data_common_fw)) {
+		err = PTR_ERR(hwrt_data_common_fw);
+		goto err_out;
+	}
+
+	hwrt_data_common_fw->geom_caches_need_zeroing = false;
+
+	hwrt_data_common_fw->isp_merge_lower_x = args->isp_merge_lower_x;
+	hwrt_data_common_fw->isp_merge_lower_y = args->isp_merge_lower_y;
+	hwrt_data_common_fw->isp_merge_upper_x = args->isp_merge_upper_x;
+	hwrt_data_common_fw->isp_merge_upper_y = args->isp_merge_upper_y;
+	hwrt_data_common_fw->isp_merge_scale_x = args->isp_merge_scale_x;
+	hwrt_data_common_fw->isp_merge_scale_y = args->isp_merge_scale_y;
+
+	err = get_cr_multisamplectl_val(args->samples, false,
+					&hwrt_data_common_fw->multi_sample_ctl);
+	if (err)
+		goto err_put_fw_obj;
+
+	err = get_cr_multisamplectl_val(args->samples, true,
+					&hwrt_data_common_fw->flipped_multi_sample_ctl);
+	if (err)
+		goto err_put_fw_obj;
+
+	hwrt_data_common_fw->mtile_stride = info.mtile_x[0] * info.mtile_y[0];
+
+	err = get_cr_te_aa_val(pvr_dev, args->samples, &hwrt_data_common_fw->teaa);
+	if (err)
+		goto err_put_fw_obj;
+
+	hwrt_data_common_fw->screen_pixel_max =
+		(((args->width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT) &
+		 ~ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK) |
+		(((args->height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) &
+		 ~ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK);
+
+	hwrt_data_common_fw->te_screen =
+		((info.tile_max_x << ROGUE_CR_TE_SCREEN_XMAX_SHIFT) &
+		 ~ROGUE_CR_TE_SCREEN_XMAX_CLRMSK) |
+		((info.tile_max_y << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) &
+		 ~ROGUE_CR_TE_SCREEN_YMAX_CLRMSK);
+	hwrt_data_common_fw->te_mtile1 =
+		((info.mtile_x[0] << ROGUE_CR_TE_MTILE1_X1_SHIFT) & ~ROGUE_CR_TE_MTILE1_X1_CLRMSK) |
+		((info.mtile_x[1] << ROGUE_CR_TE_MTILE1_X2_SHIFT) & ~ROGUE_CR_TE_MTILE1_X2_CLRMSK) |
+		((info.mtile_x[2] << ROGUE_CR_TE_MTILE1_X3_SHIFT) & ~ROGUE_CR_TE_MTILE1_X3_CLRMSK);
+	hwrt_data_common_fw->te_mtile2 =
+		((info.mtile_y[0] << ROGUE_CR_TE_MTILE2_Y1_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y1_CLRMSK) |
+		((info.mtile_y[1] << ROGUE_CR_TE_MTILE2_Y2_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y2_CLRMSK) |
+		((info.mtile_y[2] << ROGUE_CR_TE_MTILE2_Y3_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y3_CLRMSK);
+
+	err = get_cr_isp_mtile_size_val(pvr_dev, args->samples, &info,
+					&hwrt_data_common_fw->isp_mtile_size);
+	if (err)
+		goto err_put_fw_obj;
+
+	hwrt_data_common_fw->tpc_stride = geom_data_args->tpc_stride;
+	hwrt_data_common_fw->tpc_size = geom_data_args->tpc_size;
+
+	hwrt_data_common_fw->rgn_header_size = args->region_header_size;
+
+	pvr_fw_object_vunmap(hwrt->common_fw_obj, false);
+
+	return 0;
+
+err_put_fw_obj:
+	pvr_fw_object_vunmap(hwrt->common_fw_obj, false);
+
+err_out:
+	return err;
+}
+
+static int
+hwrt_data_init_fw_structure(struct pvr_file *pvr_file,
+			    struct pvr_hwrt_dataset *hwrt,
+			    struct drm_pvr_ioctl_create_hwrt_dataset_args *args,
+			    struct drm_pvr_create_hwrt_rt_data_args *rt_data_args,
+			    struct pvr_hwrt_data *hwrt_data)
+{
+	struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args;
+	struct pvr_device *pvr_dev = pvr_file->pvr_dev;
+	struct rogue_fwif_rta_ctl *rta_ctl;
+	int free_list_i;
+	int err;
+
+	hwrt_data->fw_data = pvr_gem_create_and_map_fw_object(pvr_dev, sizeof(*hwrt_data->fw_data),
+							      PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+							      DRM_PVR_BO_CREATE_ZEROED,
+							      &hwrt_data->fw_obj);
+	if (IS_ERR(hwrt_data->fw_data)) {
+		err = PTR_ERR(hwrt_data->fw_data);
+		goto err_out;
+	}
+
+	pvr_gem_get_fw_addr(hwrt->common_fw_obj, &hwrt_data->fw_data->hwrt_data_common_fw_addr);
+
+	for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) {
+		pvr_gem_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj,
+				    &hwrt_data->fw_data->freelists_fw_addr[free_list_i]);
+	}
+
+	hwrt_data->fw_data->tail_ptrs_dev_addr = geom_data_args->tpc_dev_addr;
+	hwrt_data->fw_data->vheap_table_dev_addr = geom_data_args->vheap_table_dev_addr;
+	hwrt_data->fw_data->rtc_dev_addr = geom_data_args->rtc_dev_addr;
+
+	hwrt_data->fw_data->pm_mlist_dev_addr = rt_data_args->pm_mlist_dev_addr;
+	hwrt_data->fw_data->macrotile_array_dev_addr = rt_data_args->macrotile_array_dev_addr;
+	hwrt_data->fw_data->rgn_header_dev_addr = rt_data_args->region_header_dev_addr;
+
+	rta_ctl = &hwrt_data->fw_data->rta_ctl;
+
+	rta_ctl->render_target_index = 0;
+	rta_ctl->active_render_targets = 0;
+	rta_ctl->valid_render_targets_fw_addr = 0;
+	rta_ctl->rta_num_partial_renders_fw_addr = 0;
+	rta_ctl->max_rts = args->layers;
+
+	if (args->layers > 1) {
+		err = pvr_gem_create_fw_object(pvr_dev, args->layers * SRTC_ENTRY_SIZE,
+					       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+					       DRM_PVR_BO_CREATE_ZEROED,
+					       &hwrt_data->srtc_obj);
+		if (err)
+			goto err_put_fw_obj;
+		pvr_gem_get_fw_addr(hwrt_data->srtc_obj, &rta_ctl->valid_render_targets_fw_addr);
+
+		err = pvr_gem_create_fw_object(pvr_dev, args->layers * RAA_ENTRY_SIZE,
+					       PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+					       DRM_PVR_BO_CREATE_ZEROED,
+					       &hwrt_data->raa_obj);
+		if (err)
+			goto err_put_shadow_rt_cache;
+		pvr_gem_get_fw_addr(hwrt_data->raa_obj, &rta_ctl->rta_num_partial_renders_fw_addr);
+	}
+
+	pvr_free_list_add_hwrt(hwrt->free_lists[0], hwrt_data);
+
+	return 0;
+
+err_put_shadow_rt_cache:
+	pvr_fw_object_release(hwrt_data->srtc_obj);
+
+err_put_fw_obj:
+	pvr_fw_object_vunmap(hwrt_data->fw_obj, false);
+	pvr_fw_object_release(hwrt_data->fw_obj);
+
+err_out:
+	return err;
+}
+
+static void
+hwrt_data_fini_fw_structure(struct pvr_hwrt_dataset *hwrt, int hwrt_nr)
+{
+	struct pvr_hwrt_data *hwrt_data = &hwrt->data[hwrt_nr];
+
+	pvr_free_list_remove_hwrt(hwrt->free_lists[0], hwrt_data);
+
+	if (hwrt->max_rts > 1) {
+		pvr_fw_object_release(hwrt_data->raa_obj);
+		pvr_fw_object_release(hwrt_data->srtc_obj);
+	}
+
+	pvr_fw_object_vunmap(hwrt_data->fw_obj, false);
+	pvr_fw_object_release(hwrt_data->fw_obj);
+}
+
+/**
+ * pvr_hwrt_dataset_create() - Create a new HWRT dataset
+ * @pvr_file: Pointer to pvr_file structure.
+ * @args: Creation arguments from userspace.
+ *
+ * Return:
+ *  * Pointer to new HWRT, or
+ *  * ERR_PTR(-%ENOMEM) on out of memory, or
+ *  * Any other error (as an ERR_PTR()) encountered during initialisation.
+ */
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+			struct drm_pvr_ioctl_create_hwrt_dataset_args *args)
+{
+	struct pvr_hwrt_dataset *hwrt;
+	int err;
+
+	/* Create and fill out the kernel structure */
+	hwrt = kzalloc(sizeof(*hwrt), GFP_KERNEL);
+
+	if (!hwrt)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&hwrt->ref_count);
+
+	err = hwrt_init_kernel_structure(pvr_file, args, hwrt);
+	if (err < 0)
+		goto err_free;
+
+	err = hwrt_init_common_fw_structure(pvr_file, args, hwrt);
+	if (err < 0)
+		goto err_fini_kernel_structure;
+
+	for (int i = 0; i < ARRAY_SIZE(hwrt->data); i++) {
+		err = hwrt_data_init_fw_structure(pvr_file, hwrt, args,
+						  &args->rt_data_args[i],
+						  &hwrt->data[i]);
+		if (err < 0) {
+			/* Destroy already created structures. */
+			for (i--; i >= 0; i--)
+				hwrt_data_fini_fw_structure(hwrt, i);
+			goto err_fini_common_fw_structure;
+		}
+
+		hwrt->data[i].hwrt_dataset = hwrt;
+	}
+
+	return hwrt;
+
+	/*
+	 * Unwind in reverse order; the dataset is only partially initialised
+	 * here, so pvr_hwrt_dataset_put() must not be used on it.
+	 */
+err_fini_common_fw_structure:
+	hwrt_fini_common_fw_structure(hwrt);
+
+err_fini_kernel_structure:
+	hwrt_fini_kernel_structure(hwrt);
+
+err_free:
+	kfree(hwrt);
+
+	return ERR_PTR(err);
+}
+
+static void
+pvr_hwrt_dataset_release(struct kref *ref_count)
+{
+	struct pvr_hwrt_dataset *hwrt =
+		container_of(ref_count, struct pvr_hwrt_dataset, ref_count);
+
+	for (int i = ARRAY_SIZE(hwrt->data) - 1; i >= 0; i--) {
+		WARN_ON(pvr_fw_structure_cleanup(hwrt->pvr_dev, ROGUE_FWIF_CLEANUP_HWRTDATA,
+						 hwrt->data[i].fw_obj, 0));
+		hwrt_data_fini_fw_structure(hwrt, i);
+	}
+
+	hwrt_fini_common_fw_structure(hwrt);
+	hwrt_fini_kernel_structure(hwrt);
+
+	kfree(hwrt);
+}
+
+/**
+ * pvr_destroy_hwrt_datasets_for_file() - Destroy any HWRT datasets associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all HWRT datasets associated with @pvr_file from the device
+ * hwrt_dataset list and drops initial references. HWRT datasets will then be
+ * destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file)
+{
+	struct pvr_hwrt_dataset *hwrt;
+	unsigned long handle;
+
+	xa_for_each(&pvr_file->hwrt_handles, handle, hwrt) {
+		(void)hwrt;
+		pvr_hwrt_dataset_put(xa_erase(&pvr_file->hwrt_handles, handle));
+	}
+}
+
+/**
+ * pvr_hwrt_dataset_put() - Release reference on HWRT dataset
+ * @hwrt: Pointer to HWRT dataset to release reference on
+ */
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt)
+{
+	if (hwrt)
+		kref_put(&hwrt->ref_count, pvr_hwrt_dataset_release);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h
new file mode 100644
index 000000000000..83935c581c47
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.h
@@ -0,0 +1,163 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_HWRT_H__
+#define __PVR_HWRT_H__
+
+#include <linux/compiler_attributes.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/* Forward declaration from pvr_free_list.h. */
+struct pvr_free_list;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+/**
+ * struct pvr_hwrt_data - structure representing HWRT data
+ */
+struct pvr_hwrt_data {
+	/** @fw_obj: FW object representing the FW-side structure. */
+	struct pvr_fw_object *fw_obj;
+
+	/** @fw_data: Pointer to CPU mappings of the FW-side structure. */
+	struct rogue_fwif_hwrtdata *fw_data;
+
+	/** @freelist_node: List node connecting this HWRT to the local freelist. */
+	struct list_head freelist_node;
+
+	/**
+	 * @srtc_obj: FW object representing shadow render target cache.
+	 *
+	 * Only valid if @max_rts > 1.
+	 */
+	struct pvr_fw_object *srtc_obj;
+
+	/**
+	 * @raa_obj: FW object representing renders accumulation array.
+	 *
+	 * Only valid if @max_rts > 1.
+	 */
+	struct pvr_fw_object *raa_obj;
+
+	/** @hwrt_dataset: Back pointer to owning HWRT dataset. */
+	struct pvr_hwrt_dataset *hwrt_dataset;
+};
+
+/**
+ * struct pvr_hwrt_dataset - structure representing a HWRT data set.
+ */
+struct pvr_hwrt_dataset {
+	/** @ref_count: Reference count of object. */
+	struct kref ref_count;
+
+	/** @pvr_dev: Pointer to device that owns this object. */
+	struct pvr_device *pvr_dev;
+
+	/** @common_fw_obj: FW object representing common FW-side structure. */
+	struct pvr_fw_object *common_fw_obj;
+
+	/** @data: HWRT data structures belonging to this set. */
+	struct pvr_hwrt_data data[ROGUE_FWIF_NUM_RTDATAS];
+
+	/** @free_lists: Free lists used by HWRT data set. */
+	struct pvr_free_list *free_lists[ROGUE_FWIF_NUM_RTDATA_FREELISTS];
+
+	/** @max_rts: Maximum render targets for this HWRT data set. */
+	u16 max_rts;
+};
+
+struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_create(struct pvr_file *pvr_file,
+			struct drm_pvr_ioctl_create_hwrt_dataset_args *args);
+
+void
+pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file);
+
+/**
+ * pvr_hwrt_dataset_lookup() - Lookup HWRT dataset pointer from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_dataset_put() to release.
+ *
+ * Returns:
+ *  * The requested object on success, or
+ *  * %NULL on failure (object does not exist in list, or is not a HWRT
+ *    dataset)
+ */
+static __always_inline struct pvr_hwrt_dataset *
+pvr_hwrt_dataset_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_hwrt_dataset *hwrt;
+
+	xa_lock(&pvr_file->hwrt_handles);
+	hwrt = xa_load(&pvr_file->hwrt_handles, handle);
+
+	if (hwrt)
+		kref_get(&hwrt->ref_count);
+
+	xa_unlock(&pvr_file->hwrt_handles);
+
+	return hwrt;
+}
+
+void
+pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt);
+
+/**
+ * pvr_hwrt_data_lookup() - Lookup HWRT data pointer from handle and index
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ * @index: Index of RT data within dataset.
+ *
+ * Takes reference on dataset object. Call pvr_hwrt_data_put() to release.
+ *
+ * Returns:
+ *  * The requested object on success, or
+ *  * %NULL on failure (object does not exist in list, or is not a HWRT
+ *    dataset, or index is out of range)
+ */
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_lookup(struct pvr_file *pvr_file, u32 handle, u32 index)
+{
+	struct pvr_hwrt_dataset *hwrt_dataset = pvr_hwrt_dataset_lookup(pvr_file, handle);
+
+	if (hwrt_dataset) {
+		if (index < ARRAY_SIZE(hwrt_dataset->data))
+			return &hwrt_dataset->data[index];
+
+		pvr_hwrt_dataset_put(hwrt_dataset);
+	}
+
+	return NULL;
+}
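+
+/*
+ * A minimal lookup/put usage sketch; @handle and @index would come from
+ * userspace-supplied ioctl arguments:
+ *
+ *	struct pvr_hwrt_data *hwrt;
+ *
+ *	hwrt = pvr_hwrt_data_lookup(pvr_file, handle, index);
+ *	if (!hwrt)
+ *		return -EINVAL;
+ *
+ *	... use hwrt, then drop the reference ...
+ *
+ *	pvr_hwrt_data_put(hwrt);
+ */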
+
+/**
+ * pvr_hwrt_data_put() - Release reference on HWRT data
+ * @hwrt: Pointer to HWRT data to release reference on
+ */
+static __always_inline void
+pvr_hwrt_data_put(struct pvr_hwrt_data *hwrt)
+{
+	if (hwrt)
+		pvr_hwrt_dataset_put(hwrt->hwrt_dataset);
+}
+
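+/**
+ * pvr_hwrt_data_get() - Take additional reference on HWRT data
+ * @hwrt: Pointer to HWRT data. May be %NULL.
+ *
+ * Call pvr_hwrt_data_put() to release.
+ *
+ * Returns:
+ *  * The requested HWRT data on success, or
+ *  * %NULL if no HWRT data pointer passed.
+ */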
+static __always_inline struct pvr_hwrt_data *
+pvr_hwrt_data_get(struct pvr_hwrt_data *hwrt)
+{
+	if (hwrt)
+		kref_get(&hwrt->hwrt_dataset->ref_count);
+
+	return hwrt;
+}
+
+#endif /* __PVR_HWRT_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
new file mode 100644
index 000000000000..9ab9d572feef
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -0,0 +1,1096 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+
+#include <drm/drm_gem.h>
+#include <drm/drm_syncobj.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-unwrap.h>
+#include <linux/dma-resv.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/ww_mutex.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+static void release_fences(struct xarray *in_fences)
+{
+	struct dma_fence *fence;
+	unsigned long id;
+
+	xa_for_each(in_fences, id, fence)
+		dma_fence_put(fence);
+
+	xa_destroy(in_fences);
+}
+
+static void pvr_job_release(struct kref *kref)
+{
+	struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);
+
+	xa_erase(&job->pvr_dev->job_ids, job->id);
+
+	pvr_hwrt_data_put(job->hwrt);
+	pvr_context_put(job->ctx);
+
+	if (job->deps.cur) {
+		dma_fence_remove_callback(job->deps.cur, &job->deps.cb);
+		dma_fence_put(job->deps.cur);
+	}
+
+	release_fences(&job->deps.non_native);
+	release_fences(&job->deps.native);
+
+	dma_fence_put(job->done_fence);
+	kfree(job->cmd);
+	kfree(job);
+}
+
+static void pvr_job_dep_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct pvr_job *job = container_of(cb, struct pvr_job, deps.cb);
+	struct pvr_context *ctx = job->ctx;
+
+	pvr_context_pending_job_event(ctx);
+}
+
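+/**
+ * pvr_job_non_native_deps_done() - Check if all non-native dependencies are done
+ * @job: Job to operate on.
+ *
+ * Also advances to the next unsignaled non-native dependency, registering a
+ * callback so the owning context is notified when it signals.
+ *
+ * Returns:
+ *  * %true if all non-native dependencies are signaled, or
+ *  * %false otherwise.
+ */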
+bool pvr_job_non_native_deps_done(struct pvr_job *job)
+{
+	/* The current fence is a native fence, meaning all non-native deps are done. */
+	if (job->deps.cur && to_pvr_context_queue_fence(job->deps.cur))
+		return true;
+
+	/* No more non-native deps to wait on. */
+	if (!job->deps.cur && xa_empty(&job->deps.non_native))
+		return true;
+
+	/* Current non-native fence is still unsignaled. */
+	if (job->deps.cur && !dma_fence_is_signaled(job->deps.cur))
+		return false;
+
+	dma_fence_put(job->deps.cur);
+	job->deps.cur = NULL;
+
+	while (!xa_empty(&job->deps.non_native)) {
+		struct dma_fence *next_dep;
+		int ret;
+
+		next_dep = xa_erase(&job->deps.non_native, job->deps.next_index++);
+		ret = dma_fence_add_callback(next_dep, &job->deps.cb, pvr_job_dep_cb);
+		if (!ret) {
+			job->deps.cur = next_dep;
+			break;
+		}
+
+		WARN_ON(ret != -ENOENT);
+		dma_fence_put(next_dep);
+	}
+
+	/* Reset the index so it can be used to iterate over the native array. */
+	if (xa_empty(&job->deps.non_native))
+		job->deps.next_index = 0;
+
+	return !job->deps.cur;
+}
+
+/**
+ * pvr_job_evict_signaled_native_deps() - Evict signaled native deps
+ * @job: Job to operate on.
+ */
+void pvr_job_evict_signaled_native_deps(struct pvr_job *job)
+{
+	struct dma_fence *fence = NULL;
+	unsigned long index;
+
+	if (job->deps.cur && dma_fence_is_signaled(job->deps.cur)) {
+		if (!WARN_ON(!to_pvr_context_queue_fence(job->deps.cur)))
+			job->deps.native_count--;
+
+		dma_fence_put(job->deps.cur);
+		job->deps.cur = NULL;
+	}
+
+	xa_for_each_start(&job->deps.native, index, fence, job->deps.next_index) {
+		if (dma_fence_is_signaled(fence)) {
+			xa_erase(&job->deps.native, index);
+			dma_fence_put(fence);
+			job->deps.native_count--;
+		}
+	}
+}
+
+/**
+ * pvr_job_wait_first_non_signaled_native_dep() - Register a fence callback on the first
+ *						  non-signaled native dep
+ * @job: Job to operate on.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * or -ENOENT if there's no fence to wait on.
+ */
+int pvr_job_wait_first_non_signaled_native_dep(struct pvr_job *job)
+{
+	struct dma_fence *fence;
+	unsigned long index;
+
+	if (job->deps.cur)
+		return 0;
+
+	xa_for_each_start(&job->deps.native, index, fence, job->deps.next_index) {
+		int err;
+
+		xa_erase(&job->deps.native, index);
+		err = dma_fence_add_callback(fence, &job->deps.cb, pvr_job_dep_cb);
+		if (!err) {
+			job->deps.cur = fence;
+			job->deps.next_index = index + 1;
+			return 0;
+		}
+
+		WARN_ON(err != -ENOENT);
+		job->deps.native_count--;
+		dma_fence_put(fence);
+	}
+
+	return -ENOENT;
+}
+
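+/**
+ * get_cccb() - Get the client CCB for a job
+ * @job: Job to query.
+ *
+ * Returns:
+ *  * The client CCB of the context matching @job's type, or
+ *  * %NULL if the job type is invalid.
+ */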
+static struct pvr_cccb *
+get_cccb(struct pvr_job *job)
+{
+	switch (job->type) {
+	case DRM_PVR_JOB_TYPE_GEOMETRY:
+		return &container_of(job->ctx, struct pvr_context_render, base)->ctx_geom.cccb;
+
+	case DRM_PVR_JOB_TYPE_FRAGMENT:
+		return &container_of(job->ctx, struct pvr_context_render, base)->ctx_frag.cccb;
+
+	case DRM_PVR_JOB_TYPE_COMPUTE:
+		return &container_of(job->ctx, struct pvr_context_compute, base)->cccb;
+
+	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+		return &container_of(job->ctx, struct pvr_context_transfer, base)->cccb;
+
+	default:
+		return NULL;
+	}
+}
+
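+/**
+ * get_ctx_queue() - Get the context queue for a job
+ * @job: Job to query.
+ *
+ * Returns:
+ *  * The queue of the context matching @job's type, or
+ *  * %NULL if the job type is invalid.
+ */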
+static struct pvr_context_queue *
+get_ctx_queue(struct pvr_job *job)
+{
+	switch (job->type) {
+	case DRM_PVR_JOB_TYPE_GEOMETRY:
+		return &container_of(job->ctx, struct pvr_context_render, base)->ctx_geom.queue;
+
+	case DRM_PVR_JOB_TYPE_FRAGMENT:
+		return &container_of(job->ctx, struct pvr_context_render, base)->ctx_frag.queue;
+
+	case DRM_PVR_JOB_TYPE_COMPUTE:
+		return &container_of(job->ctx, struct pvr_context_compute, base)->queue;
+
+	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+		return &container_of(job->ctx, struct pvr_context_transfer, base)->queue;
+
+	default:
+		return NULL;
+	}
+}
+
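+/**
+ * get_ctx_fw_addr() - Get the FW address of the context backing a job
+ * @job: Job to query.
+ *
+ * For fragment jobs, this returns the address of the fragment part of the
+ * render context.
+ */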
+static u32 get_ctx_fw_addr(struct pvr_job *job)
+{
+	struct pvr_fw_object *ctx_fw_obj = NULL;
+	u32 ctx_fw_addr;
+
+	switch (job->type) {
+	case DRM_PVR_JOB_TYPE_GEOMETRY:
+	case DRM_PVR_JOB_TYPE_FRAGMENT:
+		ctx_fw_obj = container_of(job->ctx, struct pvr_context_render, base)->fw_obj;
+		break;
+
+	case DRM_PVR_JOB_TYPE_COMPUTE:
+		ctx_fw_obj = container_of(job->ctx, struct pvr_context_compute, base)->fw_obj;
+		break;
+
+	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+		ctx_fw_obj = container_of(job->ctx, struct pvr_context_transfer, base)->fw_obj;
+		break;
+
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+
+	pvr_gem_get_fw_addr(ctx_fw_obj, &ctx_fw_addr);
+
+	if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT)
+		ctx_fw_addr += offsetof(struct rogue_fwif_fwrendercontext, frag_context);
+
+	return ctx_fw_addr;
+}
+
+/**
+ * pvr_job_fits_in_cccb() - Check if a job fits in CCCB
+ * @job: Job to check.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * or -E2BIG if the CCCB is too small to ever hold the commands for this job,
+ *  * or -ENOMEM if the CCCB doesn't have enough memory to hold the commands for
+ *    this job at the moment.
+ */
+int pvr_job_fits_in_cccb(struct pvr_job *job)
+{
+	/* One UFO for job done signaling, and one per remaining native fence. */
+	u32 ufo_op_count = 1 + job->deps.native_count;
+
+	u32 size = (pvr_cccb_get_size_of_cmd_with_hdr(sizeof(struct rogue_fwif_ufo)) *
+		    ufo_op_count) +
+		   pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
+	struct pvr_cccb *cccb = get_cccb(job);
+
+	return pvr_cccb_check_command_space(cccb, size);
+}
+
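+/**
+ * pvr_job_submit() - Submit a job to the FW
+ * @job: Job to submit.
+ *
+ * Writes a fence command for the current and each remaining native
+ * dependency, the job command itself, and a final UFO update for the done
+ * fence to the client CCB, then kicks the FW. On failure, the job is moved
+ * back to the pending list and the CCB writes are rolled back.
+ */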
+void pvr_job_submit(struct pvr_job *job)
+{
+	struct pvr_context_queue *queue = get_ctx_queue(job);
+	struct pvr_device *pvr_dev = job->pvr_dev;
+	struct pvr_context_queue_fence *qfence;
+	struct pvr_cccb *cccb = get_cccb(job);
+	struct rogue_fwif_ufo queue_ufo;
+	u32 ctx_fw_addr = get_ctx_fw_addr(job);
+	struct dma_fence *fence;
+	unsigned long index;
+	int err;
+
+	if (WARN_ON(!queue || !cccb))
+		return;
+
+	pvr_cccb_lock(cccb);
+
+	spin_lock(&pvr_dev->active_contexts.lock);
+	if (list_empty(&job->ctx->active_node))
+		list_add_tail(&job->ctx->active_node, &pvr_dev->active_contexts.list);
+	spin_lock(&queue->jobs.lock);
+	list_move_tail(&job->node, &queue->jobs.in_flight);
+	spin_unlock(&queue->jobs.lock);
+	spin_unlock(&pvr_dev->active_contexts.lock);
+
+	qfence = to_pvr_context_queue_fence(job->deps.cur);
+	WARN_ON(job->deps.cur && !qfence);
+	if (qfence) {
+		pvr_gem_get_fw_addr(qfence->ctx->timeline_ufo.fw_obj, &queue_ufo.addr);
+		queue_ufo.value = job->deps.cur->seqno;
+		err = pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+							 sizeof(queue_ufo), &queue_ufo, 0, 0);
+		if (WARN_ON(err))
+			goto err_cccb_unlock_rollback;
+	}
+
+	xa_for_each(&job->deps.native, index, fence) {
+		qfence = to_pvr_context_queue_fence(fence);
+		if (WARN_ON(!qfence))
+			continue;
+
+		pvr_gem_get_fw_addr(qfence->ctx->timeline_ufo.fw_obj, &queue_ufo.addr);
+		queue_ufo.value = fence->seqno;
+		err = pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR,
+							 sizeof(queue_ufo), &queue_ufo, 0, 0);
+		if (WARN_ON(err))
+			goto err_cccb_unlock_rollback;
+	}
+
+	/* Submit job to FW */
+	err = pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd,
+						 job->id, job->id);
+	if (WARN_ON(err))
+		goto err_cccb_unlock_rollback;
+
+	pvr_gem_get_fw_addr(queue->fence_ctx->timeline_ufo.fw_obj, &queue_ufo.addr);
+	queue_ufo.value = job->done_fence->seqno;
+	err = pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_UPDATE,
+						 sizeof(queue_ufo), &queue_ufo, 0, 0);
+	if (WARN_ON(err))
+		goto err_cccb_unlock_rollback;
+
+	err = pvr_cccb_unlock_send_kccb_kick(pvr_dev, cccb, ctx_fw_addr, job->hwrt);
+	if (WARN_ON(err))
+		goto err_cccb_unlock_rollback;
+
+	return;
+
+err_cccb_unlock_rollback:
+	spin_lock(&pvr_dev->active_contexts.lock);
+	spin_lock(&queue->jobs.lock);
+	list_move(&job->node, &queue->jobs.pending);
+	spin_unlock(&queue->jobs.lock);
+	if (!pvr_context_has_in_flight_jobs(job->ctx))
+		list_del_init(&job->ctx->active_node);
+	spin_unlock(&pvr_dev->active_contexts.lock);
+	pvr_cccb_unlock_rollback(cccb);
+}
+
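+/**
+ * pvr_job_push() - Add a job to its context's pending queue
+ * @job: Job to push.
+ *
+ * Takes a reference on the job, then signals a pending-job event on the
+ * owning context so the job gets processed.
+ */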
+static void pvr_job_push(struct pvr_job *job)
+{
+	struct pvr_context_queue *queue = get_ctx_queue(job);
+
+	pvr_job_get(job);
+
+	spin_lock(&queue->jobs.lock);
+	list_add_tail(&job->node, &queue->jobs.pending);
+	spin_unlock(&queue->jobs.lock);
+
+	pvr_context_pending_job_event(job->ctx);
+}
+
+/**
+ * pvr_job_put() - Release reference on job
+ * @job: Target job.
+ */
+void
+pvr_job_put(struct pvr_job *job)
+{
+	if (job)
+		kref_put(&job->ref_count, pvr_job_release);
+}
+
+static int
+pvr_check_sync_op(const struct drm_pvr_sync_op *sync_op)
+{
+	u8 handle_type;
+
+	if (sync_op->flags & ~DRM_PVR_SYNC_OP_FLAGS_MASK)
+		return -EINVAL;
+
+	handle_type = sync_op->flags & DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK;
+	if (handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+	    handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ)
+		return -EINVAL;
+
+	if (handle_type == DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
+	    sync_op->value != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * struct pvr_sync_signal - Object encoding a syncobj signal operation
+ *
+ * The job submission logic collects all signal operations in an array of
+ * pvr_sync_signal objects. This array also serves as a cache to get the
+ * latest dma_fence when multiple jobs are submitted at once, and one job
+ * signals a syncobj point that's later waited on by a subsequent job.
+ */
+struct pvr_sync_signal {
+	/** @handle: Handle of the syncobj to signal. */
+	u32 handle;
+
+	/**
+	 * @point: Point to signal in the syncobj.
+	 *
+	 * Only relevant for timeline syncobjs.
+	 */
+	u64 point;
+
+	/** @syncobj: Syncobj retrieved from the handle. */
+	struct drm_syncobj *syncobj;
+
+	/**
+	 * @chain: Chain object used to link the new fence with the
+	 *	   existing timeline syncobj.
+	 *
+	 * Should be zero when manipulating a regular syncobj.
+	 */
+	struct dma_fence_chain *chain;
+
+	/**
+	 * @fence: New fence object to attach to the syncobj.
+	 *
+	 * This pointer starts out as the fence currently bound to
+	 * the <handle,point> pair.
+	 */
+	struct dma_fence *fence;
+};
+
+static void
+pvr_sync_signal_free(struct pvr_sync_signal *sig_sync)
+{
+	if (!sig_sync)
+		return;
+
+	drm_syncobj_put(sig_sync->syncobj);
+	dma_fence_chain_free(sig_sync->chain);
+	dma_fence_put(sig_sync->fence);
+	kfree(sig_sync);
+}
+
+static void
+pvr_sync_signal_array_cleanup(struct xarray *array)
+{
+	struct pvr_sync_signal *sig_sync;
+	unsigned long i;
+
+	xa_for_each(array, i, sig_sync)
+		pvr_sync_signal_free(sig_sync);
+
+	xa_destroy(array);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_add(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+	struct pvr_sync_signal *sig_sync;
+	int err;
+	u32 id;
+
+	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
+	if (!sig_sync)
+		return ERR_PTR(-ENOMEM);
+
+	sig_sync->handle = handle;
+	sig_sync->point = point;
+
+	if (point > 0) {
+		sig_sync->chain = dma_fence_chain_alloc();
+		if (!sig_sync->chain) {
+			err = -ENOMEM;
+			goto err_free_sig_sync;
+		}
+	}
+
+	sig_sync->syncobj = drm_syncobj_find(file, handle);
+	if (!sig_sync->syncobj) {
+		err = -EINVAL;
+		goto err_free_sig_sync;
+	}
+
+	/* Retrieve the current fence attached to that point. It's
+	 * perfectly fine to get a NULL fence here, it just means there's
+	 * no fence attached to that point yet.
+	 */
+	drm_syncobj_find_fence(file, handle, point, 0, &sig_sync->fence);
+
+	err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL);
+	if (err)
+		goto err_free_sig_sync;
+
+	return sig_sync;
+
+err_free_sig_sync:
+	pvr_sync_signal_free(sig_sync);
+	return ERR_PTR(err);
+}
+
+static struct pvr_sync_signal *
+pvr_sync_signal_array_search(struct xarray *array, u32 handle, u64 point)
+{
+	struct pvr_sync_signal *sig_sync;
+	unsigned long i;
+
+	xa_for_each(array, i, sig_sync) {
+		if (handle == sig_sync->handle && point == sig_sync->point)
+			return sig_sync;
+	}
+
+	return NULL;
+}
+
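+/**
+ * pvr_sync_signal_array_get() - Look up a signal entry, adding one if missing
+ * @array: Signal operation array.
+ * @file: DRM file the syncobj handle belongs to.
+ * @handle: Syncobj handle.
+ * @point: Timeline point. Only relevant for timeline syncobjs.
+ *
+ * Returns:
+ *  * The existing or newly added entry on success, or
+ *  * An error pointer on failure.
+ */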
+static struct pvr_sync_signal *
+pvr_sync_signal_array_get(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
+{
+	struct pvr_sync_signal *sig_sync;
+
+	sig_sync = pvr_sync_signal_array_search(array, handle, point);
+	if (sig_sync)
+		return sig_sync;
+
+	return pvr_sync_signal_array_add(array, file, handle, point);
+}
+
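+/**
+ * pvr_sync_signal_array_collect_ops() - Collect all signal operations into
+ *					 the signal array
+ * @array: Signal operation array.
+ * @file: DRM file the syncobj handles belong to.
+ * @sync_op_count: Number of sync operations.
+ * @sync_ops: Sync operations to collect signal entries for.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error returned while validating an operation or adding an entry.
+ */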
+static int
+pvr_sync_signal_array_collect_ops(struct xarray *array,
+				  struct drm_file *file,
+				  u32 sync_op_count,
+				  struct drm_pvr_sync_op *sync_ops)
+{
+	for (u32 i = 0; i < sync_op_count; i++) {
+		struct pvr_sync_signal *sig_sync;
+		int ret;
+
+		if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+			continue;
+
+		ret = pvr_check_sync_op(&sync_ops[i]);
+		if (ret)
+			return ret;
+
+		sig_sync = pvr_sync_signal_array_get(array, file,
+						     sync_ops[i].handle,
+						     sync_ops[i].value);
+		if (IS_ERR(sig_sync))
+			return PTR_ERR(sig_sync);
+	}
+
+	return 0;
+}
+
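+/**
+ * pvr_sync_signal_array_update_fences() - Point all signal entries at the
+ *					   job done fence
+ * @array: Signal operation array.
+ * @sync_op_count: Number of sync operations.
+ * @sync_ops: Sync operations to update entries for.
+ * @done_fence: Fence to attach to the matching entries.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -EINVAL if an operation has no matching entry.
+ */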
+static int
+pvr_sync_signal_array_update_fences(struct xarray *array,
+				    u32 sync_op_count,
+				    const struct drm_pvr_sync_op *sync_ops,
+				    struct dma_fence *done_fence)
+{
+	for (u32 i = 0; i < sync_op_count; i++) {
+		struct dma_fence *old_fence;
+		struct pvr_sync_signal *sig_sync;
+
+		if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
+			continue;
+
+		sig_sync = pvr_sync_signal_array_search(array, sync_ops[i].handle,
+							sync_ops[i].value);
+		if (WARN_ON(!sig_sync))
+			return -EINVAL;
+
+		old_fence = sig_sync->fence;
+		sig_sync->fence = dma_fence_get(done_fence);
+		dma_fence_put(old_fence);
+
+		if (!sig_sync->fence)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
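+/**
+ * pvr_sync_signal_array_push_fences() - Attach the cached fences to their
+ *					  syncobjs
+ * @array: Signal operation array.
+ *
+ * Timeline syncobjs get a new chain point; regular syncobjs have their fence
+ * replaced.
+ */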
+static void
+pvr_sync_signal_array_push_fences(struct xarray *array)
+{
+	struct pvr_sync_signal *sig_sync;
+	unsigned long i;
+
+	xa_for_each(array, i, sig_sync) {
+		if (sig_sync->chain) {
+			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
+					      sig_sync->fence, sig_sync->point);
+			sig_sync->chain = NULL;
+		} else {
+			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
+		}
+	}
+}
+
+/**
+ * fence_array_add() - Adds the fence to an array of fences to be waited on,
+ *                     deduplicating fences from the same context.
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * This function consumes the reference for @fence, in both the success and
+ * error cases.
+ *
+ * Returns:
+ *  * 0 on success, or an error on failing to expand the array.
+ */
+static int
+fence_array_add(struct xarray *fence_array, struct dma_fence *fence)
+{
+	struct dma_fence *entry;
+	unsigned long index;
+	u32 id = 0;
+	int ret;
+
+	if (!fence)
+		return 0;
+
+	/* Deduplicate if we already depend on a fence from the same context.
+	 * This lets the size of the array of deps scale with the number of
+	 * engines involved, rather than the number of BOs.
+	 */
+	xa_for_each(fence_array, index, entry) {
+		if (entry->context != fence->context)
+			continue;
+
+		if (dma_fence_is_later(fence, entry)) {
+			dma_fence_put(entry);
+			xa_store(fence_array, index, fence, GFP_KERNEL);
+		} else {
+			dma_fence_put(fence);
+		}
+		return 0;
+	}
+
+	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+	if (ret != 0)
+		dma_fence_put(fence);
+
+	return ret;
+}
+
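+/**
+ * pvr_job_add_deps() - Collect job dependencies from the wait operations
+ * @pvr_file: PowerVR file the syncobj handles belong to.
+ * @job: Job to add dependencies to.
+ * @sync_op_count: Number of sync operations.
+ * @sync_ops: Sync operations to extract wait fences from.
+ * @signal_array: Signal operation array, used to resolve fences produced by
+ *		  jobs submitted in the same ioctl.
+ *
+ * Fences are unwrapped and sorted into native fences, which the FW can wait
+ * on through UFO commands, and non-native fences, which must signal before
+ * the job is submitted to the CCCB.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error that occurred while looking up or storing a fence.
+ */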
+static int
+pvr_job_add_deps(struct pvr_file *pvr_file, struct pvr_job *job,
+		 u32 sync_op_count, const struct drm_pvr_sync_op *sync_ops,
+		 struct xarray *signal_array)
+{
+	struct dma_fence *fence;
+	unsigned long index;
+	int err = 0;
+
+	if (!sync_op_count)
+		return 0;
+
+	for (u32 i = 0; i < sync_op_count; i++) {
+		struct dma_fence *unwrapped_fence;
+		struct pvr_sync_signal *sig_sync;
+		struct dma_fence_unwrap iter;
+		u32 native_fence_count = 0;
+
+		if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+			continue;
+
+		err = pvr_check_sync_op(&sync_ops[i]);
+		if (err)
+			return err;
+
+		sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
+							sync_ops[i].value);
+		if (sig_sync) {
+			if (WARN_ON(!sig_sync->fence))
+				return -EINVAL;
+
+			fence = dma_fence_get(sig_sync->fence);
+		} else {
+			err = drm_syncobj_find_fence(from_pvr_file(pvr_file), sync_ops[i].handle,
+						     sync_ops[i].value, 0, &fence);
+			if (err)
+				return err;
+		}
+
+		dma_fence_unwrap_for_each(unwrapped_fence, &iter, fence) {
+			if (to_pvr_context_queue_fence(unwrapped_fence))
+				native_fence_count++;
+		}
+
+		if (!native_fence_count) {
+			/* No need to unwrap the fence if it's fully non-native. */
+			err = fence_array_add(&job->deps.non_native, fence);
+			if (err)
+				return err;
+		} else {
+			dma_fence_unwrap_for_each(unwrapped_fence, &iter, fence) {
+				/* There's no dma_fence_unwrap_stop() helper cleaning up the refs
+				 * owned by dma_fence_unwrap(), so let's just iterate over all
+				 * entries without doing anything when something failed.
+				 */
+				if (err)
+					continue;
+
+				dma_fence_get(unwrapped_fence);
+				if (to_pvr_context_queue_fence(unwrapped_fence)) {
+					err = fence_array_add(&job->deps.native, unwrapped_fence);
+				} else {
+					err = fence_array_add(&job->deps.non_native,
+							      unwrapped_fence);
+				}
+				dma_fence_put(unwrapped_fence);
+			}
+
+			if (err)
+				return err;
+		}
+	}
+
+	xa_for_each(&job->deps.native, index, fence)
+		job->deps.native_count++;
+
+	return 0;
+}
+
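+/**
+ * pvr_fw_cmd_init() - Copy a command stream from userspace and process it
+ * @pvr_dev: Target PowerVR device.
+ * @job: Job the command stream belongs to.
+ * @stream_def: Stream definition used to process the stream.
+ * @stream_userptr: Userspace pointer to the command stream.
+ * @stream_len: Length of the command stream, in bytes.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -ENOMEM on allocation failure,
+ *  * -EFAULT if the stream cannot be copied from userspace, or
+ *  * Any error returned by pvr_stream_process().
+ */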
+static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
+			   const struct pvr_stream_cmd_defs *stream_def,
+			   u64 stream_userptr, u32 stream_len)
+{
+	void *stream;
+	int err;
+
+	stream = kzalloc(stream_len, GFP_KERNEL);
+	if (!stream)
+		return -ENOMEM;
+
+	if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) {
+		err = -EFAULT;
+		goto err_free_stream;
+	}
+
+	err = pvr_stream_process(pvr_dev, stream_def, stream, stream_len, job);
+
+err_free_stream:
+	kfree(stream);
+	return err;
+}
+
+static u32
+convert_geom_flags(u32 in_flags)
+{
+	u32 out_flags = 0;
+
+	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST)
+		out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST)
+		out_flags |= ROGUE_GEOM_FLAGS_LASTKICK;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+		out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;
+
+	return out_flags;
+}
+
+static u32
+convert_frag_flags(u32 in_flags)
+{
+	u32 out_flags = 0;
+
+	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE)
+		out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER)
+		out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER)
+		out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP)
+		out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
+		out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS;
+
+	return out_flags;
+}
+
+static int
+pvr_geom_job_fw_cmd_init(struct pvr_job *job,
+			 struct drm_pvr_job *args)
+{
+	struct rogue_fwif_cmd_geom *cmd;
+	int err;
+
+	if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK)
+		return -EINVAL;
+
+	if (!to_pvr_context_render(job->ctx))
+		return -EINVAL;
+
+	if (!job->hwrt)
+		return -EINVAL;
+
+	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
+	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
+			      args->cmd_stream, args->cmd_stream_len);
+	if (err)
+		return err;
+
+	cmd = job->cmd;
+	cmd->cmd_shared.cmn.frame_num = 0;
+	cmd->flags = convert_geom_flags(args->flags);
+	pvr_gem_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+	return 0;
+}
+
+static int
+pvr_frag_job_fw_cmd_init(struct pvr_job *job,
+			 struct drm_pvr_job *args)
+{
+	struct rogue_fwif_cmd_frag *cmd;
+	int err;
+
+	if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK)
+		return -EINVAL;
+
+	if (!to_pvr_context_render(job->ctx))
+		return -EINVAL;
+
+	if (!job->hwrt)
+		return -EINVAL;
+
+	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_FRAG;
+	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream,
+			      args->cmd_stream, args->cmd_stream_len);
+	if (err)
+		return err;
+
+	cmd = job->cmd;
+	cmd->cmd_shared.cmn.frame_num = 0;
+	cmd->flags = convert_frag_flags(args->flags);
+	pvr_gem_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
+	return 0;
+}
+
+static u32
+convert_compute_flags(u32 in_flags)
+{
+	u32 out_flags = 0;
+
+	if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP)
+		out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
+	if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+		out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE;
+
+	return out_flags;
+}
+
+static int
+pvr_compute_job_fw_cmd_init(struct pvr_job *job,
+			    struct drm_pvr_job *args)
+{
+	struct rogue_fwif_cmd_compute *cmd;
+	int err;
+
+	if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK)
+		return -EINVAL;
+
+	if (!to_pvr_context_compute(job->ctx))
+		return -EINVAL;
+
+	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM;
+	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream,
+			      args->cmd_stream, args->cmd_stream_len);
+	if (err)
+		return err;
+
+	cmd = job->cmd;
+	cmd->common.frame_num = 0;
+	cmd->flags = convert_compute_flags(args->flags);
+	return 0;
+}
+
+static u32
+convert_transfer_flags(u32 in_flags)
+{
+	u32 out_flags = 0;
+
+	if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
+		out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE;
+
+	return out_flags;
+}
+
+static int
+pvr_transfer_job_fw_cmd_init(struct pvr_job *job,
+			     struct drm_pvr_job *args)
+{
+	struct rogue_fwif_cmd_transfer *cmd;
+	int err;
+
+	if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK)
+		return -EINVAL;
+
+	if (!to_pvr_context_transfer_frag(job->ctx))
+		return -EINVAL;
+
+	job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D;
+	err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream,
+			      args->cmd_stream, args->cmd_stream_len);
+	if (err)
+		return err;
+
+	cmd = job->cmd;
+	cmd->common.frame_num = 0;
+	cmd->flags = convert_transfer_flags(args->flags);
+	return 0;
+}
+
+static int
+pvr_job_fw_cmd_init(struct pvr_job *job,
+		    struct drm_pvr_job *args)
+{
+	switch (args->type) {
+	case DRM_PVR_JOB_TYPE_GEOMETRY:
+		return pvr_geom_job_fw_cmd_init(job, args);
+
+	case DRM_PVR_JOB_TYPE_FRAGMENT:
+		return pvr_frag_job_fw_cmd_init(job, args);
+
+	case DRM_PVR_JOB_TYPE_COMPUTE:
+		return pvr_compute_job_fw_cmd_init(job, args);
+
+	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+		return pvr_transfer_job_fw_cmd_init(job, args);
+
+	default:
+		return -EINVAL;
+	}
+}
+
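+/**
+ * pvr_create_job() - Create a job from IOCTL arguments
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @args: Job arguments.
+ * @signal_array: Cache of signal operations for this submission.
+ *
+ * Returns:
+ *  * The new job on success, or
+ *  * An error pointer otherwise.
+ */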
+static struct pvr_job *
+pvr_create_job(struct pvr_device *pvr_dev,
+	       struct pvr_file *pvr_file,
+	       struct drm_pvr_job *args,
+	       struct xarray *signal_array)
+{
+	struct drm_pvr_sync_op *sync_ops = NULL;
+	struct pvr_context_queue *queue;
+	struct pvr_job *job = NULL;
+	int err;
+
+	if (!args->cmd_stream || !args->cmd_stream_len)
+		return ERR_PTR(-EINVAL);
+
+	if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY &&
+	    args->type != DRM_PVR_JOB_TYPE_FRAGMENT &&
+	    (args->hwrt.set_handle || args->hwrt.data_index))
+		return ERR_PTR(-EINVAL);
+
+	job = kzalloc(sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return ERR_PTR(-ENOMEM);
+
+	xa_init_flags(&job->deps.non_native, XA_FLAGS_ALLOC);
+	xa_init_flags(&job->deps.native, XA_FLAGS_ALLOC);
+	kref_init(&job->ref_count);
+	job->pvr_dev = pvr_dev;
+	job->type = args->type;
+
+	err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
+	if (err)
+		goto err_put_job;
+
+	err = PVR_UOBJ_GET_ARRAY(sync_ops, &args->sync_ops);
+	if (err)
+		goto err_put_job;
+
+	err = pvr_sync_signal_array_collect_ops(signal_array, from_pvr_file(pvr_file),
+						args->sync_ops.count, sync_ops);
+	if (err)
+		goto err_put_job;
+
+	job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
+	if (!job->ctx) {
+		err = -EINVAL;
+		goto err_put_job;
+	}
+
+	if (args->hwrt.set_handle) {
+		job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
+						 args->hwrt.data_index);
+		if (!job->hwrt) {
+			err = -EINVAL;
+			goto err_put_job;
+		}
+	}
+
+	err = pvr_job_fw_cmd_init(job, args);
+	if (err)
+		goto err_put_job;
+
+	/* Check if the job will ever fit in the CCCB. */
+	err = pvr_job_fits_in_cccb(job);
+	if (err == -E2BIG)
+		goto err_put_job;
+
+	err = pvr_job_add_deps(pvr_file, job, args->sync_ops.count, sync_ops, signal_array);
+	if (err)
+		goto err_put_job;
+
+	queue = get_ctx_queue(job);
+	if (!queue) {
+		err = -EINVAL;
+		goto err_put_job;
+	}
+
+	job->done_fence = pvr_context_queue_fence_create(queue);
+	if (IS_ERR(job->done_fence)) {
+		err = PTR_ERR(job->done_fence);
+		job->done_fence = NULL;
+		goto err_put_job;
+	}
+
+	err = pvr_sync_signal_array_update_fences(signal_array,
+						  args->sync_ops.count, sync_ops,
+						  job->done_fence);
+	if (err)
+		goto err_put_job;
+
+	kvfree(sync_ops);
+	return job;
+
+err_put_job:
+	kvfree(sync_ops);
+	pvr_job_put(job);
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_submit_jobs() - Submit jobs to the GPU
+ * @pvr_dev: Target PowerVR device.
+ * @pvr_file: Pointer to PowerVR file structure.
+ * @args: IOCTL arguments.
+ *
+ * Jobs are pushed to their context queues and executed asynchronously; their
+ * completion is signaled through the sync operations attached to each job.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EFAULT if arguments cannot be copied from user space,
+ *  * -%EINVAL on invalid arguments, or
+ *  * Any other error.
+ */
+int
+pvr_submit_jobs(struct pvr_device *pvr_dev,
+		struct pvr_file *pvr_file,
+		struct drm_pvr_ioctl_submit_jobs_args *args)
+{
+	struct drm_pvr_job *jobs_args = NULL;
+	DEFINE_XARRAY_ALLOC(signal_array);
+	struct pvr_job **jobs = NULL;
+	int err;
+
+	if (!args->jobs.count)
+		return -EINVAL;
+
+	err = PVR_UOBJ_GET_ARRAY(jobs_args, &args->jobs);
+	if (err)
+		return err;
+
+	jobs = kvmalloc_array(args->jobs.count, sizeof(*jobs), GFP_KERNEL | __GFP_ZERO);
+	if (!jobs) {
+		err = -ENOMEM;
+		goto out_free_jobs_args;
+	}
+
+	for (u32 i = 0; i < args->jobs.count; i++) {
+		jobs[i] = pvr_create_job(pvr_dev, pvr_file, &jobs_args[i], &signal_array);
+		if (IS_ERR(jobs[i])) {
+			err = PTR_ERR(jobs[i]);
+			jobs[i] = NULL;
+			goto out_free_jobs;
+		}
+	}
+
+	for (u32 i = 0; i < args->jobs.count; i++)
+		pvr_job_push(jobs[i]);
+
+	pvr_sync_signal_array_push_fences(&signal_array);
+	err = 0;
+
+out_free_jobs:
+	for (u32 i = 0; i < args->jobs.count; i++)
+		pvr_job_put(jobs[i]);
+
+	kvfree(jobs);
+
+out_free_jobs_args:
+	kvfree(jobs_args);
+	pvr_sync_signal_array_cleanup(&signal_array);
+	return err;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_job.h b/drivers/gpu/drm/imagination/pvr_job.h
new file mode 100644
index 000000000000..a9f7ef24a759
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_job.h
@@ -0,0 +1,116 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_JOB_H__
+#define __PVR_JOB_H__
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/kref.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+
+/* Forward declaration from "pvr_context.h". */
+struct pvr_context;
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declarations from "pvr_hwrt.h". */
+struct pvr_hwrt_data;
+
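+/**
+ * struct pvr_job - Job object
+ */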
+struct pvr_job {
+	/** @ref_count: Refcount for job. */
+	struct kref ref_count;
+
+	/** @type: Type of job. */
+	enum drm_pvr_job_type type;
+
+	/** @id: Job ID number. */
+	u32 id;
+
+	/** @node: List node used to add a job to a context queue. */
+	struct list_head node;
+
+	/** @done_fence: Fence to signal when the job is done. */
+	struct dma_fence *done_fence;
+
+	/** @deps: Dependency tracking data. */
+	struct {
+		/** @cb: dma_fence callback used to get notified when a dependency signals. */
+		struct dma_fence_cb cb;
+
+		/** @cur: Current dependency we're waiting on. */
+		struct dma_fence *cur;
+
+		/** @next_index: Index of the next dependency to process. */
+		unsigned long next_index;
+
+		/** @non_native: Array containing remaining non-native dependencies to wait on. */
+		struct xarray non_native;
+
+		/** @native_count: Number of native dependencies. */
+		unsigned long native_count;
+
+		/** @native: Array containing remaining native dependencies to wait on. */
+		struct xarray native;
+	} deps;
+
+	/** @pvr_dev: Device pointer. */
+	struct pvr_device *pvr_dev;
+
+	/** @ctx: Pointer to owning context. */
+	struct pvr_context *ctx;
+
+	/** @cmd: Command data. Format depends on @type. */
+	void *cmd;
+
+	/** @cmd_len: Length of command data, in bytes. */
+	u32 cmd_len;
+
+	/**
+	 * @fw_ccb_cmd_type: Firmware CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
+	 */
+	u32 fw_ccb_cmd_type;
+
+	/** @hwrt: HWRT object. Will be NULL for compute and transfer jobs. */
+	struct pvr_hwrt_data *hwrt;
+};
+
+/**
+ * pvr_job_get() - Take additional reference on job.
+ * @job: Job pointer.
+ *
+ * Call pvr_job_put() to release.
+ *
+ * Returns:
+ *  * The requested job on success, or
+ *  * %NULL if no job pointer passed.
+ */
+static __always_inline struct pvr_job *
+pvr_job_get(struct pvr_job *job)
+{
+	if (job)
+		kref_get(&job->ref_count);
+
+	return job;
+}
+
+void pvr_job_put(struct pvr_job *job);
+
+void pvr_job_evict_signaled_native_deps(struct pvr_job *job);
+
+int pvr_job_wait_first_non_signaled_native_dep(struct pvr_job *job);
+
+bool pvr_job_non_native_deps_done(struct pvr_job *job);
+
+int pvr_job_fits_in_cccb(struct pvr_job *job);
+
+void pvr_job_submit(struct pvr_job *job);
+
+int pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
+		    struct drm_pvr_ioctl_submit_jobs_args *args);
+
+#endif /* __PVR_JOB_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_params.c b/drivers/gpu/drm/imagination/pvr_params.c
new file mode 100644
index 000000000000..20f2ba8915f4
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.c
@@ -0,0 +1,147 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_params.h"
+
+#include <linux/cache.h>
+#include <linux/moduleparam.h>
+
+static struct pvr_device_params pvr_device_param_defaults __read_mostly = {
+#define X(type_, name_, value_, desc_, ...) .name_ = (value_),
+	PVR_DEVICE_PARAMS
+#undef X
+};
+
+#define PVR_DEVICE_PARAM_NAMED(name_, type_, desc_) \
+	module_param_named(name_, pvr_device_param_defaults.name_, type_, \
+			   0400);                                         \
+	MODULE_PARM_DESC(name_, desc_);
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_MODPARAM uint
+
+#define X(type_, name_, value_, desc_, ...) \
+	PVR_DEVICE_PARAM_NAMED(name_, PVR_PARAM_TYPE_##type_##_MODPARAM, desc_);
+PVR_DEVICE_PARAMS
+#undef X
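+
+/*
+ * For illustration: with the single parameter currently in PVR_DEVICE_PARAMS,
+ * the X-macro above expands to (roughly):
+ *
+ *	module_param_named(fw_trace_mask,
+ *			   pvr_device_param_defaults.fw_trace_mask,
+ *			   uint, 0400);
+ *	MODULE_PARM_DESC(fw_trace_mask, "Enable FW trace for the specified groups. ...");
+ */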
+
+int
+pvr_device_params_init(struct pvr_device_params *params)
+{
+	/*
+	 * If heap-allocated parameters are added in the future (e.g.
+	 * modparam's charp type), they must be handled specially here (via
+	 * kstrdup() in the case of charp). Since that's not necessary yet,
+	 * a straight copy will do for now. This change will also require a
+	 * pvr_device_params_fini() function to free any heap-allocated copies.
+	 */
+
+	*params = pvr_device_param_defaults;
+
+	return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#include "pvr_device.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/stddef.h>
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_FMT "0x%08llx"
+
+#define X_SET(name_, mode_) X_SET_##mode_(name_)
+#define X_SET_DEF(name_, update_, mode_) X_SET_DEF_##mode_(name_, update_)
+
+#define X_SET_RO(name_) NULL
+#define X_SET_RW(name_) __pvr_device_param_##name_##_set
+
+#define X_SET_DEF_RO(name_, update_)
+#define X_SET_DEF_RW(name_, update_)                                    \
+	static int                                                      \
+	X_SET_RW(name_)(void *data, u64 val)                            \
+	{                                                               \
+		struct pvr_device *pvr_dev = data;                      \
+		/* This is not just (update_) to suppress -Waddress. */ \
+		if ((void *)(update_) != NULL)                          \
+			(update_)(pvr_dev, pvr_dev->params.name_, val); \
+		pvr_dev->params.name_ = val;                            \
+		return 0;                                               \
+	}
+
+#define X(type_, name_, value_, desc_, mode_, update_)                     \
+	static int                                                         \
+	__pvr_device_param_##name_##_get(void *data, u64 *val)             \
+	{                                                                  \
+		struct pvr_device *pvr_dev = data;                         \
+		*val = pvr_dev->params.name_;                              \
+		return 0;                                                  \
+	}                                                                  \
+	X_SET_DEF(name_, update_, mode_)                                   \
+	static int                                                         \
+	__pvr_device_param_##name_##_open(struct inode *inode,             \
+					  struct file *file)               \
+	{                                                                  \
+		__simple_attr_check_format(PVR_PARAM_TYPE_##type_##_FMT,   \
+					   0ull);                          \
+		return simple_attr_open(inode, file,                       \
+					__pvr_device_param_##name_##_get,  \
+					X_SET(name_, mode_),               \
+					PVR_PARAM_TYPE_##type_##_FMT);     \
+	}
+PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_SET
+#undef X_SET_RO
+#undef X_SET_RW
+#undef X_SET_DEF
+#undef X_SET_DEF_RO
+#undef X_SET_DEF_RW
+
+static struct {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+	const struct file_operations name_;
+	PVR_DEVICE_PARAMS
+#undef X
+} pvr_device_param_debugfs_fops = {
+#define X(type_, name_, value_, desc_, mode_, update_)     \
+	.name_ = {                                         \
+		.owner = THIS_MODULE,                      \
+		.open = __pvr_device_param_##name_##_open, \
+		.release = simple_attr_release,            \
+		.read = simple_attr_read,                  \
+		.write = simple_attr_write,                \
+		.llseek = generic_file_llseek,             \
+	},
+	PVR_DEVICE_PARAMS
+#undef X
+};
+
+void
+pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+#define X_MODE(mode_) X_MODE_##mode_
+#define X_MODE_RO 0400
+#define X_MODE_RW 0600
+
+#define X(type_, name_, value_, desc_, mode_, update_)             \
+	debugfs_create_file(#name_, X_MODE(mode_), dir, pvr_dev,   \
+			    &pvr_device_param_debugfs_fops.name_);
+	PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_MODE
+#undef X_MODE_RO
+#undef X_MODE_RW
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_params.h b/drivers/gpu/drm/imagination/pvr_params.h
new file mode 100644
index 000000000000..7c4b43fc7c03
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.h
@@ -0,0 +1,72 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_PARAMS_H__
+#define __PVR_PARAMS_H__
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/cache.h>
+#include <linux/compiler_attributes.h>
+
+/*
+ * This is the definitive list of types allowed in the definition of
+ * %PVR_DEVICE_PARAMS.
+ */
+#define PVR_PARAM_TYPE_X32_C u32
+
+/*
+ * This macro defines all device-specific parameters; that is, parameters
+ * which are set independently per device.
+ *
+ * The X-macro accepts the following arguments. Arguments marked with [debugfs]
+ * are ignored when debugfs is disabled; values used for these arguments may
+ * safely be gated behind CONFIG_DEBUG_FS.
+ *
+ * @type_: The definitive list of allowed values is PVR_PARAM_TYPE_*_C.
+ * @name_: Name of the parameter. This is used both as the field name in C and
+ *         stringified as the parameter name.
+ * @value_: Initial/default value.
+ * @desc_: String literal used as help text to describe the usage of this
+ *         parameter.
+ * @mode_: [debugfs] One of {RO,RW}. The access mode of the debugfs entry for
+ *         this parameter.
+ * @update_: [debugfs] When debugfs support is enabled, parameters may be
+ *           updated at runtime. When this happens, this function will be
+ *           called to allow changes to propagate. The signature of this
+ *           function is:
+ *
+ *              void (*)(struct pvr_device *pvr_dev, T old_val, T new_val)
+ *
+ *           Where T is the C type associated with @type_.
+ *
+ *           If @mode_ does not allow write access, this function will never be
+ *           called. In this case, or if no update callback is required, you
+ *           should specify NULL for this argument.
+ */
+#define PVR_DEVICE_PARAMS                                                    \
+	X(X32, fw_trace_mask, ROGUE_FWIF_LOG_TYPE_NONE,                      \
+	  "Enable FW trace for the specified groups. Specifying 0 disables " \
+	  "all FW tracing.",                                                 \
+	  RW, pvr_fw_trace_mask_update)
+
+struct pvr_device_params {
+#define X(type_, name_, value_, desc_, ...) \
+	PVR_PARAM_TYPE_##type_##_C name_;
+	PVR_DEVICE_PARAMS
+#undef X
+};
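+
+/*
+ * For illustration: with the current PVR_DEVICE_PARAMS list, this struct
+ * expands to (roughly):
+ *
+ *	struct pvr_device_params {
+ *		u32 fw_trace_mask;
+ *	};
+ */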
+
+int pvr_device_params_init(struct pvr_device_params *params);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* __PVR_PARAMS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
new file mode 100644
index 000000000000..e460309c82f6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -0,0 +1,196 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_power.h"
+#include "pvr_rogue_fwif.h"
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */
+
+#define POWER_IDLE_DELAY_JIFFIES (1)
+
+static int
+pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	u32 slot_nr;
+	u32 value;
+	int err;
+
+	WRITE_ONCE(*fw_dev->power_sync, 0);
+
+	err = pvr_kccb_send_cmd_power_locked(pvr_dev, pow_cmd, &slot_nr);
+	if (err)
+		return err;
+
+	/* Wait for FW to acknowledge. */
+	return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
+				  POWER_SYNC_TIMEOUT_US);
+}
+
+static int
+pvr_power_request_idle(struct pvr_device *pvr_dev)
+{
+	struct rogue_fwif_kccb_cmd pow_cmd;
+
+	/* Send FORCED_IDLE request to FW. */
+	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
+	pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;
+
+	return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
+{
+	struct rogue_fwif_kccb_cmd pow_cmd;
+
+	/* Send POW_OFF request to firmware. */
+	pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+	pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
+	pow_cmd.cmd_data.pow_data.power_req_data.forced = true;
+
+	return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+/**
+ * pvr_power_set_state() - Change GPU power state
+ * @pvr_dev: Target PowerVR device.
+ * @new_state: Desired power state.
+ *
+ * If the GPU is already in the desired power state then this function is a no-op.
+ *
+ * Caller must hold the device's power lock.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any appropriate error on failure.
+ */
+int
+pvr_power_set_state(struct pvr_device *pvr_dev, enum pvr_power_state new_state)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct device *dev = drm_dev->dev;
+	int err;
+
+	lockdep_assert_held(&pvr_dev->power_lock);
+
+	cancel_delayed_work(&pvr_dev->delayed_idle_work);
+
+	if (pvr_dev->power_state == new_state)
+		return 0;
+
+	switch (new_state) {
+	case PVR_POWER_STATE_OFF:
+		/* Force idle */
+		err = pvr_power_request_idle(pvr_dev);
+		if (err)
+			return err;
+
+		err = pvr_power_request_pwr_off(pvr_dev);
+		if (err)
+			return err;
+
+		err = pvr_fw_stop(pvr_dev);
+		if (err)
+			return err;
+
+		pm_runtime_put_sync_suspend(dev);
+		break;
+
+	case PVR_POWER_STATE_ON:
+		err = pm_runtime_resume_and_get(dev);
+		if (err)
+			return err;
+
+		/* Restart FW */
+		err = pvr_fw_start(pvr_dev);
+		if (err)
+			return err;
+
+		err = pvr_wait_for_fw_boot(pvr_dev);
+		if (err) {
+			drm_err(drm_dev, "Firmware failed to boot\n");
+			return err;
+		}
+		break;
+
+	default:
+		WARN_ON(true);
+		return -EINVAL;
+	}
+
+	/* Set power state */
+	pvr_dev->power_state = new_state;
+
+	return 0;
+}
+
+static void
+pvr_delayed_idle_worker(struct work_struct *work)
+{
+	struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
+						  delayed_idle_work.work);
+
+	mutex_lock(&pvr_dev->power_lock);
+
+	if (READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state) == ROGUE_FWIF_POW_IDLE)
+		pvr_power_set_state(pvr_dev, PVR_POWER_STATE_OFF);
+
+	mutex_unlock(&pvr_dev->power_lock);
+}
+
+/**
+ * pvr_power_check_idle() - Check for GPU idle, and schedule power off if required
+ * @pvr_dev: Target PowerVR device
+ *
+ * The actual power off is performed by a delayed work item. This provides
+ * hysteresis, avoiding needless power cycling when the GPU is only briefly
+ * idle.
+ */
+void
+pvr_power_check_idle(struct pvr_device *pvr_dev)
+{
+	enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
+
+	if (pow_state == ROGUE_FWIF_POW_IDLE &&
+	    !delayed_work_pending(&pvr_dev->delayed_idle_work)) {
+		queue_delayed_work(pvr_dev->irq_wq, &pvr_dev->delayed_idle_work,
+				   POWER_IDLE_DELAY_JIFFIES);
+	} else if (pow_state != ROGUE_FWIF_POW_IDLE &&
+		   delayed_work_pending(&pvr_dev->delayed_idle_work)) {
+		cancel_delayed_work(&pvr_dev->delayed_idle_work);
+	}
+}
+
+/**
+ * pvr_power_init() - Initialise power management for device
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_power_init(struct pvr_device *pvr_dev)
+{
+	mutex_init(&pvr_dev->power_lock);
+	pvr_dev->power_state = PVR_POWER_STATE_OFF;
+	INIT_DELAYED_WORK(&pvr_dev->delayed_idle_work, pvr_delayed_idle_worker);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
new file mode 100644
index 000000000000..44042cb4323c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -0,0 +1,37 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_POWER_H__
+#define __PVR_POWER_H__
+
+#include "pvr_device.h"
+
+#include <linux/mutex.h>
+
+void pvr_power_init(struct pvr_device *pvr_dev);
+int pvr_power_set_state(struct pvr_device *pvr_dev, enum pvr_power_state new_state);
+void pvr_power_check_idle(struct pvr_device *pvr_dev);
+
+/**
+ * pvr_power_lock() - Take device power lock
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This must be held before attempting to change power state.
+ */
+static __always_inline void
+pvr_power_lock(struct pvr_device *pvr_dev)
+{
+	mutex_lock(&pvr_dev->power_lock);
+}
+
+/**
+ * pvr_power_unlock() - Release device power lock
+ * @pvr_dev: Target PowerVR device.
+ */
+static __always_inline void
+pvr_power_unlock(struct pvr_device *pvr_dev)
+{
+	mutex_unlock(&pvr_dev->power_lock);
+}
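+
+/*
+ * A minimal usage sketch for a power state change (error handling elided):
+ *
+ *	pvr_power_lock(pvr_dev);
+ *	err = pvr_power_set_state(pvr_dev, PVR_POWER_STATE_ON);
+ *	pvr_power_unlock(pvr_dev);
+ */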
+
+#endif /* __PVR_POWER_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
new file mode 100644
index 000000000000..87ab4aa137ca
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h
@@ -0,0 +1,6193 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+/*  *** Autogenerated C -- do not edit ***  */
+
+#ifndef __PVR_ROGUE_CR_DEFS_H__
+#define __PVR_ROGUE_CR_DEFS_H__
+
+/* clang-format off */
+
+#define ROGUE_CR_DEFS_REVISION 1
+
+/* Register ROGUE_CR_RASTERISATION_INDIRECT */
+#define ROGUE_CR_RASTERISATION_INDIRECT 0x8238U
+#define ROGUE_CR_RASTERISATION_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_INDIRECT */
+#define ROGUE_CR_PBE_INDIRECT 0x83E0U
+#define ROGUE_CR_PBE_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_PBE_PERF_INDIRECT */
+#define ROGUE_CR_PBE_PERF_INDIRECT 0x83D8U
+#define ROGUE_CR_PBE_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_PERF_INDIRECT */
+#define ROGUE_CR_TPU_PERF_INDIRECT 0x83F0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_INDIRECT */
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT 0x8318U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT */
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT 0x8028U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_USC_PERF_INDIRECT */
+#define ROGUE_CR_USC_PERF_INDIRECT 0x8030U
+#define ROGUE_CR_USC_PERF_INDIRECT_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_BLACKPEARL_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_INDIRECT 0x8388U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT 0x83F8U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_TEXAS3_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT 0x83D0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_TEXAS_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS_PERF_INDIRECT 0x8288U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BX_TU_PERF_INDIRECT */
+#define ROGUE_CR_BX_TU_PERF_INDIRECT 0xC900U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_CLK_CTRL */
+#define ROGUE_CR_CLK_CTRL 0x0000U
+#define ROGUE_CR_CLK_CTRL__PBE2_XE__MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL__S7_TOP__MASKFULL 0xCFCF03000F3F3F0FULL
+#define ROGUE_CR_CLK_CTRL_MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_SHIFT 62U
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_CLRMSK 0x3FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_ON 0x4000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_AUTO 0x8000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_SHIFT 60U
+#define ROGUE_CR_CLK_CTRL_IPP_CLRMSK 0xCFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_IPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_ON 0x1000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_AUTO 0x2000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_SHIFT 58U
+#define ROGUE_CR_CLK_CTRL_FBC_CLRMSK 0xF3FFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_ON 0x0400000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_AUTO 0x0800000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_SHIFT 56U
+#define ROGUE_CR_CLK_CTRL_FBDC_CLRMSK 0xFCFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBDC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_ON 0x0100000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_AUTO 0x0200000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_SHIFT 54U
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_CLRMSK 0xFF3FFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_ON 0x0040000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_AUTO 0x0080000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_SHIFT 52U
+#define ROGUE_CR_CLK_CTRL_USCS_CLRMSK 0xFFCFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USCS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_ON 0x0010000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_AUTO 0x0020000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_SHIFT 50U
+#define ROGUE_CR_CLK_CTRL_PBE_CLRMSK 0xFFF3FFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_PBE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_ON 0x0004000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_AUTO 0x0008000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_SHIFT 48U
+#define ROGUE_CR_CLK_CTRL_MCU_L1_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_ON 0x0001000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_AUTO 0x0002000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_SHIFT 46U
+#define ROGUE_CR_CLK_CTRL_CDM_CLRMSK 0xFFFF3FFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_CDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_ON 0x0000400000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_AUTO 0x0000800000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_SHIFT 44U
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_CLRMSK 0xFFFFCFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_ON 0x0000100000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_AUTO 0x0000200000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT 42U
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK 0xFFFFF3FFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_ON 0x0000040000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_AUTO 0x0000080000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SHIFT 40U
+#define ROGUE_CR_CLK_CTRL_BIF_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_ON 0x0000010000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_AUTO 0x0000020000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT 28U
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_SHIFT 26U
+#define ROGUE_CR_CLK_CTRL_MCU_L0_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_SHIFT 24U
+#define ROGUE_CR_CLK_CTRL_TPU_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_SHIFT 20U
+#define ROGUE_CR_CLK_CTRL_USC_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_CTRL_USC_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_SHIFT 18U
+#define ROGUE_CR_CLK_CTRL_TLA_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_CTRL_TLA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_SHIFT 16U
+#define ROGUE_CR_CLK_CTRL_SLC_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_CTRL_SLC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_AUTO 0x0000000000020000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_SHIFT 14U
+#define ROGUE_CR_CLK_CTRL_UVS_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_CLK_CTRL_UVS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_ON 0x0000000000004000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_AUTO 0x0000000000008000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_SHIFT 12U
+#define ROGUE_CR_CLK_CTRL_PDS_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_CLK_CTRL_PDS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_ON 0x0000000000001000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_AUTO 0x0000000000002000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL_VDM_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL_VDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL_VDM_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL_PM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL_PM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL_PM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL_PM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL_GPP_SHIFT 6U
+#define ROGUE_CR_CLK_CTRL_GPP_CLRMSK 0xFFFFFFFFFFFFFF3FULL
+#define ROGUE_CR_CLK_CTRL_GPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_GPP_ON 0x0000000000000040ULL
+#define ROGUE_CR_CLK_CTRL_GPP_AUTO 0x0000000000000080ULL
+#define ROGUE_CR_CLK_CTRL_TE_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL_TE_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL_TE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TE_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL_TE_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL_TSP_SHIFT 2U
+#define ROGUE_CR_CLK_CTRL_TSP_CLRMSK 0xFFFFFFFFFFFFFFF3ULL
+#define ROGUE_CR_CLK_CTRL_TSP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TSP_ON 0x0000000000000004ULL
+#define ROGUE_CR_CLK_CTRL_TSP_AUTO 0x0000000000000008ULL
+#define ROGUE_CR_CLK_CTRL_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL_ISP_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL_ISP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_ISP_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL_ISP_AUTO 0x0000000000000002ULL
+
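+/*
+ * Each CLK_CTRL domain above is a two-bit field with OFF/ON/AUTO
+ * encodings. The _CLRMSK constants have the field bits cleared, so a
+ * read-modify-write first ANDs with the mask, then ORs in the new
+ * encoding. An illustrative sketch, assuming the driver's
+ * pvr_cr_read64()/pvr_cr_write64() register accessors and a pvr_dev
+ * device pointer, that puts the CDM clock under automatic gating:
+ *
+ *   u64 val = pvr_cr_read64(pvr_dev, ROGUE_CR_CLK_CTRL);
+ *
+ *   val &= ROGUE_CR_CLK_CTRL_CDM_CLRMSK;
+ *   val |= ROGUE_CR_CLK_CTRL_CDM_AUTO;
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_CLK_CTRL, val);
+ */
+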
+/* Register ROGUE_CR_CLK_STATUS */
+#define ROGUE_CR_CLK_STATUS 0x0008U
+#define ROGUE_CR_CLK_STATUS__PBE2_XE__MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS__S7_TOP__MASKFULL 0x00000001B3101773ULL
+#define ROGUE_CR_CLK_STATUS_MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_SHIFT 32U
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_RUNNING 0x0000000100000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_SHIFT 31U
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_RUNNING 0x0000000080000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_SHIFT 30U
+#define ROGUE_CR_CLK_STATUS_IPP_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_IPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_RUNNING 0x0000000040000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_SHIFT 29U
+#define ROGUE_CR_CLK_STATUS_FBC_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_RUNNING 0x0000000020000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_SHIFT 28U
+#define ROGUE_CR_CLK_STATUS_FBDC_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBDC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_RUNNING 0x0000000010000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_SHIFT 27U
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_RUNNING 0x0000000008000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_SHIFT 26U
+#define ROGUE_CR_CLK_STATUS_USCS_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_USCS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_RUNNING 0x0000000004000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_SHIFT 25U
+#define ROGUE_CR_CLK_STATUS_PBE_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_PBE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_RUNNING 0x0000000002000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_SHIFT 24U
+#define ROGUE_CR_CLK_STATUS_MCU_L1_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_RUNNING 0x0000000001000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_SHIFT 23U
+#define ROGUE_CR_CLK_STATUS_CDM_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_CLK_STATUS_CDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_RUNNING 0x0000000000800000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_SHIFT 22U
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_RUNNING 0x0000000000400000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT 21U
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING 0x0000000000200000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SHIFT 20U
+#define ROGUE_CR_CLK_STATUS_BIF_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_RUNNING 0x0000000000100000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT 14U
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING 0x0000000000004000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_SHIFT 13U
+#define ROGUE_CR_CLK_STATUS_MCU_L0_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_RUNNING 0x0000000000002000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_SHIFT 12U
+#define ROGUE_CR_CLK_STATUS_TPU_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_RUNNING 0x0000000000001000ULL
+#define ROGUE_CR_CLK_STATUS_USC_SHIFT 10U
+#define ROGUE_CR_CLK_STATUS_USC_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_STATUS_USC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USC_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_STATUS_TLA_SHIFT 9U
+#define ROGUE_CR_CLK_STATUS_TLA_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_STATUS_TLA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TLA_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_STATUS_SLC_SHIFT 8U
+#define ROGUE_CR_CLK_STATUS_SLC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_STATUS_SLC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SLC_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_STATUS_UVS_SHIFT 7U
+#define ROGUE_CR_CLK_STATUS_UVS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_STATUS_UVS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_UVS_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_STATUS_PDS_SHIFT 6U
+#define ROGUE_CR_CLK_STATUS_PDS_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_STATUS_PDS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PDS_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_STATUS_VDM_SHIFT 5U
+#define ROGUE_CR_CLK_STATUS_VDM_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_STATUS_VDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_VDM_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_STATUS_PM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS_PM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS_PM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS_GPP_SHIFT 3U
+#define ROGUE_CR_CLK_STATUS_GPP_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_STATUS_GPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_GPP_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_STATUS_TE_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS_TE_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS_TE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TE_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS_TSP_SHIFT 1U
+#define ROGUE_CR_CLK_STATUS_TSP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_STATUS_TSP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TSP_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_STATUS_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS_ISP_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS_ISP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_ISP_RUNNING 0x0000000000000001ULL
+
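+/*
+ * The CLK_STATUS fields are single read-only bits: _GATED (0) means the
+ * domain clock is stopped, _RUNNING (1) that it is ungated. A sketch of
+ * a busy check (pvr_cr_read64() assumed as above):
+ *
+ *   u64 status = pvr_cr_read64(pvr_dev, ROGUE_CR_CLK_STATUS);
+ *   bool cdm_busy = status & ROGUE_CR_CLK_STATUS_CDM_RUNNING;
+ */
+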
+/* Register ROGUE_CR_CORE_ID__PBVNC */
+#define ROGUE_CR_CORE_ID__PBVNC 0x0020U
+#define ROGUE_CR_CORE_ID__PBVNC__MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT 48U
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT 32U
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT 16U
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT 0U
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_CORE_ID */
+#define ROGUE_CR_CORE_ID 0x0018U
+#define ROGUE_CR_CORE_ID_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID_ID_SHIFT 16U
+#define ROGUE_CR_CORE_ID_ID_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_CORE_ID_CONFIG_SHIFT 0U
+#define ROGUE_CR_CORE_ID_CONFIG_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CORE_REVISION */
+#define ROGUE_CR_CORE_REVISION 0x0020U
+#define ROGUE_CR_CORE_REVISION_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_REVISION_DESIGNER_SHIFT 24U
+#define ROGUE_CR_CORE_REVISION_DESIGNER_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CORE_REVISION_MAJOR_SHIFT 16U
+#define ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CORE_REVISION_MINOR_SHIFT 8U
+#define ROGUE_CR_CORE_REVISION_MINOR_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_SHIFT 0U
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_CLRMSK 0xFFFFFF00U
+
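+/*
+ * CORE_REVISION packs four byte-wide fields. Because each _CLRMSK is the
+ * register value with the field bits cleared, a field is extracted by
+ * masking with the complement and shifting down, e.g. (illustrative,
+ * pvr_cr_read32() assumed):
+ *
+ *   u32 rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION);
+ *   u32 major = (rev & ~ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+ *               ROGUE_CR_CORE_REVISION_MAJOR_SHIFT;
+ *   u32 minor = (rev & ~ROGUE_CR_CORE_REVISION_MINOR_CLRMSK) >>
+ *               ROGUE_CR_CORE_REVISION_MINOR_SHIFT;
+ */
+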
+/* Register ROGUE_CR_DESIGNER_REV_FIELD1 */
+#define ROGUE_CR_DESIGNER_REV_FIELD1 0x0028U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD2 */
+#define ROGUE_CR_DESIGNER_REV_FIELD2 0x0030U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CHANGESET_NUMBER */
+#define ROGUE_CR_CHANGESET_NUMBER 0x0040U
+#define ROGUE_CR_CHANGESET_NUMBER_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT 0U
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_CTRL */
+#define ROGUE_CR_CLK_XTPLUS_CTRL 0x0080U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_MASKFULL 0x0000003FFFFF0000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_SHIFT 36U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK 0xFFFFFFCFFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_ON 0x0000001000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_AUTO 0x0000002000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT 34U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK 0xFFFFFFF3FFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_ON 0x0000000400000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_AUTO 0x0000000800000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_SHIFT 32U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK 0xFFFFFFFCFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_ON 0x0000000100000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_AUTO 0x0000000200000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT 30U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK 0xFFFFFFFF3FFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_ON 0x0000000040000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO 0x0000000080000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT 28U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT 26U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT 24U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT 22U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK 0xFFFFFFFFFF3FFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON 0x0000000000400000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO 0x0000000000800000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT 20U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT 16U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO 0x0000000000020000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_STATUS */
+#define ROGUE_CR_CLK_XTPLUS_STATUS 0x0088U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_SHIFT 10U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_SHIFT 9U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT 8U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT 7U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT 6U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT 5U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT 4U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT 3U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT 2U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT 1U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT 0U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET */
+#define ROGUE_CR_SOFT_RESET 0x0100U
+#define ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL 0xFFEFFFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_MASKFULL 0x00E7FFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT 63U
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_EN 0x8000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT 62U
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_EN 0x4000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_SHIFT 61U
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_EN 0x2000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_SHIFT 60U
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_EN 0x1000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_SHIFT 59U
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_EN 0x0800000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TE3_SHIFT 58U
+#define ROGUE_CR_SOFT_RESET_TE3_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE3_EN 0x0400000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VCE_SHIFT 57U
+#define ROGUE_CR_SOFT_RESET_VCE_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VCE_EN 0x0200000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VBS_SHIFT 56U
+#define ROGUE_CR_SOFT_RESET_VBS_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VBS_EN 0x0100000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_SHIFT 55U
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_EN 0x0080000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_SHIFT 54U
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FBA_SHIFT 53U
+#define ROGUE_CR_SOFT_RESET_FBA_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBA_EN 0x0020000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_SHIFT 51U
+#define ROGUE_CR_SOFT_RESET_FB_CDC_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_EN 0x0008000000000000ULL
+#define ROGUE_CR_SOFT_RESET_SH_SHIFT 50U
+#define ROGUE_CR_SOFT_RESET_SH_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SH_EN 0x0004000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VRDM_SHIFT 49U
+#define ROGUE_CR_SOFT_RESET_VRDM_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VRDM_EN 0x0002000000000000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_SHIFT 48U
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_EN 0x0001000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT 47U
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_EN 0x0000800000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT 46U
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_EN 0x0000400000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_SHIFT 45U
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_EN 0x0000200000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_SHIFT 44U
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SOFT_RESET_IPP_SHIFT 43U
+#define ROGUE_CR_SOFT_RESET_IPP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_IPP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_SHIFT 42U
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_EN 0x0000040000000000ULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_SHIFT 41U
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_EN 0x0000020000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_SHIFT 40U
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN 0x0000010000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_SHIFT 39U
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_SHIFT 38U
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN 0x0000004000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_SHIFT 37U
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN 0x0000002000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_SHIFT 36U
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN 0x0000001000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_SHIFT 35U
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN 0x0000000800000000ULL
+#define ROGUE_CR_SOFT_RESET_MMU_SHIFT 34U
+#define ROGUE_CR_SOFT_RESET_MMU_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MMU_EN 0x0000000400000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF1_SHIFT 33U
+#define ROGUE_CR_SOFT_RESET_BIF1_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF1_EN 0x0000000200000000ULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_GARTEN_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_CPU_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_CPU_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_CPU_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_SHIFT 31U
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN 0x0000000080000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_SHIFT 30U
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN 0x0000000040000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_SHIFT 29U
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_SHIFT 28U
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SOFT_RESET_SLC_SHIFT 27U
+#define ROGUE_CR_SOFT_RESET_SLC_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SLC_EN 0x0000000008000000ULL
+#define ROGUE_CR_SOFT_RESET_TLA_SHIFT 26U
+#define ROGUE_CR_SOFT_RESET_TLA_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TLA_EN 0x0000000004000000ULL
+#define ROGUE_CR_SOFT_RESET_UVS_SHIFT 25U
+#define ROGUE_CR_SOFT_RESET_UVS_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_UVS_EN 0x0000000002000000ULL
+#define ROGUE_CR_SOFT_RESET_TE_SHIFT 24U
+#define ROGUE_CR_SOFT_RESET_TE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SOFT_RESET_GPP_SHIFT 23U
+#define ROGUE_CR_SOFT_RESET_GPP_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SOFT_RESET_GPP_EN 0x0000000000800000ULL
+#define ROGUE_CR_SOFT_RESET_FBDC_SHIFT 22U
+#define ROGUE_CR_SOFT_RESET_FBDC_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBDC_EN 0x0000000000400000ULL
+#define ROGUE_CR_SOFT_RESET_FBC_SHIFT 21U
+#define ROGUE_CR_SOFT_RESET_FBC_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBC_EN 0x0000000000200000ULL
+#define ROGUE_CR_SOFT_RESET_PM_SHIFT 20U
+#define ROGUE_CR_SOFT_RESET_PM_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PM_EN 0x0000000000100000ULL
+#define ROGUE_CR_SOFT_RESET_PBE_SHIFT 19U
+#define ROGUE_CR_SOFT_RESET_PBE_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SOFT_RESET_PBE_EN 0x0000000000080000ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_EN 0x0000000000040000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_SHIFT 17U
+#define ROGUE_CR_SOFT_RESET_MCU_L1_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_EN 0x0000000000020000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_SHIFT 16U
+#define ROGUE_CR_SOFT_RESET_BIF_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_EN 0x0000000000010000ULL
+#define ROGUE_CR_SOFT_RESET_CDM_SHIFT 15U
+#define ROGUE_CR_SOFT_RESET_CDM_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SOFT_RESET_CDM_EN 0x0000000000008000ULL
+#define ROGUE_CR_SOFT_RESET_VDM_SHIFT 14U
+#define ROGUE_CR_SOFT_RESET_VDM_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SOFT_RESET_VDM_EN 0x0000000000004000ULL
+#define ROGUE_CR_SOFT_RESET_TESS_SHIFT 13U
+#define ROGUE_CR_SOFT_RESET_TESS_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SOFT_RESET_TESS_EN 0x0000000000002000ULL
+#define ROGUE_CR_SOFT_RESET_PDS_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET_PDS_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SOFT_RESET_PDS_EN 0x0000000000001000ULL
+#define ROGUE_CR_SOFT_RESET_ISP_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET_ISP_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SOFT_RESET_ISP_EN 0x0000000000000800ULL
+#define ROGUE_CR_SOFT_RESET_TSP_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET_TSP_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SOFT_RESET_TSP_EN 0x0000000000000400ULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET_SYSARB_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_EN 0x0000000000000020ULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_EN 0x0000000000000010ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET_MCU_L0_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_EN 0x0000000000000008ULL
+#define ROGUE_CR_SOFT_RESET_TPU_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET_TPU_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SOFT_RESET_TPU_EN 0x0000000000000004ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET_USC_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SOFT_RESET_USC_EN 0x0000000000000001ULL
+
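+/*
+ * SOFT_RESET bits are level-held: writing 1 holds the named unit in
+ * reset, writing 0 releases it. A minimal sketch pulsing the Garten
+ * (firmware processor) reset, assuming pvr_cr_write64(); real sequences
+ * typically also read the register back to fence the first write:
+ *
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+ *                  ROGUE_CR_SOFT_RESET_GARTEN_EN);
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, 0);
+ */
+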
+/* Register ROGUE_CR_SOFT_RESET2 */
+#define ROGUE_CR_SOFT_RESET2 0x0108U
+#define ROGUE_CR_SOFT_RESET2_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_SOFT_RESET2_TDM_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET2_TDM_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SOFT_RESET2_TDM_EN 0x00000800U
+#define ROGUE_CR_SOFT_RESET2_ASTC_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET2_ASTC_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SOFT_RESET2_ASTC_EN 0x00000400U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_SHIFT 9U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN 0x00000200U
+#define ROGUE_CR_SOFT_RESET2_USCPS_SHIFT 8U
+#define ROGUE_CR_SOFT_RESET2_USCPS_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SOFT_RESET2_USCPS_EN 0x00000100U
+#define ROGUE_CR_SOFT_RESET2_IPF_SHIFT 7U
+#define ROGUE_CR_SOFT_RESET2_IPF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SOFT_RESET2_IPF_EN 0x00000080U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_SHIFT 6U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_EN 0x00000040U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_EN 0x00000020U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_EN 0x00000010U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_EN 0x00000008U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SOFT_RESET2_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SOFT_RESET2_CDM_SHIFT 1U
+#define ROGUE_CR_SOFT_RESET2_CDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SOFT_RESET2_CDM_EN 0x00000002U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SOFT_RESET2_VERTEX_EN 0x00000001U
+
+/* Register ROGUE_CR_EVENT_STATUS */
+#define ROGUE_CR_EVENT_STATUS 0x0130U
+#define ROGUE_CR_EVENT_STATUS__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_STATUS__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+
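+/*
+ * EVENT_STATUS is a bit set of pending GPU events; each _EN constant is
+ * the bit tested against the register value (pending events are
+ * typically acknowledged through a companion event-clear register). A
+ * sketch of a check, with pvr_cr_read32() assumed and handle_page_fault()
+ * standing in for whatever handling the caller provides:
+ *
+ *   u32 events = pvr_cr_read32(pvr_dev, ROGUE_CR_EVENT_STATUS);
+ *
+ *   if (events & ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN)
+ *           handle_page_fault(pvr_dev);
+ */
+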
+/* Register ROGUE_CR_TIMER */
+#define ROGUE_CR_TIMER 0x0160U
+#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_SHIFT 63U
+#define ROGUE_CR_TIMER_BIT31_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_EN 0x8000000000000000ULL
+#define ROGUE_CR_TIMER_VALUE_SHIFT 0U
+#define ROGUE_CR_TIMER_VALUE_CLRMSK 0xFFFF000000000000ULL
+
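+/*
+ * TIMER holds a free-running 48-bit count in its VALUE field, with a
+ * separate flag bit (BIT31) in the top bit of the register. A sketch of
+ * reading the raw tick count (pvr_cr_read64() assumed):
+ *
+ *   u64 reg = pvr_cr_read64(pvr_dev, ROGUE_CR_TIMER);
+ *   u64 ticks = (reg & ~ROGUE_CR_TIMER_VALUE_CLRMSK) >>
+ *               ROGUE_CR_TIMER_VALUE_SHIFT;
+ */
+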
+/* Register ROGUE_CR_TLA_STATUS */
+#define ROGUE_CR_TLA_STATUS 0x0178U
+#define ROGUE_CR_TLA_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_SHIFT 39U
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_CLRMSK 0x0000007FFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_REQUEST_SHIFT 7U
+#define ROGUE_CR_TLA_STATUS_REQUEST_CLRMSK 0xFFFFFF800000007FULL
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT 1U
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK 0xFFFFFFFFFFFFFF81ULL
+#define ROGUE_CR_TLA_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_TLA_STATUS_BUSY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_TLA_STATUS_BUSY_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_PM_PARTIAL_RENDER_ENABLE */
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE 0x0338U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT 0U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN 0x00000001U
+
+/* Register ROGUE_CR_SIDEKICK_IDLE */
+#define ROGUE_CR_SIDEKICK_IDLE 0x03C8U
+#define ROGUE_CR_SIDEKICK_IDLE_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_SHIFT 6U
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_EN 0x00000040U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_SHIFT 5U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_EN 0x00000020U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_SHIFT 4U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_EN 0x00000010U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_SHIFT 3U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_EN 0x00000008U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_SHIFT 2U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN 0x00000004U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_SHIFT 1U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN 0x00000002U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_MARS_IDLE */
+#define ROGUE_CR_MARS_IDLE 0x08F8U
+#define ROGUE_CR_MARS_IDLE_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_SHIFT 2U
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_EN 0x00000004U
+#define ROGUE_CR_MARS_IDLE_CPU_SHIFT 1U
+#define ROGUE_CR_MARS_IDLE_CPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MARS_IDLE_CPU_EN 0x00000002U
+#define ROGUE_CR_MARS_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_MARS_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MARS_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS 0x0430U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL 0x00000000000000F3ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT 4U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK 0xFFFFFF0FU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0 0x0438U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1 0x0440U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2 0x0448U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 0x0450U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 0x0458U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 0x0460U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_CDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS 0x04A0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_PDS0 0x04A8U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_PDS1 0x04B0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS */
+#define ROGUE_CR_CDM_TERMINATE_PDS 0x04B8U
+#define ROGUE_CR_CDM_TERMINATE_PDS_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE 16U
+
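+/*
+ * The _ALIGNSHIFT/_ALIGNSIZE pairs describe address fields whose low
+ * alignment bits are dropped: here both addresses must be 16-byte
+ * aligned. One plausible packing (illustrative only; data_addr is a
+ * caller-supplied device-virtual address):
+ *
+ *   u64 reg = ((data_addr >> ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT)
+ *              << ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT) &
+ *             ~ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK;
+ */
+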
+/* Register ROGUE_CR_CDM_TERMINATE_PDS1 */
+#define ROGUE_CR_CDM_TERMINATE_PDS1 0x04C0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 0x04D8U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 0x04E0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_CONFIG */
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG 0x0810U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_MASKFULL 0x000001030F01FFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT 40U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN 0x0000010000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT 33U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN 0x0000000200000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT 32U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT 25U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK 0xFFFFFFFFF1FFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT 24U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN 0x0000000001000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT 16U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS 0x0000000000010000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 0x0818U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 0x0820U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1 0x0828U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2 0x0830U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1 0x0838U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 0x0840U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 0x0848U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 0x0850U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 0x0858U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 0x0860U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS 0x0868U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL 0x00000001FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR */
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR 0x0870U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG 0x0878U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL 0xFFFFFFF7FFFFFFBFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB 0x0000000000000080ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB 0x0000000000000100ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB 0x0000000000000180ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB 0x0000000000000200ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB 0x0000000000000280ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB 0x0000000000000300ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB 0x0000000000000380ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB 0x0000000000000400ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK 0xFFFFFFFFFFFFFFC1ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN 0x0000000000000001ULL
+
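+/*
+ * Illustrative sketch only: a remap range entry might be assembled from the
+ * fields above (addr_out, addr_in and entry are placeholder values, not a
+ * recommended configuration):
+ *
+ *   u64 cfg = ((u64)addr_out << ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT) |
+ *             ((u64)addr_in << ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT) |
+ *             ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB |
+ *             ((u64)entry << ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT) |
+ *             ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN;
+ */
+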
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ 0x0880U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT 1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK 0xFFFFFFC1U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA */
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA 0x0888U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL 0xFFFFFFF7FFFFFF81ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT 36U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT 32U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT 11U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN 0x0000000000000800ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT 7U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE 0x08A0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS 0x08A8U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR */
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR 0x08B0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE 0x08B8U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT 0x08C0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_DEBUG_CONFIG */
+#define ROGUE_CR_MIPS_DEBUG_CONFIG 0x08C8U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT 0U
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_EXCEPTION_STATUS */
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS 0x08D0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_MASKFULL 0x000000000000003FULL
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT 5U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN 0x00000020U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT 4U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN 0x00000010U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT 3U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN 0x00000008U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT 2U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN 0x00000004U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT 1U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN 0x00000002U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT 0U
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_STATUS */
+#define ROGUE_CR_MIPS_WRAPPER_STATUS 0x08E8U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_XPU_BROADCAST */
+#define ROGUE_CR_XPU_BROADCAST 0x0890U
+#define ROGUE_CR_XPU_BROADCAST_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_XPU_BROADCAST_MASK_SHIFT 0U
+#define ROGUE_CR_XPU_BROADCAST_MASK_CLRMSK 0xFFFFFE00U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAX */
+#define ROGUE_CR_META_SP_MSLVDATAX 0x0A00U
+#define ROGUE_CR_META_SP_MSLVDATAX_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVDATAT */
+#define ROGUE_CR_META_SP_MSLVDATAT 0x0A08U
+#define ROGUE_CR_META_SP_MSLVDATAT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL0 */
+#define ROGUE_CR_META_SP_MSLVCTRL0 0x0A10U
+#define ROGUE_CR_META_SP_MSLVCTRL0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK 0x00000003U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT 1U
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_EN 0x00000002U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVCTRL0_RD_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVCTRL1 */
+#define ROGUE_CR_META_SP_MSLVCTRL1 0x0A18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_MASKFULL 0x00000000F7F4003FULL
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT 30U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT 29U
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN 0x20000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT 28U
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN 0x10000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT 26U
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN 0x04000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT 25U
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN 0x02000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_SHIFT 24U
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_READY_EN 0x01000000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT 21U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK 0xFF1FFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT 20U
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_EN 0x00100000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT 18U
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN 0x00040000U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_SHIFT 4U
+#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVHANDSHKE */
+#define ROGUE_CR_META_SP_MSLVHANDSHKE 0x0A50U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICK */
+#define ROGUE_CR_META_SP_MSLVT0KICK 0x0A80U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT0KICKI */
+#define ROGUE_CR_META_SP_MSLVT0KICKI 0x0A88U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICK */
+#define ROGUE_CR_META_SP_MSLVT1KICK 0x0A90U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT1KICKI */
+#define ROGUE_CR_META_SP_MSLVT1KICKI 0x0A98U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICK */
+#define ROGUE_CR_META_SP_MSLVT2KICK 0x0AA0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT2KICKI */
+#define ROGUE_CR_META_SP_MSLVT2KICKI 0x0AA8U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICK */
+#define ROGUE_CR_META_SP_MSLVT3KICK 0x0AB0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVT3KICKI */
+#define ROGUE_CR_META_SP_MSLVT3KICKI 0x0AB8U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_META_SP_MSLVRST */
+#define ROGUE_CR_META_SP_MSLVRST 0x0AC0U
+#define ROGUE_CR_META_SP_MSLVRST_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_EN 0x00000001U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQSTATUS */
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS 0x0AC8U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQENABLE */
+#define ROGUE_CR_META_SP_MSLVIRQENABLE 0x0AD0U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_MASKFULL 0x000000000000000CULL
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT 3U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_EN 0x00000008U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT 2U
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_EN 0x00000004U
+
+/* Register ROGUE_CR_META_SP_MSLVIRQLEVEL */
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL 0x0AD8U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT 0U
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_MTS_SCHEDULE */
+#define ROGUE_CR_MTS_SCHEDULE 0x0B00U
+#define ROGUE_CR_MTS_SCHEDULE_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE_DM_DM_ALL 0x0000000FU
+
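+/*
+ * Illustrative sketch only: a schedule request might be assembled by OR-ing
+ * one value from each field group, e.g. a counted task for DM0 on the
+ * background context at priority 0:
+ *
+ *   u32 kick = ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 |
+ *              ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX |
+ *              ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED |
+ *              ROGUE_CR_MTS_SCHEDULE_DM_DM0;
+ */
+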
+/* Register ROGUE_CR_MTS_SCHEDULE1 */
+#define ROGUE_CR_MTS_SCHEDULE1 0x10B00U
+#define ROGUE_CR_MTS_SCHEDULE1_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE1_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE2 */
+#define ROGUE_CR_MTS_SCHEDULE2 0x20B00U
+#define ROGUE_CR_MTS_SCHEDULE2_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE2_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE3 */
+#define ROGUE_CR_MTS_SCHEDULE3 0x30B00U
+#define ROGUE_CR_MTS_SCHEDULE3_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE4 */
+#define ROGUE_CR_MTS_SCHEDULE4 0x40B00U
+#define ROGUE_CR_MTS_SCHEDULE4_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE5 */
+#define ROGUE_CR_MTS_SCHEDULE5 0x50B00U
+#define ROGUE_CR_MTS_SCHEDULE5_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE6 */
+#define ROGUE_CR_MTS_SCHEDULE6 0x60B00U
+#define ROGUE_CR_MTS_SCHEDULE6_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE7 */
+#define ROGUE_CR_MTS_SCHEDULE7 0x70B00U
+#define ROGUE_CR_MTS_SCHEDULE7_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM_ALL 0x0000000FU
+
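+/*
+ * Note (illustrative): ROGUE_CR_MTS_SCHEDULE1..7 mirror ROGUE_CR_MTS_SCHEDULE
+ * at a 0x10000 stride (0x0B00, 0x10B00, ... per the offsets above), so
+ * instance n might be addressed as:
+ *
+ *   offset = ROGUE_CR_MTS_SCHEDULE + n * 0x10000;
+ */
+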
+/* Register ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC 0x0B30U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC 0x0B38U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC 0x0B40U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC 0x0B48U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG */
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG 0x0B50U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL 0x000FF0FFFFFFF701ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL 0x0000FFFFFFFFF001ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK 0xFFFF0FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK 0xFFF00FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT 12U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT 9U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK 0xFFFFFFFFFFFFF9FFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT 8U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN 0x0000000000000100ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT 0U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META 0x0000000000000000ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE 0x0B58U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE 0x0B60U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE 0x0B68U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE 0x0B70U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE 0x0B78U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE 0x0B80U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_INTCTX */
+#define ROGUE_CR_MTS_INTCTX 0x0B98U
+#define ROGUE_CR_MTS_INTCTX_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT 22U
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK 0xC03FFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_SHIFT 18U
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_CLRMSK 0xFFC3FFFFU
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT 16U
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK 0xFFFCFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT 8U
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX */
+#define ROGUE_CR_MTS_BGCTX 0x0BA0U
+#define ROGUE_CR_MTS_BGCTX_MASKFULL 0x0000000000003FFFULL
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_SHIFT 10U
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_CLRMSK 0xFFFFC3FFU
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE */
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE 0x0BA8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT 56U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK 0x00FFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT 48U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK 0xFF00FFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT 40U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT 32U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT 24U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT 16U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MTS_GPU_INT_STATUS */
+#define ROGUE_CR_MTS_GPU_INT_STATUS 0x0BB0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT 0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_SCHEDULE_ENABLE */
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE 0x0BC8U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS 0x0BD8U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR 0x0BE8U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS 0x10BD8U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR 0x10BE8U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS 0x20BD8U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR 0x20BE8U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS 0x30BD8U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR 0x30BE8U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS 0x40BD8U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR 0x40BE8U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS 0x50BD8U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR 0x50BE8U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS 0x60BD8U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR 0x60BE8U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS 0x70BD8U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR 0x70BE8U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
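+/*
+ * Note (illustrative): the per-OS event registers repeat at a 0x10000 stride
+ * (0x0BD8, 0x10BD8, ... per the offsets above), so the status register for
+ * OS n might be computed as:
+ *
+ *   offset = ROGUE_CR_IRQ_OS0_EVENT_STATUS + n * 0x10000;
+ */
+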
+/* Register ROGUE_CR_META_BOOT */
+#define ROGUE_CR_META_BOOT 0x0BF8U
+#define ROGUE_CR_META_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_BOOT_MODE_SHIFT 0U
+#define ROGUE_CR_META_BOOT_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_BOOT_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_GARTEN_SLC */
+#define ROGUE_CR_GARTEN_SLC 0x0BB8U
+#define ROGUE_CR_GARTEN_SLC_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT 0U
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_EN 0x00000001U
+
+/* Register ROGUE_CR_PPP */
+#define ROGUE_CR_PPP 0x0CD0U
+#define ROGUE_CR_PPP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CHECKSUM_SHIFT 0U
+#define ROGUE_CR_PPP_CHECKSUM_CLRMSK 0x00000000U
+
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_MASK 0x00000003U
+/* Top-left to bottom-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TL2BR 0x00000000U
+/* Top-right to bottom-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TR2BL 0x00000001U
+/* Bottom-left to top-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BL2TR 0x00000002U
+/* Bottom-right to top-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BR2TL 0x00000003U
+
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_MASK 0x00000003U
+/* Normal render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_NORM 0x00000000U
+/* Fast 2D render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_2D 0x00000002U
+/* Fast scale render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE 0x00000003U
+
+/* Register ROGUE_CR_ISP_RENDER */
+#define ROGUE_CR_ISP_RENDER 0x0F08U
+#define ROGUE_CR_ISP_RENDER_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT 8U
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN 0x00000100U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT 7U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN 0x00000080U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT 6U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN 0x00000040U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_SHIFT 5U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_EN 0x00000020U
+#define ROGUE_CR_ISP_RENDER_RESUME_SHIFT 4U
+#define ROGUE_CR_ISP_RENDER_RESUME_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ISP_RENDER_RESUME_EN 0x00000010U
+#define ROGUE_CR_ISP_RENDER_DIR_SHIFT 2U
+#define ROGUE_CR_ISP_RENDER_DIR_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_ISP_RENDER_DIR_TL2BR 0x00000000U
+#define ROGUE_CR_ISP_RENDER_DIR_TR2BL 0x00000004U
+#define ROGUE_CR_ISP_RENDER_DIR_BL2TR 0x00000008U
+#define ROGUE_CR_ISP_RENDER_DIR_BR2TL 0x0000000CU
+#define ROGUE_CR_ISP_RENDER_MODE_SHIFT 0U
+#define ROGUE_CR_ISP_RENDER_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_ISP_RENDER_MODE_NORM 0x00000000U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_2D 0x00000002U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_SCALE 0x00000003U
+
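+/*
+ * Illustrative sketch only: a render kick value might combine a direction
+ * and a mode from the fields above, e.g. a normal render swept top-left to
+ * bottom-right:
+ *
+ *   u32 val = ROGUE_CR_ISP_RENDER_DIR_TL2BR | ROGUE_CR_ISP_RENDER_MODE_NORM;
+ */
+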
+/* Register ROGUE_CR_ISP_CTL */
+#define ROGUE_CR_ISP_CTL 0x0F38U
+#define ROGUE_CR_ISP_CTL_MASKFULL 0x00000000FFFFF3FFULL
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT 31U
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_EN 0x80000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_SHIFT 30U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_EN 0x40000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT 29U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_EN 0x20000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT 28U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_EN 0x10000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_SHIFT 27U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_EN 0x08000000U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_SHIFT 26U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_EN 0x04000000U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_SHIFT 25U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_EN 0x02000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT 23U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK 0xFE7FFFFFU
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 0x00000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 0x00800000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL 0x01000000U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT 21U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK 0xFF9FFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_SHIFT 20U
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_EN 0x00100000U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT 19U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN 0x00080000U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT 18U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN 0x00040000U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT 17U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN 0x00020000U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_SHIFT 16U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_EN 0x00010000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_SHIFT 12U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE 0x00000000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO 0x00001000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE 0x00002000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR 0x00003000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE 0x00004000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX 0x00005000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN 0x00006000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT 0x00007000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE 0x00008000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN 0x00009000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN 0x0000A000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE 0x0000B000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN 0x0000C000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN 0x0000D000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN 0x0000E000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN 0x0000F000U
+#define ROGUE_CR_ISP_CTL_VALID_ID_SHIFT 4U
+#define ROGUE_CR_ISP_CTL_VALID_ID_CLRMSK 0xFFFFFC0FU
+#define ROGUE_CR_ISP_CTL_UPASS_START_SHIFT 0U
+#define ROGUE_CR_ISP_CTL_UPASS_START_CLRMSK 0xFFFFFFF0U
+
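+/*
+ * Usage sketch (illustrative only): every field above pairs a _CLRMSK,
+ * which clears the field, with a _SHIFT, which positions a new value, so
+ * a read-modify-write of e.g. the VALID_ID field looks like:
+ *
+ *   u32 ctl = pvr_cr_read32(pvr_dev, ROGUE_CR_ISP_CTL);
+ *
+ *   ctl &= ROGUE_CR_ISP_CTL_VALID_ID_CLRMSK;
+ *   ctl |= id << ROGUE_CR_ISP_CTL_VALID_ID_SHIFT;
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_ISP_CTL, ctl);
+ *
+ * pvr_cr_read32()/pvr_cr_write32() stand in for the driver's register
+ * accessors here.
+ */
+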
+/* Register ROGUE_CR_ISP_STATUS */
+#define ROGUE_CR_ISP_STATUS 0x1038U
+#define ROGUE_CR_ISP_STATUS_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_SHIFT 2U
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_EN 0x00000004U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_SHIFT 1U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ISP_STATUS_ACTIVE_EN 0x00000002U
+#define ROGUE_CR_ISP_STATUS_EOR_SHIFT 0U
+#define ROGUE_CR_ISP_STATUS_EOR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ISP_STATUS_EOR_EN 0x00000001U
+
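+/*
+ * Usage sketch (illustrative only): _MASKFULL is the set of implemented
+ * bits, so a raw read can be sanitised before its flags are tested:
+ *
+ *   u64 val = raw_status & ROGUE_CR_ISP_STATUS_MASKFULL;
+ *   bool eor = val & ROGUE_CR_ISP_STATUS_EOR_EN;
+ */
+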
+/* Register group: ROGUE_CR_ISP_XTP_RESUME, with 64 repeats */
+#define ROGUE_CR_ISP_XTP_RESUME_REPEATCOUNT 64U
+/* Register ROGUE_CR_ISP_XTP_RESUME0 */
+#define ROGUE_CR_ISP_XTP_RESUME0 0x3A00U
+#define ROGUE_CR_ISP_XTP_RESUME0_MASKFULL 0x00000000003FF3FFULL
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK 0xFFFFFC00U
+
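+/*
+ * Usage sketch (illustrative only): repeated groups define only instance
+ * 0; instance n is reached by stepping the offset. An 8-byte stride is
+ * assumed here, which matches the 64 repeats ending exactly where
+ * ROGUE_CR_ISP_XTP_STORE0 (0x3C00) begins:
+ *
+ *   u32 offset = ROGUE_CR_ISP_XTP_RESUME0 + n * 8;
+ */
+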
+/* Register group: ROGUE_CR_ISP_XTP_STORE, with 32 repeats */
+#define ROGUE_CR_ISP_XTP_STORE_REPEATCOUNT 32U
+/* Register ROGUE_CR_ISP_XTP_STORE0 */
+#define ROGUE_CR_ISP_XTP_STORE0 0x3C00U
+#define ROGUE_CR_ISP_XTP_STORE0_MASKFULL 0x000000007F3FF3FFULL
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_SHIFT 30U
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_EN 0x40000000U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_SHIFT 29U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_EN 0x20000000U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT 28U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_EN 0x10000000U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_SHIFT 24U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_CLRMSK 0xF0FFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_BIF_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_BIF_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_CAT_BASE0 */
+#define ROGUE_CR_BIF_CAT_BASE0 0x1200U
+#define ROGUE_CR_BIF_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
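+/*
+ * Usage sketch (illustrative only): _ALIGNSHIFT/_ALIGNSIZE give the
+ * required alignment of an address field; a 4K-aligned page catalogue
+ * base is packed as:
+ *
+ *   u64 val = (pc_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+ *             << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT;
+ */
+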
+/* Register ROGUE_CR_BIF_CAT_BASE1 */
+#define ROGUE_CR_BIF_CAT_BASE1 0x1208U
+#define ROGUE_CR_BIF_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE2 */
+#define ROGUE_CR_BIF_CAT_BASE2 0x1210U
+#define ROGUE_CR_BIF_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE3 */
+#define ROGUE_CR_BIF_CAT_BASE3 0x1218U
+#define ROGUE_CR_BIF_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE4 */
+#define ROGUE_CR_BIF_CAT_BASE4 0x1220U
+#define ROGUE_CR_BIF_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE5 */
+#define ROGUE_CR_BIF_CAT_BASE5 0x1228U
+#define ROGUE_CR_BIF_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE6 */
+#define ROGUE_CR_BIF_CAT_BASE6 0x1230U
+#define ROGUE_CR_BIF_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE7 */
+#define ROGUE_CR_BIF_CAT_BASE7 0x1238U
+#define ROGUE_CR_BIF_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE_INDEX */
+#define ROGUE_CR_BIF_CAT_BASE_INDEX 0x1240U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_MASKFULL 0x00070707073F0707ULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT 48U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK 0xFFF8FFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT 40U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT 32U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT 24U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK 0xFFFFFFFFF8FFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT 19U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK 0xFFFFFFFFFFC7FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT 16U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK 0xFFFFFFFFFFF8FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT 8U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK 0xFFFFFFFFFFFFF8FFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_SHIFT 0U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK 0xFFFFFFFFFFFFFFF8ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0 0x1248U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0 0x1250U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 0x1260U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1 0x1268U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1 0x1270U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 0x1280U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY_STATUS */
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS 0x1288U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_MASKFULL 0x000000FFFFFFF0F3ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+/* Register ROGUE_CR_BIF_MMU_ENTRY */
+#define ROGUE_CR_BIF_MMU_ENTRY 0x1290U
+#define ROGUE_CR_BIF_MMU_ENTRY_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_CTRL_INVAL */
+#define ROGUE_CR_BIF_CTRL_INVAL 0x12A0U
+#define ROGUE_CR_BIF_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_INVAL_PT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_CTRL */
+#define ROGUE_CR_BIF_CTRL 0x12A8U
+#define ROGUE_CR_BIF_CTRL__XE_MEM__MASKFULL 0x000000000000033FULL
+#define ROGUE_CR_BIF_CTRL_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT 9U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_EN 0x00000200U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT 8U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN 0x00000100U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT 7U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN 0x00000080U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT 6U
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN 0x00000040U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT 5U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN 0x00000020U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT 4U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN 0x00000010U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_SHIFT 3U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_EN 0x00000008U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT 2U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_EN 0x00000004U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT 1U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN 0x00000002U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT 0U
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN 0x00000001U
+
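+/*
+ * Usage sketch (illustrative only): __XE_MEM__ definitions are layout
+ * variants of the same register offset for cores with the XE memory
+ * system, so the variant is selected per device, e.g.:
+ *
+ *   u64 mask = has_xe_mem ? ROGUE_CR_BIF_CTRL__XE_MEM__MASKFULL
+ *                         : ROGUE_CR_BIF_CTRL_MASKFULL;
+ *
+ * (has_xe_mem stands for a hypothetical per-device feature flag.)
+ */
+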
+/* Register ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS 0x12B0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
+
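+/*
+ * Usage sketch (illustrative only): decoding a fault combines the _EN
+ * tests with shifted field extraction:
+ *
+ *   if (status & ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN) {
+ *           u32 cat = (status &
+ *                      ~ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+ *                     ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+ *   }
+ */
+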
+/* Register ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS 0x12B8U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT 52U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT 46U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS 0x12C0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS */
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS 0x12C8U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_BIF_MMU_STATUS */
+#define ROGUE_CR_BIF_MMU_STATUS 0x12D0U
+#define ROGUE_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_MASKFULL 0x000000001FFFFFF7ULL
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT 28U
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_EN 0x10000000U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register group: ROGUE_CR_BIF_TILING_CFG, with 8 repeats */
+#define ROGUE_CR_BIF_TILING_CFG_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_TILING_CFG0 */
+#define ROGUE_CR_BIF_TILING_CFG0 0x12D8U
+#define ROGUE_CR_BIF_TILING_CFG0_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG1 */
+#define ROGUE_CR_BIF_TILING_CFG1 0x12E0U
+#define ROGUE_CR_BIF_TILING_CFG1_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG2 */
+#define ROGUE_CR_BIF_TILING_CFG2 0x12E8U
+#define ROGUE_CR_BIF_TILING_CFG2_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG3 */
+#define ROGUE_CR_BIF_TILING_CFG3 0x12F0U
+#define ROGUE_CR_BIF_TILING_CFG3_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG4 */
+#define ROGUE_CR_BIF_TILING_CFG4 0x12F8U
+#define ROGUE_CR_BIF_TILING_CFG4_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG5 */
+#define ROGUE_CR_BIF_TILING_CFG5 0x1300U
+#define ROGUE_CR_BIF_TILING_CFG5_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG6 */
+#define ROGUE_CR_BIF_TILING_CFG6 0x1308U
+#define ROGUE_CR_BIF_TILING_CFG6_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_TILING_CFG7 */
+#define ROGUE_CR_BIF_TILING_CFG7 0x1310U
+#define ROGUE_CR_BIF_TILING_CFG7_MASKFULL 0xFFFFFFFF0FFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT 61U
+#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_SHIFT 60U
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_EN 0x1000000000000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT 32U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE 4096U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_READS_EXT_STATUS */
+#define ROGUE_CR_BIF_READS_EXT_STATUS 0x1320U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK 0xF000FFFFU
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_READS_INT_STATUS */
+#define ROGUE_CR_BIF_READS_INT_STATUS 0x1328U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MASKFULL 0x0000000007FFFFFFULL
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_SHIFT 16U
+#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_CLRMSK 0xF800FFFFU
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_SHIFT 0U
+#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_INT_STATUS */
+#define ROGUE_CR_BIFPM_READS_INT_STATUS 0x1330U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_READS_EXT_STATUS */
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS 0x1338U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT 0U
+#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIFPM_STATUS_MMU */
+#define ROGUE_CR_BIFPM_STATUS_MMU 0x1350U
+#define ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_STATUS_MMU */
+#define ROGUE_CR_BIF_STATUS_MMU 0x1358U
+#define ROGUE_CR_BIF_STATUS_MMU_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_SHIFT 0U
+#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_FAULT_READ */
+#define ROGUE_CR_BIF_FAULT_READ 0x13E0U
+#define ROGUE_CR_BIF_FAULT_READ_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_SHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS 0x1430U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS */
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS 0x1438U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_MCU_FENCE */
+#define ROGUE_CR_MCU_FENCE 0x1740U
+#define ROGUE_CR_MCU_FENCE_MASKFULL 0x000007FFFFFFFFE0ULL
+#define ROGUE_CR_MCU_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MCU_FENCE_DM_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_MCU_FENCE_DM_VERTEX 0x0000000000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_PIXEL 0x0000010000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_COMPUTE 0x0000020000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY_VERTEX 0x0000030000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_RAY 0x0000040000000000ULL
+#define ROGUE_CR_MCU_FENCE_DM_FASTRENDER 0x0000050000000000ULL
+#define ROGUE_CR_MCU_FENCE_ADDR_SHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_CLRMSK 0xFFFFFF000000001FULL
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSHIFT 5U
+#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSIZE 32U
+
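+/*
+ * Usage sketch (illustrative only): enumerated fields such as DM define
+ * absolute, already-shifted values rather than an _EN bit, so a fence
+ * word is composed by OR-ing them in directly:
+ *
+ *   u64 fence = ROGUE_CR_MCU_FENCE_DM_COMPUTE |
+ *               (addr & ~ROGUE_CR_MCU_FENCE_ADDR_CLRMSK);
+ */
+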
+/* Register group: ROGUE_CR_SCRATCH, with 16 repeats */
+#define ROGUE_CR_SCRATCH_REPEATCOUNT 16U
+/* Register ROGUE_CR_SCRATCH0 */
+#define ROGUE_CR_SCRATCH0 0x1A00U
+#define ROGUE_CR_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH1 */
+#define ROGUE_CR_SCRATCH1 0x1A08U
+#define ROGUE_CR_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH2 */
+#define ROGUE_CR_SCRATCH2 0x1A10U
+#define ROGUE_CR_SCRATCH2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH2_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH3 */
+#define ROGUE_CR_SCRATCH3 0x1A18U
+#define ROGUE_CR_SCRATCH3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH3_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH4 */
+#define ROGUE_CR_SCRATCH4 0x1A20U
+#define ROGUE_CR_SCRATCH4_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH4_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH4_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH5 */
+#define ROGUE_CR_SCRATCH5 0x1A28U
+#define ROGUE_CR_SCRATCH5_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH5_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH5_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH6 */
+#define ROGUE_CR_SCRATCH6 0x1A30U
+#define ROGUE_CR_SCRATCH6_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH6_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH6_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH7 */
+#define ROGUE_CR_SCRATCH7 0x1A38U
+#define ROGUE_CR_SCRATCH7_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH7_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH7_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH8 */
+#define ROGUE_CR_SCRATCH8 0x1A40U
+#define ROGUE_CR_SCRATCH8_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH8_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH8_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH9 */
+#define ROGUE_CR_SCRATCH9 0x1A48U
+#define ROGUE_CR_SCRATCH9_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH9_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH9_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH10 */
+#define ROGUE_CR_SCRATCH10 0x1A50U
+#define ROGUE_CR_SCRATCH10_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH10_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH10_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH11 */
+#define ROGUE_CR_SCRATCH11 0x1A58U
+#define ROGUE_CR_SCRATCH11_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH11_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH11_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH12 */
+#define ROGUE_CR_SCRATCH12 0x1A60U
+#define ROGUE_CR_SCRATCH12_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH12_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH12_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH13 */
+#define ROGUE_CR_SCRATCH13 0x1A68U
+#define ROGUE_CR_SCRATCH13_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH13_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH13_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH14 */
+#define ROGUE_CR_SCRATCH14 0x1A70U
+#define ROGUE_CR_SCRATCH14_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH14_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH14_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SCRATCH15 */
+#define ROGUE_CR_SCRATCH15 0x1A78U
+#define ROGUE_CR_SCRATCH15_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SCRATCH15_DATA_SHIFT 0U
+#define ROGUE_CR_SCRATCH15_DATA_CLRMSK 0x00000000U
+
+/* Register group: ROGUE_CR_OS0_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS0_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS0_SCRATCH0 */
+#define ROGUE_CR_OS0_SCRATCH0 0x1A80U
+#define ROGUE_CR_OS0_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH1 */
+#define ROGUE_CR_OS0_SCRATCH1 0x1A88U
+#define ROGUE_CR_OS0_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS0_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS0_SCRATCH2 */
+#define ROGUE_CR_OS0_SCRATCH2 0x1A90U
+#define ROGUE_CR_OS0_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS0_SCRATCH3 */
+#define ROGUE_CR_OS0_SCRATCH3 0x1A98U
+#define ROGUE_CR_OS0_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS0_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS0_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS1_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS1_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS1_SCRATCH0 */
+#define ROGUE_CR_OS1_SCRATCH0 0x11A80U
+#define ROGUE_CR_OS1_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH1 */
+#define ROGUE_CR_OS1_SCRATCH1 0x11A88U
+#define ROGUE_CR_OS1_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS1_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS1_SCRATCH2 */
+#define ROGUE_CR_OS1_SCRATCH2 0x11A90U
+#define ROGUE_CR_OS1_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS1_SCRATCH3 */
+#define ROGUE_CR_OS1_SCRATCH3 0x11A98U
+#define ROGUE_CR_OS1_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS1_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS1_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS2_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS2_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS2_SCRATCH0 */
+#define ROGUE_CR_OS2_SCRATCH0 0x21A80U
+#define ROGUE_CR_OS2_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH1 */
+#define ROGUE_CR_OS2_SCRATCH1 0x21A88U
+#define ROGUE_CR_OS2_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS2_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS2_SCRATCH2 */
+#define ROGUE_CR_OS2_SCRATCH2 0x21A90U
+#define ROGUE_CR_OS2_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS2_SCRATCH3 */
+#define ROGUE_CR_OS2_SCRATCH3 0x21A98U
+#define ROGUE_CR_OS2_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS2_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS2_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS3_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS3_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS3_SCRATCH0 */
+#define ROGUE_CR_OS3_SCRATCH0 0x31A80U
+#define ROGUE_CR_OS3_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH1 */
+#define ROGUE_CR_OS3_SCRATCH1 0x31A88U
+#define ROGUE_CR_OS3_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS3_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS3_SCRATCH2 */
+#define ROGUE_CR_OS3_SCRATCH2 0x31A90U
+#define ROGUE_CR_OS3_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS3_SCRATCH3 */
+#define ROGUE_CR_OS3_SCRATCH3 0x31A98U
+#define ROGUE_CR_OS3_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS3_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS3_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS4_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS4_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS4_SCRATCH0 */
+#define ROGUE_CR_OS4_SCRATCH0 0x41A80U
+#define ROGUE_CR_OS4_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH1 */
+#define ROGUE_CR_OS4_SCRATCH1 0x41A88U
+#define ROGUE_CR_OS4_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS4_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS4_SCRATCH2 */
+#define ROGUE_CR_OS4_SCRATCH2 0x41A90U
+#define ROGUE_CR_OS4_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS4_SCRATCH3 */
+#define ROGUE_CR_OS4_SCRATCH3 0x41A98U
+#define ROGUE_CR_OS4_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS4_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS4_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS5_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS5_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS5_SCRATCH0 */
+#define ROGUE_CR_OS5_SCRATCH0 0x51A80U
+#define ROGUE_CR_OS5_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH1 */
+#define ROGUE_CR_OS5_SCRATCH1 0x51A88U
+#define ROGUE_CR_OS5_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS5_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS5_SCRATCH2 */
+#define ROGUE_CR_OS5_SCRATCH2 0x51A90U
+#define ROGUE_CR_OS5_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS5_SCRATCH3 */
+#define ROGUE_CR_OS5_SCRATCH3 0x51A98U
+#define ROGUE_CR_OS5_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS5_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS5_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS6_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS6_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS6_SCRATCH0 */
+#define ROGUE_CR_OS6_SCRATCH0 0x61A80U
+#define ROGUE_CR_OS6_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH1 */
+#define ROGUE_CR_OS6_SCRATCH1 0x61A88U
+#define ROGUE_CR_OS6_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS6_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS6_SCRATCH2 */
+#define ROGUE_CR_OS6_SCRATCH2 0x61A90U
+#define ROGUE_CR_OS6_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS6_SCRATCH3 */
+#define ROGUE_CR_OS6_SCRATCH3 0x61A98U
+#define ROGUE_CR_OS6_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS6_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS6_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register group: ROGUE_CR_OS7_SCRATCH, with 2 repeats */
+#define ROGUE_CR_OS7_SCRATCH_REPEATCOUNT 2U
+/* Register ROGUE_CR_OS7_SCRATCH0 */
+#define ROGUE_CR_OS7_SCRATCH0 0x71A80U
+#define ROGUE_CR_OS7_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH0_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH0_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH1 */
+#define ROGUE_CR_OS7_SCRATCH1 0x71A88U
+#define ROGUE_CR_OS7_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_OS7_SCRATCH1_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH1_DATA_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OS7_SCRATCH2 */
+#define ROGUE_CR_OS7_SCRATCH2 0x71A90U
+#define ROGUE_CR_OS7_SCRATCH2_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH2_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_OS7_SCRATCH3 */
+#define ROGUE_CR_OS7_SCRATCH3 0x71A98U
+#define ROGUE_CR_OS7_SCRATCH3_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_OS7_SCRATCH3_DATA_SHIFT 0U
+#define ROGUE_CR_OS7_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR 0x2700U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT 0U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK 0xFFFF0000U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN */
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN 0x2708U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL 0x000000FFFFFFFFF0ULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE 16U
+
+/* Register group: ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT 16U
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 0x3000U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE 4096U
+
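+/*
+ * Usage sketch (illustrative only): a remap entry is built by OR-ing the
+ * shifted fields, e.g. a fetchable, load/store-capable window:
+ *
+ *   u64 cfg = ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN |
+ *             ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN |
+ *             ((u64)size_pages << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) |
+ *             (dev_addr & ~ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK);
+ *
+ * (size_pages and dev_addr are illustrative inputs.)
+ */
+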
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 0x3008U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 0x3010U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 0x3018U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 0x3020U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 0x3028U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 0x3030U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 0x3038U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 0x3040U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 0x3048U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 0x3050U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 0x3058U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 0x3060U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 0x3068U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 0x3070U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 */
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 0x3078U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL 0x7FFFF7FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT 62U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN 0x4000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT 61U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN 0x2000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT 60U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN 0x1000000000000000ULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT 44U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT 40U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_BOOT */
+#define ROGUE_CR_FWCORE_BOOT 0x3090U
+#define ROGUE_CR_FWCORE_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_BOOT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_RESET_ADDR */
+#define ROGUE_CR_FWCORE_RESET_ADDR 0x3098U
+#define ROGUE_CR_FWCORE_RESET_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE 2U
+
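+/*
+ * Hypothetical sketch: start the wrapped firmware core by programming its
+ * 2-byte-aligned reset vector and then setting the boot-enable bit. The
+ * pvr_cr_write32() accessor and 'struct pvr_device' below are assumptions
+ * standing in for the driver's real MMIO helpers.
+ */
+struct pvr_device;
+void pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val);
+
+static inline void pvr_fwcore_boot_example(struct pvr_device *pvr_dev,
+					   u32 reset_addr)
+{
+	/* ADDR_CLRMSK keeps only bit 0, so ~ADDR_CLRMSK enforces alignment. */
+	pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_RESET_ADDR,
+		       reset_addr & ~ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK);
+	pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_BOOT,
+		       ROGUE_CR_FWCORE_BOOT_ENABLE_EN);
+}
+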
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR 0x30A0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL 0x00000000FFFFFFFEULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK 0x00000001U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT 1U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE 2U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT */
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT 0x30A8U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS 0x30B0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL 0x000000000000F771ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS */
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS 0x30B8U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL 0x001FFFFFFFFFFFF0ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT 52U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN 0x0010000000000000ULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT 46U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT 40U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_FWCORE_MEM_CTRL_INVAL */
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL 0x30C0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT 3U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN 0x00000008U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_MMU_STATUS */
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS 0x30C8U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT 20U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT 4U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT 2U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN 0x00000004U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT 1U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN 0x00000002U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS 0x30D8U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK 0xFFFFF000U
+
+/* Register ROGUE_CR_FWCORE_MEM_READS_INT_STATUS */
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS 0x30E0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT 0U
+#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK 0xFFFFF800U
+
+/* Register ROGUE_CR_FWCORE_WRAPPER_FENCE */
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE 0x30E8U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT 0U
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_EN 0x00000001U
+
+/* Register group: ROGUE_CR_FWCORE_MEM_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE0 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0 0x30F0U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE1 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1 0x30F8U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE2 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2 0x3100U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE3 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3 0x3108U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE4 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4 0x3110U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE5 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5 0x3118U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE6 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6 0x3120U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE7 */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7 0x3128U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
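+/*
+ * Illustrative only: registers in a "repeat" group are spaced 8 bytes apart,
+ * so entry n (0 <= n < ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT) can be
+ * addressed from the group base. This helper macro is hypothetical.
+ */
+#define ROGUE_CR_FWCORE_MEM_CAT_BASE_N(n) \
+	(ROGUE_CR_FWCORE_MEM_CAT_BASE0 + (n) * 8U)
+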
+/* Register ROGUE_CR_FWCORE_WDT_RESET */
+#define ROGUE_CR_FWCORE_WDT_RESET 0x3130U
+#define ROGUE_CR_FWCORE_WDT_RESET_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_RESET_EN_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_CTRL */
+#define ROGUE_CR_FWCORE_WDT_CTRL 0x3138U
+#define ROGUE_CR_FWCORE_WDT_CTRL_MASKFULL 0x00000000FFFF1F01ULL
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_SHIFT 16U
+#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT 8U
+#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK 0xFFFFE0FFU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FWCORE_WDT_COUNT */
+#define ROGUE_CR_FWCORE_WDT_COUNT 0x3140U
+#define ROGUE_CR_FWCORE_WDT_COUNT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_SHIFT 0U
+#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK 0x00000000U
+
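+/*
+ * Hypothetical sketch: arm the firmware-core watchdog. The THRESHOLD units
+ * are not documented in this header, so the value is purely illustrative;
+ * pvr_cr_write32() is the assumed accessor declared in the boot sketch above.
+ */
+static inline void pvr_fwcore_wdt_enable_example(struct pvr_device *pvr_dev)
+{
+	pvr_cr_write32(pvr_dev, ROGUE_CR_FWCORE_WDT_CTRL,
+		       (0x10 << ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT) |
+		       ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN);
+}
+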
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED0, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED00 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED00 0x3400U
+#define ROGUE_CR_FWCORE_DMI_RESERVED00_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED01 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED01 0x3408U
+#define ROGUE_CR_FWCORE_DMI_RESERVED01_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED02 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED02 0x3410U
+#define ROGUE_CR_FWCORE_DMI_RESERVED02_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED03 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED03 0x3418U
+#define ROGUE_CR_FWCORE_DMI_RESERVED03_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA0 */
+#define ROGUE_CR_FWCORE_DMI_DATA0 0x3420U
+#define ROGUE_CR_FWCORE_DMI_DATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DATA1 */
+#define ROGUE_CR_FWCORE_DMI_DATA1 0x3428U
+#define ROGUE_CR_FWCORE_DMI_DATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED1, with 5 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT 5U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED10 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED10 0x3430U
+#define ROGUE_CR_FWCORE_DMI_RESERVED10_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED11 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED11 0x3438U
+#define ROGUE_CR_FWCORE_DMI_RESERVED11_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED12 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED12 0x3440U
+#define ROGUE_CR_FWCORE_DMI_RESERVED12_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED13 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED13 0x3448U
+#define ROGUE_CR_FWCORE_DMI_RESERVED13_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED14 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED14 0x3450U
+#define ROGUE_CR_FWCORE_DMI_RESERVED14_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMCONTROL */
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL 0x3480U
+#define ROGUE_CR_FWCORE_DMI_DMCONTROL_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_DMSTATUS */
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS 0x3488U
+#define ROGUE_CR_FWCORE_DMI_DMSTATUS_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED2, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED20 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED20 0x3490U
+#define ROGUE_CR_FWCORE_DMI_RESERVED20_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED21 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED21 0x3498U
+#define ROGUE_CR_FWCORE_DMI_RESERVED21_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED22 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED22 0x34A0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED22_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED23 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED23 0x34A8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED23_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_ABSTRACTCS */
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS 0x34B0U
+#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_COMMAND */
+#define ROGUE_CR_FWCORE_DMI_COMMAND 0x34B8U
+#define ROGUE_CR_FWCORE_DMI_COMMAND_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBCS */
+#define ROGUE_CR_FWCORE_DMI_SBCS 0x35C0U
+#define ROGUE_CR_FWCORE_DMI_SBCS_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBADDRESS0 */
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0 0x35C8U
+#define ROGUE_CR_FWCORE_DMI_SBADDRESS0_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED3, with 2 repeats */
+#define ROGUE_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT 2U
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED30 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED30 0x34D0U
+#define ROGUE_CR_FWCORE_DMI_RESERVED30_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_RESERVED31 */
+#define ROGUE_CR_FWCORE_DMI_RESERVED31 0x34D8U
+#define ROGUE_CR_FWCORE_DMI_RESERVED31_MASKFULL 0x0000000000000000ULL
+
+/* Register group: ROGUE_CR_FWCORE_DMI_SBDATA, with 4 repeats */
+#define ROGUE_CR_FWCORE_DMI_SBDATA_REPEATCOUNT 4U
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA0 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA0 0x35E0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA1 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA1 0x35E8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA1_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA2 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA2 0x35F0U
+#define ROGUE_CR_FWCORE_DMI_SBDATA2_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_SBDATA3 */
+#define ROGUE_CR_FWCORE_DMI_SBDATA3 0x35F8U
+#define ROGUE_CR_FWCORE_DMI_SBDATA3_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_FWCORE_DMI_HALTSUM0 */
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0 0x3600U
+#define ROGUE_CR_FWCORE_DMI_HALTSUM0_MASKFULL 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC */
+#define ROGUE_CR_SLC_CTRL_MISC 0x3800U
+#define ROGUE_CR_SLC_CTRL_MISC_MASKFULL 0xFFFFFFFF01FF010FULL
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE 0x0000000000000000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 0x0000000000110000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE 0x0000000000210000ULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_CTRL_FLUSH_INVAL */
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL 0x3818U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL 0x0000000080000FFFULL
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN 0x80000000U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN 0x00000800U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN 0x00000400U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN 0x00000200U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN 0x00000100U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN 0x00000080U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN 0x00000040U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN 0x00000020U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN 0x00000010U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN 0x00000008U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN 0x00000002U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS0 */
+#define ROGUE_CR_SLC_STATUS0 0x3820U
+#define ROGUE_CR_SLC_STATUS0_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT 2U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN 0x00000004U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_SHIFT 1U
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN 0x00000002U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN 0x00000001U
+
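+/*
+ * Hypothetical sketch: request a full SLC flush + invalidate, then busy-wait
+ * until all pending bits clear. pvr_cr_read32() is an assumed accessor, as
+ * with pvr_cr_write32() above; cpu_relax() is the standard kernel helper.
+ */
+u32 pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg);
+
+static inline void pvr_slc_flush_inval_example(struct pvr_device *pvr_dev)
+{
+	pvr_cr_write32(pvr_dev, ROGUE_CR_SLC_CTRL_FLUSH_INVAL,
+		       ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+	while (pvr_cr_read32(pvr_dev, ROGUE_CR_SLC_STATUS0) &
+	       (ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN |
+		ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN |
+		ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN))
+		cpu_relax();
+}
+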
+/* Register ROGUE_CR_SLC_CTRL_BYPASS */
+#define ROGUE_CR_SLC_CTRL_BYPASS 0x3828U
+#define ROGUE_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL 0x0FFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT 59U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN 0x0800000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT 58U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN 0x0400000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT 57U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN 0x0200000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT 56U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN 0x0100000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT 55U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN 0x0080000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT 54U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT 53U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN 0x0020000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT 52U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK 0xFFEFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN 0x0010000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT 51U
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN 0x0008000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT 50U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_EN 0x0004000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT 49U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN 0x0002000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT 48U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN 0x0001000000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT 47U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN 0x0000800000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT 46U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN 0x0000400000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT 45U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_EN 0x0000200000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT 44U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT 43U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT 42U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_EN 0x0000040000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT 41U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_EN 0x0000020000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT 40U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_EN 0x0000010000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT 39U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT 38U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN 0x0000004000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT 37U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN 0x0000002000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT 36U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_EN 0x0000001000000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT 35U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN 0x0000000800000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT 34U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN 0x0000000400000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT 33U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN 0x0000000200000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT 32U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN 0x0000000100000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT 31U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN 0x0000000080000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT 30U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN 0x0000000040000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT 29U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT 28U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT 27U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN 0x0000000008000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT 26U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_EN 0x0000000004000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT 25U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN 0x0000000002000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT 24U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_EN 0x0000000001000000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT 23U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN 0x0000000000800000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT 22U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_EN 0x0000000000400000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT 21U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN 0x0000000000200000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT 20U
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_EN 0x0000000000100000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT 19U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_EN 0x0000000000080000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT 18U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_EN 0x0000000000040000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT 17U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_EN 0x0000000000020000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT 16U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN 0x0000000000010000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT 15U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN 0x0000000000008000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT 14U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_EN 0x0000000000004000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT 13U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_EN 0x0000000000002000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT 12U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_EN 0x0000000000001000ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT 11U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN 0x0000000000000800ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT 10U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN 0x0000000000000400ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT 9U
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN 0x0000000000000200ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT 8U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_EN 0x0000000000000100ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT 7U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_EN 0x0000000000000080ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT 6U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_EN 0x0000000000000040ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT 5U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN 0x0000000000000020ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT 4U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_EN 0x0000000000000010ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT 3U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN 0x0000000000000008ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT 2U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN 0x0000000000000004ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT 1U
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_EN 0x0000000000000002ULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC_STATUS1 */
+#define ROGUE_CR_SLC_STATUS1 0x3870U
+#define ROGUE_CR_SLC_STATUS1_MASKFULL 0x800003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_SHIFT 63U
+#define ROGUE_CR_SLC_STATUS1_PAUSED_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_PAUSED_EN 0x8000000000000000ULL
+#define ROGUE_CR_SLC_STATUS1_READS1_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS1_READS1_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS1_READS0_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS1_READS1_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS1_READS0_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_IDLE */
+#define ROGUE_CR_SLC_IDLE 0x3898U
+#define ROGUE_CR_SLC_IDLE__XE_MEM__MASKFULL 0x00000000000003FFULL
+#define ROGUE_CR_SLC_IDLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_SHIFT 9U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_EN 0x00000200U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_SHIFT 8U
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_EN 0x00000100U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_SHIFT 7U
+#define ROGUE_CR_SLC_IDLE_IMGBV4_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SLC_IDLE_IMGBV4_EN 0x00000080U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_SHIFT 6U
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_EN 0x00000040U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_SHIFT 5U
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SLC_IDLE_RBOFIFO_EN 0x00000020U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_SHIFT 4U
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SLC_IDLE_FRC_CONV_EN 0x00000010U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_SHIFT 3U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SLC_IDLE_VXE_CONV_EN 0x00000008U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_SHIFT 2U
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SLC_IDLE_VXD_CONV_EN 0x00000004U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_SHIFT 1U
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC_IDLE_BIF1_CONV_EN 0x00000002U
+#define ROGUE_CR_SLC_IDLE_CBAR_SHIFT 0U
+#define ROGUE_CR_SLC_IDLE_CBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_IDLE_CBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC_STATUS2 */
+#define ROGUE_CR_SLC_STATUS2 0x3908U
+#define ROGUE_CR_SLC_STATUS2_MASKFULL 0x000003FF03FFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_SHIFT 32U
+#define ROGUE_CR_SLC_STATUS2_READS3_CLRMSK 0xFFFFFC00FFFFFFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_SHIFT 16U
+#define ROGUE_CR_SLC_STATUS2_READS2_CLRMSK 0xFFFFFFFFFC00FFFFULL
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_SHIFT 8U
+#define ROGUE_CR_SLC_STATUS2_READS3_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_SHIFT 0U
+#define ROGUE_CR_SLC_STATUS2_READS2_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_SLC_CTRL_MISC2 */
+#define ROGUE_CR_SLC_CTRL_MISC2 0x3930U
+#define ROGUE_CR_SLC_CTRL_MISC2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE */
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE 0x3938U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT 0U
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_UVS0_CHECKSUM */
+#define ROGUE_CR_USC_UVS0_CHECKSUM 0x5000U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS1_CHECKSUM */
+#define ROGUE_CR_USC_UVS1_CHECKSUM 0x5008U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS2_CHECKSUM */
+#define ROGUE_CR_USC_UVS2_CHECKSUM 0x5010U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS3_CHECKSUM */
+#define ROGUE_CR_USC_UVS3_CHECKSUM 0x5018U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_SIGNATURE */
+#define ROGUE_CR_PPP_SIGNATURE 0x5020U
+#define ROGUE_CR_PPP_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_SIGNATURE */
+#define ROGUE_CR_TE_SIGNATURE 0x5028U
+#define ROGUE_CR_TE_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TE_CHECKSUM */
+#define ROGUE_CR_TE_CHECKSUM 0x5110U
+#define ROGUE_CR_TE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVB_CHECKSUM */
+#define ROGUE_CR_USC_UVB_CHECKSUM 0x5118U
+#define ROGUE_CR_USC_UVB_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VCE_CHECKSUM */
+#define ROGUE_CR_VCE_CHECKSUM 0x5030U
+#define ROGUE_CR_VCE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_VCE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_PDS_CHECKSUM */
+#define ROGUE_CR_ISP_PDS_CHECKSUM 0x5038U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_ISP_TPF_CHECKSUM */
+#define ROGUE_CR_ISP_TPF_CHECKSUM 0x5040U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE0_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM 0x5048U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TFPU_PLANE1_CHECKSUM */
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM 0x5050U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_CHECKSUM */
+#define ROGUE_CR_PBE_CHECKSUM 0x5058U
+#define ROGUE_CR_PBE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PBE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PDS_DOUTM_STM_SIGNATURE */
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE 0x5060U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT 0U
+#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_IFPU_ISP_CHECKSUM */
+#define ROGUE_CR_IFPU_ISP_CHECKSUM 0x5068U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS4_CHECKSUM */
+#define ROGUE_CR_USC_UVS4_CHECKSUM 0x5100U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_UVS5_CHECKSUM */
+#define ROGUE_CR_USC_UVS5_CHECKSUM 0x5108U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PPP_CLIP_CHECKSUM */
+#define ROGUE_CR_PPP_CLIP_CHECKSUM 0x5120U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_PHASE */
+#define ROGUE_CR_PERF_TA_PHASE 0x6008U
+#define ROGUE_CR_PERF_TA_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_PHASE */
+#define ROGUE_CR_PERF_3D_PHASE 0x6010U
+#define ROGUE_CR_PERF_3D_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_PHASE */
+#define ROGUE_CR_PERF_COMPUTE_PHASE 0x6018U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_CYCLE */
+#define ROGUE_CR_PERF_TA_CYCLE 0x6020U
+#define ROGUE_CR_PERF_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_CYCLE */
+#define ROGUE_CR_PERF_3D_CYCLE 0x6028U
+#define ROGUE_CR_PERF_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_COMPUTE_CYCLE */
+#define ROGUE_CR_PERF_COMPUTE_CYCLE 0x6030U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_TA_OR_3D_CYCLE */
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE 0x6038U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_INITIAL_TA_CYCLE */
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE 0x6040U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_READ_STALL */
+#define ROGUE_CR_PERF_SLC0_READ_STALL 0x60B8U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL 0x60C0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_READ_STALL */
+#define ROGUE_CR_PERF_SLC1_READ_STALL 0x60E0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL 0x60E8U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_READ_STALL */
+#define ROGUE_CR_PERF_SLC2_READ_STALL 0x6158U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL 0x6160U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_READ_STALL */
+#define ROGUE_CR_PERF_SLC3_READ_STALL 0x6180U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL 0x6188U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_SPINUP */
+#define ROGUE_CR_PERF_3D_SPINUP 0x6220U
+#define ROGUE_CR_PERF_3D_SPINUP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_SHIFT 0U
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_AXI_ACE_LITE_CONFIGURATION */
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION 0x38C0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL 0x00003FFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT 45U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN 0x0000200000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT 37U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK 0xFFFFE01FFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT 36U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK \
+	0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN \
+	0x0000001000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT 35U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN 0x0000000800000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT 34U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN 0x0000000400000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT 30U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFC3FFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT 26U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK 0xFFFFFFFFC3FFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT 22U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK 0xFFFFFFFFFC3FFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT 20U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT 18U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT 16U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT 14U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT 12U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT 10U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT 8U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT 4U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT 0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFFF0ULL
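+
+/*
+ * Usage sketch (illustrative only): multi-field registers such as this one
+ * are updated read-modify-write style, clearing a field with its CLRMSK and
+ * or-ing in either a value shifted by _SHIFT (wider fields) or the _EN
+ * constant (single-bit fields). The pvr_cr_read64()/pvr_cr_write64() helper
+ * names and the domain variable are assumptions.
+ *
+ *   u64 val = pvr_cr_read64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION);
+ *   val &= ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK;
+ *   val |= (u64)domain << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT;
+ *   val |= ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN;
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION, val);
+ */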
+
+/* Register ROGUE_CR_POWER_ESTIMATE_RESULT */
+#define ROGUE_CR_POWER_ESTIMATE_RESULT 0x6328U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT 0U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF */
+#define ROGUE_CR_TA_PERF 0x7600U
+#define ROGUE_CR_TA_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TA_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TA_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TA_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TA_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TA_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TA_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TA_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TA_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TA_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TA_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TA_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TA_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TA_PERF_SELECT0 */
+#define ROGUE_CR_TA_PERF_SELECT0 0x7608U
+#define ROGUE_CR_TA_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
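+
+/*
+ * Usage sketch (illustrative only): a perf counter group is typically
+ * programmed by writing the group/bit selection into a SELECT register and
+ * then setting CTRL_ENABLE in the corresponding *_PERF register (the CLR_n
+ * bits reset individual counters). Helper names and the group/bits values
+ * are assumptions; they must fit the 5-bit and 16-bit fields respectively.
+ *
+ *   pvr_cr_write64(pvr_dev, ROGUE_CR_TA_PERF_SELECT0,
+ *                  ((u64)group << ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) |
+ *                  ((u64)bits << ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT));
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_TA_PERF, ROGUE_CR_TA_PERF_CTRL_ENABLE_EN);
+ */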
+
+/* Register ROGUE_CR_TA_PERF_SELECT1 */
+#define ROGUE_CR_TA_PERF_SELECT1 0x7610U
+#define ROGUE_CR_TA_PERF_SELECT1_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT2 */
+#define ROGUE_CR_TA_PERF_SELECT2 0x7618U
+#define ROGUE_CR_TA_PERF_SELECT2_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT3 */
+#define ROGUE_CR_TA_PERF_SELECT3 0x7620U
+#define ROGUE_CR_TA_PERF_SELECT3_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECTED_BITS */
+#define ROGUE_CR_TA_PERF_SELECTED_BITS 0x7648U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_0 */
+#define ROGUE_CR_TA_PERF_COUNTER_0 0x7650U
+#define ROGUE_CR_TA_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_1 */
+#define ROGUE_CR_TA_PERF_COUNTER_1 0x7658U
+#define ROGUE_CR_TA_PERF_COUNTER_1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_2 */
+#define ROGUE_CR_TA_PERF_COUNTER_2 0x7660U
+#define ROGUE_CR_TA_PERF_COUNTER_2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_3 */
+#define ROGUE_CR_TA_PERF_COUNTER_3 0x7668U
+#define ROGUE_CR_TA_PERF_COUNTER_3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RASTERISATION_PERF */
+#define ROGUE_CR_RASTERISATION_PERF 0x7700U
+#define ROGUE_CR_RASTERISATION_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_SELECT0 */
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0 0x7708U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_RASTERISATION_PERF_COUNTER_0 */
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0 0x7750U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF 0x7800U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 0x7808U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 0x7850U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF */
+#define ROGUE_CR_TPU_MCU_L0_PERF 0x7900U
+#define ROGUE_CR_TPU_MCU_L0_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 0x7908U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 0x7950U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_PERF */
+#define ROGUE_CR_USC_PERF 0x8100U
+#define ROGUE_CR_USC_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_USC_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_USC_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_USC_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_USC_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_USC_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_USC_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_USC_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_USC_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_USC_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_USC_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_USC_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_USC_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_PERF_SELECT0 */
+#define ROGUE_CR_USC_PERF_SELECT0 0x8108U
+#define ROGUE_CR_USC_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_USC_PERF_COUNTER_0 */
+#define ROGUE_CR_USC_PERF_COUNTER_0 0x8150U
+#define ROGUE_CR_USC_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_IDLE */
+#define ROGUE_CR_JONES_IDLE 0x8328U
+#define ROGUE_CR_JONES_IDLE_MASKFULL 0x0000000000007FFFULL
+#define ROGUE_CR_JONES_IDLE_TDM_SHIFT 14U
+#define ROGUE_CR_JONES_IDLE_TDM_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_JONES_IDLE_TDM_EN 0x00004000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_SHIFT 13U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_EN 0x00002000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_SHIFT 12U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_EN 0x00001000U
+#define ROGUE_CR_JONES_IDLE_MMU_SHIFT 11U
+#define ROGUE_CR_JONES_IDLE_MMU_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_JONES_IDLE_MMU_EN 0x00000800U
+#define ROGUE_CR_JONES_IDLE_TLA_SHIFT 10U
+#define ROGUE_CR_JONES_IDLE_TLA_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_JONES_IDLE_TLA_EN 0x00000400U
+#define ROGUE_CR_JONES_IDLE_GARTEN_SHIFT 9U
+#define ROGUE_CR_JONES_IDLE_GARTEN_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_JONES_IDLE_GARTEN_EN 0x00000200U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_SHIFT 8U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_JONES_IDLE_HOSTIF_EN 0x00000100U
+#define ROGUE_CR_JONES_IDLE_SOCIF_SHIFT 7U
+#define ROGUE_CR_JONES_IDLE_SOCIF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_JONES_IDLE_SOCIF_EN 0x00000080U
+#define ROGUE_CR_JONES_IDLE_TILING_SHIFT 6U
+#define ROGUE_CR_JONES_IDLE_TILING_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_JONES_IDLE_TILING_EN 0x00000040U
+#define ROGUE_CR_JONES_IDLE_IPP_SHIFT 5U
+#define ROGUE_CR_JONES_IDLE_IPP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_JONES_IDLE_IPP_EN 0x00000020U
+#define ROGUE_CR_JONES_IDLE_USCS_SHIFT 4U
+#define ROGUE_CR_JONES_IDLE_USCS_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_IDLE_USCS_EN 0x00000010U
+#define ROGUE_CR_JONES_IDLE_PM_SHIFT 3U
+#define ROGUE_CR_JONES_IDLE_PM_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_IDLE_PM_EN 0x00000008U
+#define ROGUE_CR_JONES_IDLE_CDM_SHIFT 2U
+#define ROGUE_CR_JONES_IDLE_CDM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_IDLE_CDM_EN 0x00000004U
+#define ROGUE_CR_JONES_IDLE_VDM_SHIFT 1U
+#define ROGUE_CR_JONES_IDLE_VDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_IDLE_VDM_EN 0x00000002U
+#define ROGUE_CR_JONES_IDLE_BIF_SHIFT 0U
+#define ROGUE_CR_JONES_IDLE_BIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_IDLE_BIF_EN 0x00000001U
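+
+/*
+ * Usage sketch (illustrative only): idle registers report one active-high
+ * bit per block, so "fully idle" is a read matching MASKFULL. The generic
+ * readl_poll_timeout() helper from <linux/iopoll.h> is real; the
+ * pvr_dev->regs mapping is an assumption about this driver's layout.
+ *
+ *   u32 val;
+ *   err = readl_poll_timeout(pvr_dev->regs + ROGUE_CR_JONES_IDLE, val,
+ *                            (val & ROGUE_CR_JONES_IDLE_MASKFULL) ==
+ *                            ROGUE_CR_JONES_IDLE_MASKFULL, 10, 1000);
+ */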
+
+/* Register ROGUE_CR_TORNADO_PERF */
+#define ROGUE_CR_TORNADO_PERF 0x8228U
+#define ROGUE_CR_TORNADO_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TORNADO_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TORNADO_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TORNADO_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TORNADO_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TORNADO_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF_SELECT0 */
+#define ROGUE_CR_TORNADO_PERF_SELECT0 0x8230U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TORNADO_PERF_COUNTER_0 */
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0 0x8268U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TEXAS_PERF */
+#define ROGUE_CR_TEXAS_PERF 0x8290U
+#define ROGUE_CR_TEXAS_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_TEXAS_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_TEXAS_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_TEXAS_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_TEXAS_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TEXAS_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_PERF_SELECT0 */
+#define ROGUE_CR_TEXAS_PERF_SELECT0 0x8298U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TEXAS_PERF_COUNTER_0 */
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0 0x82D8U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_PERF */
+#define ROGUE_CR_JONES_PERF 0x8330U
+#define ROGUE_CR_JONES_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_JONES_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_JONES_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_JONES_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_JONES_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_JONES_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_JONES_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_JONES_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_JONES_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_JONES_PERF_SELECT0 */
+#define ROGUE_CR_JONES_PERF_SELECT0 0x8338U
+#define ROGUE_CR_JONES_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_JONES_PERF_COUNTER_0 */
+#define ROGUE_CR_JONES_PERF_COUNTER_0 0x8368U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF */
+#define ROGUE_CR_BLACKPEARL_PERF 0x8400U
+#define ROGUE_CR_BLACKPEARL_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_SELECT0 */
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0 0x8408U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 */
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 0x8448U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_PERF */
+#define ROGUE_CR_PBE_PERF 0x8478U
+#define ROGUE_CR_PBE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_PBE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_PBE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_PBE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_PBE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_PBE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_PBE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_PBE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_PBE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_PBE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_PBE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_PBE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_PBE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_PBE_PERF_SELECT0 */
+#define ROGUE_CR_PBE_PERF_SELECT0 0x8480U
+#define ROGUE_CR_PBE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_PBE_PERF_COUNTER_0 */
+#define ROGUE_CR_PBE_PERF_COUNTER_0 0x84B0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OCP_REVINFO */
+#define ROGUE_CR_OCP_REVINFO 0x9000U
+#define ROGUE_CR_OCP_REVINFO_MASKFULL 0x00000007FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT 33U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK 0xFFFFFFF9FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT 32U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_REVINFO_REVISION_SHIFT 0U
+#define ROGUE_CR_OCP_REVINFO_REVISION_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_OCP_SYSCONFIG */
+#define ROGUE_CR_OCP_SYSCONFIG 0x9010U
+#define ROGUE_CR_OCP_SYSCONFIG_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT 10U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK 0xFFFFF3FFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT 8U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT 6U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT 4U
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 2U
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 0U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0 0x9020U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1 0x9028U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2 0x9030U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_0 0x9038U
+#define ROGUE_CR_OCP_IRQSTATUS_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_1 0x9040U
+#define ROGUE_CR_OCP_IRQSTATUS_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_2 0x9048U
+#define ROGUE_CR_OCP_IRQSTATUS_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_0 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_0 0x9050U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_1 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_1 0x9058U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_2 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_2 0x9060U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN 0x00000001U
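+
+/*
+ * Usage sketch (illustrative only): the OCP IRQ registers follow a
+ * RAW/STATUS/ENABLE_SET/ENABLE_CLR layout, so the GPU interrupt line is
+ * unmasked by writing its enable bit to the SET register (and masked again
+ * via the CLR register below). pvr_cr_write32() is an assumed helper name.
+ *
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_OCP_IRQENABLE_SET_2,
+ *                  ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN);
+ */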
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_0 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0 0x9068U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_1 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1 0x9070U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_2 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2 0x9078U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQ_EVENT */
+#define ROGUE_CR_OCP_IRQ_EVENT 0x9080U
+#define ROGUE_CR_OCP_IRQ_EVENT_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT 19U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN 0x0000000000080000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT 18U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT 17U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN 0x0000000000020000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT 16U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000010000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT 15U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT 14U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT 13U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT 12U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN 0x0000000000001000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT 11U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000800ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT 10U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT 9U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN 0x0000000000000200ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT 8U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN 0x0000000000000100ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT 7U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT 6U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT 5U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT 4U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN 0x0000000000000010ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT 3U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000008ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT 2U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT 1U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN 0x0000000000000002ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT 0U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_OCP_DEBUG_CONFIG */
+#define ROGUE_CR_OCP_DEBUG_CONFIG 0x9088U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_DEBUG_STATUS */
+#define ROGUE_CR_OCP_DEBUG_STATUS 0x9090U
+#define ROGUE_CR_OCP_DEBUG_STATUS_MASKFULL 0x001F1F77FFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT 51U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK 0xFFE7FFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT 50U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN 0x0004000000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT 48U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT 43U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK 0xFFFFE7FFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT 42U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN 0x0000040000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT 40U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT 38U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN 0x0000004000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT 37U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN 0x0000002000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT 36U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN 0x0000001000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT 34U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN 0x0000000400000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT 33U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN 0x0000000200000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT 32U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT 31U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN 0x0000000080000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT 30U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN 0x0000000040000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT 29U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN 0x0000000020000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT 27U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK 0xFFFFFFFFE7FFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT 26U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN 0x0000000004000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT 24U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT 23U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN 0x0000000000800000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT 22U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN 0x0000000000400000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT 21U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN 0x0000000000200000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT 19U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK 0xFFFFFFFFFFE7FFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT 18U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT 16U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT 15U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT 14U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT 13U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT 11U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK 0xFFFFFFFFFFFFE7FFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT 10U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT 8U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT 7U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT 6U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT 5U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT 3U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK 0xFFFFFFFFFFFFFFE7ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT 2U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
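+/*
+ * Per-data-master (DM) type bits. These appear to encode the 7-bit
+ * DM_TRUSTED field of ROGUE_CR_BIF_TRUST below (ROGUE_CR_BIF_TRUST_DM_MASK
+ * covers the same seven bits); this grouping is inferred from the names.
+ */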
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_EN 0x00000001U
+
+#define ROGUE_CR_BIF_TRUST_DM_MASK 0x0000007FU
+
+/* Register ROGUE_CR_BIF_TRUST */
+#define ROGUE_CR_BIF_TRUST 0xA000U
+#define ROGUE_CR_BIF_TRUST_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT 20U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN 0x00100000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT 19U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN 0x00080000U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT 18U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN 0x00040000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT 17U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN 0x00020000U
+#define ROGUE_CR_BIF_TRUST_ENABLE_SHIFT 16U
+#define ROGUE_CR_BIF_TRUST_ENABLE_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_BIF_TRUST_ENABLE_EN 0x00010000U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_SHIFT 9U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_CLRMSK 0xFFFF01FFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT 8U
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN 0x00000100U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT 7U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN 0x00000080U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN 0x00000001U
+
+/* Register ROGUE_CR_SYS_BUS_SECURE */
+#define ROGUE_CR_SYS_BUS_SECURE 0xA100U
+#define ROGUE_CR_SYS_BUS_SECURE__SECR__MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_SHIFT 0U
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FBA_FC0_CHECKSUM */
+#define ROGUE_CR_FBA_FC0_CHECKSUM 0xD170U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC1_CHECKSUM */
+#define ROGUE_CR_FBA_FC1_CHECKSUM 0xD178U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC2_CHECKSUM */
+#define ROGUE_CR_FBA_FC2_CHECKSUM 0xD180U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC3_CHECKSUM */
+#define ROGUE_CR_FBA_FC3_CHECKSUM 0xD188U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CLK_CTRL2 */
+#define ROGUE_CR_CLK_CTRL2 0xD200U
+#define ROGUE_CR_CLK_CTRL2_MASKFULL 0x0000000000000F33ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL2_VRDM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL2_SH_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL2_SH_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL2_SH_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_SH_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL2_SH_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL2_FBA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS2 */
+#define ROGUE_CR_CLK_STATUS2 0xD208U
+#define ROGUE_CR_CLK_STATUS2_MASKFULL 0x0000000000000015ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS2_VRDM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS2_SH_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS2_SH_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS2_SH_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_SH_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS2_FBA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_RPM_SHF_FPL */
+#define ROGUE_CR_RPM_SHF_FPL 0xD520U
+#define ROGUE_CR_RPM_SHF_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_READ */
+#define ROGUE_CR_RPM_SHF_FPL_READ 0xD528U
+#define ROGUE_CR_RPM_SHF_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_WRITE */
+#define ROGUE_CR_RPM_SHF_FPL_WRITE 0xD530U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL */
+#define ROGUE_CR_RPM_SHG_FPL 0xD538U
+#define ROGUE_CR_RPM_SHG_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_READ */
+#define ROGUE_CR_RPM_SHG_FPL_READ 0xD540U
+#define ROGUE_CR_RPM_SHG_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_WRITE */
+#define ROGUE_CR_RPM_SHG_FPL_WRITE 0xD548U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_SH_PERF */
+#define ROGUE_CR_SH_PERF 0xD5F8U
+#define ROGUE_CR_SH_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_SH_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_SH_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SH_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_SH_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_SH_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SH_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_SH_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_SH_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SH_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_SH_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_SH_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SH_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_SH_PERF_SELECT0 */
+#define ROGUE_CR_SH_PERF_SELECT0 0xD600U
+#define ROGUE_CR_SH_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SH_PERF_COUNTER_0 */
+#define ROGUE_CR_SH_PERF_COUNTER_0 0xD628U
+#define ROGUE_CR_SH_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_SHG_CHECKSUM */
+#define ROGUE_CR_SHF_SHG_CHECKSUM 0xD1C0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM 0xD1C8U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VARY_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM 0xD1D0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RPM_BIF_CHECKSUM */
+#define ROGUE_CR_RPM_BIF_CHECKSUM 0xD1D8U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_BIF_CHECKSUM */
+#define ROGUE_CR_SHG_BIF_CHECKSUM 0xD1E0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_FE_BE_CHECKSUM */
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM 0xD1E8U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BF_PERF */
+#define DPX_CR_BF_PERF 0xC458U
+#define DPX_CR_BF_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BF_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BF_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BF_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BF_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BF_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BF_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BF_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BF_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BF_PERF_SELECT0 */
+#define DPX_CR_BF_PERF_SELECT0 0xC460U
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BF_PERF_COUNTER_0 */
+#define DPX_CR_BF_PERF_COUNTER_0 0xC488U
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BT_PERF */
+#define DPX_CR_BT_PERF 0xC3D0U
+#define DPX_CR_BT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BT_PERF_SELECT0 */
+#define DPX_CR_BT_PERF_SELECT0 0xC3D8U
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BT_PERF_COUNTER_0 */
+#define DPX_CR_BT_PERF_COUNTER_0 0xC420U
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RQ_USC_DEBUG */
+#define DPX_CR_RQ_USC_DEBUG 0xC110U
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT 0U
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS 0xC5C8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT 0U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS 0xC5D0U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL 0x03FFFFFFFFFFFFF0ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT 57U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN 0x0200000000000000ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT 44U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK 0xFE000FFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT 40U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register DPX_CR_BIF_MMU_STATUS */
+#define DPX_CR_BIF_MMU_STATUS 0xC5D8U
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF */
+#define DPX_CR_RT_PERF 0xC700U
+#define DPX_CR_RT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_RT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_RT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_RT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_RT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_RT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_RT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_RT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_RT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF_SELECT0 */
+#define DPX_CR_RT_PERF_SELECT0 0xC708U
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_RT_PERF_COUNTER_0 */
+#define DPX_CR_RT_PERF_COUNTER_0 0xC730U
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BX_TU_PERF */
+#define DPX_CR_BX_TU_PERF 0xC908U
+#define DPX_CR_BX_TU_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BX_TU_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BX_TU_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BX_TU_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BX_TU_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BX_TU_PERF_SELECT0 */
+#define DPX_CR_BX_TU_PERF_SELECT0 0xC910U
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BX_TU_PERF_COUNTER_0 */
+#define DPX_CR_BX_TU_PERF_COUNTER_0 0xC938U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RS_PDS_RR_CHECKSUM */
+#define DPX_CR_RS_PDS_RR_CHECKSUM 0xC0F0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT 0U
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT */
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT 0xE140U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MMU_CBASE_MAPPING */
+#define ROGUE_CR_MMU_CBASE_MAPPING 0xE148U
+#define ROGUE_CR_MMU_CBASE_MAPPING_MASKFULL 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK 0xF0000000U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_MMU_FAULT_STATUS */
+#define ROGUE_CR_MMU_FAULT_STATUS 0xE150U
+#define ROGUE_CR_MMU_FAULT_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MMU_FAULT_STATUS_META */
+#define ROGUE_CR_MMU_FAULT_STATUS_META 0xE158U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT 28U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK 0x000000000FFFFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT 20U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT 12U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT 6U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT 4U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_SHIFT 3U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_EN 0x0000000000000008ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT 1U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT 0U
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SLC3_CTRL_MISC */
+#define ROGUE_CR_SLC3_CTRL_MISC 0xE200U
+#define ROGUE_CR_SLC3_CTRL_MISC_MASKFULL 0x0000000000000107ULL
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT 8U
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN 0x00000100U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 0U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFF8U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR 0x00000000U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH 0x00000001U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH 0x00000002U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH 0x00000003U
+#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH 0x00000004U
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE */
+#define ROGUE_CR_SLC3_SCRAMBLE 0xE208U
+#define ROGUE_CR_SLC3_SCRAMBLE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE2 */
+#define ROGUE_CR_SLC3_SCRAMBLE2 0xE210U
+#define ROGUE_CR_SLC3_SCRAMBLE2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE3 */
+#define ROGUE_CR_SLC3_SCRAMBLE3 0xE218U
+#define ROGUE_CR_SLC3_SCRAMBLE3_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_SCRAMBLE4 */
+#define ROGUE_CR_SLC3_SCRAMBLE4 0xE260U
+#define ROGUE_CR_SLC3_SCRAMBLE4_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_SHIFT 0U
+#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_CLRMSK 0x0000000000000000ULL
+
+/* Register ROGUE_CR_SLC3_STATUS */
+#define ROGUE_CR_SLC3_STATUS 0xE220U
+#define ROGUE_CR_SLC3_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES1_SHIFT 48U
+#define ROGUE_CR_SLC3_STATUS_WRITES1_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_WRITES0_SHIFT 32U
+#define ROGUE_CR_SLC3_STATUS_WRITES0_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS1_SHIFT 16U
+#define ROGUE_CR_SLC3_STATUS_READS1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_SLC3_STATUS_READS0_SHIFT 0U
+#define ROGUE_CR_SLC3_STATUS_READS0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SLC3_IDLE */
+#define ROGUE_CR_SLC3_IDLE 0xE228U
+#define ROGUE_CR_SLC3_IDLE_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT 18U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK 0xFFF3FFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_SHIFT 17U
+#define ROGUE_CR_SLC3_IDLE_MMU_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_SLC3_IDLE_MMU_EN 0x00020000U
+#define ROGUE_CR_SLC3_IDLE_RDI_SHIFT 16U
+#define ROGUE_CR_SLC3_IDLE_RDI_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_SLC3_IDLE_RDI_EN 0x00010000U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_SHIFT 12U
+#define ROGUE_CR_SLC3_IDLE_IMGBV4_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_SHIFT 4U
+#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK 0xFFFFF00FU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT 2U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT 1U
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_EN 0x00000002U
+#define ROGUE_CR_SLC3_IDLE_XBAR_SHIFT 0U
+#define ROGUE_CR_SLC3_IDLE_XBAR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SLC3_IDLE_XBAR_EN 0x00000001U
+
+/* Register ROGUE_CR_SLC3_FAULT_STOP_STATUS */
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS 0xE248U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_MASKFULL 0x0000000000001FFFULL
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT 0U
+#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK 0xFFFFE000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_MODE */
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE 0xF048U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX 0x00000000U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE 0x00000001U
+#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST 0x00000002U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING0 */
+#define ROGUE_CR_CONTEXT_MAPPING0 0xF078U
+#define ROGUE_CR_CONTEXT_MAPPING0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING0_2D_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING0_CDM_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING0_3D_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING0_TA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING1 */
+#define ROGUE_CR_CONTEXT_MAPPING1 0xF080U
+#define ROGUE_CR_CONTEXT_MAPPING1_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING1_HOST_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING1_TLA_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING2 */
+#define ROGUE_CR_CONTEXT_MAPPING2 0xF088U
+#define ROGUE_CR_CONTEXT_MAPPING2_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING2_TE0_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING3 */
+#define ROGUE_CR_CONTEXT_MAPPING3 0xF090U
+#define ROGUE_CR_CONTEXT_MAPPING3_MASKFULL 0x0000000000FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING3_TE1_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_BIF_JONES_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ 0xF098U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ 0xF0A0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_BIF_DUST_OUTSTANDING_READ */
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ 0xF0A8U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT 0U
+#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CONTEXT_MAPPING4 */
+#define ROGUE_CR_CONTEXT_MAPPING4 0xF210U
+#define ROGUE_CR_CONTEXT_MAPPING4_MASKFULL 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT 40U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT 32U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT 24U
+#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT 16U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT 8U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT 0U
+#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MULTICORE_GPU */
+#define ROGUE_CR_MULTICORE_GPU 0xF300U
+#define ROGUE_CR_MULTICORE_GPU_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT 6U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN 0x00000040U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT 5U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN 0x00000020U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT 4U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN 0x00000010U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT 3U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN 0x00000008U
+#define ROGUE_CR_MULTICORE_GPU_ID_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GPU_ID_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_MULTICORE_SYSTEM */
+#define ROGUE_CR_MULTICORE_SYSTEM 0xF308U
+#define ROGUE_CR_MULTICORE_SYSTEM_MASKFULL 0x000000000000000FULL
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT 0U
+#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON 0xF310U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON 0xF320U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON */
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON 0xF330U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_ECC_RAM_ERR_INJ */
+#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_KICK */
+#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_ECC_RAM_INIT_DONE */
+#define ROGUE_CR_ECC_RAM_INIT_DONE 0xF350U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT 4U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN 0x00000010U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_SHIFT 3U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_EN 0x00000008U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT 2U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN 0x00000004U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT 1U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_EN 0x00000002U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_SHIFT 0U
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE 0xF390U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_STATUS */
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE 0xF398U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_SAFETY_EVENT_CLEAR */
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE 0xF3A0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* clang-format on */
+
+#endif /* __PVR_ROGUE_CR_DEFS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
new file mode 100644
index 000000000000..e09232d1d4da
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
@@ -0,0 +1,160 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_CR_DEFS_CLIENT_H__
+#define __PVR_ROGUE_CR_DEFS_CLIENT_H__
+
+/* clang-format off */
+
+/*
+ * This register controls the anti-aliasing mode of the Tiling Co-Processor; independent control is
+ * provided in both the X & Y axes.
+ * This register needs to be set based on the ISP Samples Per Pixel a core supports.
+ *
+ * When ISP Samples Per Pixel = 1:
+ * 2xmsaa is achieved by enabling Y - TE does AA on Y plane only
+ * 4xmsaa is achieved by enabling Y and X - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 2:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y and X2 - TE does AA on Y plane only
+ * 8xmsaa is achieved by enabling Y, X and X2 - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 4:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y2 and X2 - TE does AA on Y plane only
+ * 8xmsaa not supported by XE cores
+ */
+/* Register ROGUE_CR_TE_AA */
+#define ROGUE_CR_TE_AA 0x0C00
+#define ROGUE_CR_TE_AA_MASKFULL 0x000000000000000Full
+/* Y2
+ * Indicates 4xmsaa when X2 and Y2 are set to 1. This does not affect TE and is only used within
+ * TPW.
+ */
+#define ROGUE_CR_TE_AA_Y2_SHIFT 3
+#define ROGUE_CR_TE_AA_Y2_CLRMSK 0xFFFFFFF7
+#define ROGUE_CR_TE_AA_Y2_EN 0x00000008
+/* Y
+ * Anti-Aliasing in Y Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_Y_SHIFT 2
+#define ROGUE_CR_TE_AA_Y_CLRMSK 0xFFFFFFFB
+#define ROGUE_CR_TE_AA_Y_EN 0x00000004
+/* X
+ * Anti-Aliasing in X Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_X_SHIFT 1
+#define ROGUE_CR_TE_AA_X_CLRMSK 0xFFFFFFFD
+#define ROGUE_CR_TE_AA_X_EN 0x00000002
+/* X2
+ * 2x Anti-Aliasing Enabled, affects PPP only
+ */
+#define ROGUE_CR_TE_AA_X2_SHIFT 0
+#define ROGUE_CR_TE_AA_X2_CLRMSK 0xFFFFFFFE
+#define ROGUE_CR_TE_AA_X2_EN 0x00000001
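+
+/*
+ * Illustrative sketch only (not part of this patch): one way the mode table
+ * above could be turned into a ROGUE_CR_TE_AA value. The helper name, the
+ * u32 typing and the 0 return for unsupported combinations are assumptions.
+ */
+static inline u32 rogue_te_aa_value(u32 isp_samples_per_pixel, u32 msaa)
+{
+	switch (isp_samples_per_pixel) {
+	case 1:
+		if (msaa == 2)
+			return ROGUE_CR_TE_AA_Y_EN;
+		if (msaa == 4)
+			return ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X_EN;
+		break;
+	case 2:
+		if (msaa == 2)
+			return ROGUE_CR_TE_AA_X2_EN;
+		if (msaa == 4)
+			return ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X2_EN;
+		if (msaa == 8) /* Not supported on XE cores. */
+			return ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X_EN |
+			       ROGUE_CR_TE_AA_X2_EN;
+		break;
+	case 4:
+		if (msaa == 2)
+			return ROGUE_CR_TE_AA_X2_EN;
+		if (msaa == 4)
+			return ROGUE_CR_TE_AA_Y2_EN | ROGUE_CR_TE_AA_X2_EN;
+		break;
+	}
+	return 0; /* Unsupported combination. */
+}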
+
+/* MacroTile Boundaries X Plane */
+/* Register ROGUE_CR_TE_MTILE1 */
+#define ROGUE_CR_TE_MTILE1 0x0C08
+#define ROGUE_CR_TE_MTILE1_MASKFULL 0x0000000007FFFFFFull
+/* X1 default: 0x00000004
+ * X1 MacroTile boundary, left tile X for second column of macrotiles (16MT mode) - 32 pixels across
+ * tile
+ */
+#define ROGUE_CR_TE_MTILE1_X1_SHIFT 18
+#define ROGUE_CR_TE_MTILE1_X1_CLRMSK 0xF803FFFF
+/* X2 default: 0x00000008
+ * X2 MacroTile boundary, left tile X for third (16MT) column of macrotiles - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X2_SHIFT 9
+#define ROGUE_CR_TE_MTILE1_X2_CLRMSK 0xFFFC01FF
+/* X3 default: 0x0000000c
+ * X3 MacroTile boundary, left tile X for fourth column of macrotiles (16MT) - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X3_SHIFT 0
+#define ROGUE_CR_TE_MTILE1_X3_CLRMSK 0xFFFFFE00
+
+/* MacroTile Boundaries Y Plane. */
+/* Register ROGUE_CR_TE_MTILE2 */
+#define ROGUE_CR_TE_MTILE2 0x0C10
+#define ROGUE_CR_TE_MTILE2_MASKFULL 0x0000000007FFFFFFull
+/* Y1 default: 0x00000004
+ * Y1 MacroTile boundary, top tile Y for second row of macrotiles (16MT mode) - 32 pixels tile
+ * height
+ */
+#define ROGUE_CR_TE_MTILE2_Y1_SHIFT 18
+#define ROGUE_CR_TE_MTILE2_Y1_CLRMSK 0xF803FFFF
+/* Y2 default: 0x00000008
+ * X2 MacroTile boundary, top tile Y for third(16MT) column of macrotiles - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y2_SHIFT 9
+#define ROGUE_CR_TE_MTILE2_Y2_CLRMSK 0xFFFC01FF
+/* Y3 default: 0x0000000c
+ * X3 MacroTile boundary, top tile Y for fourth column of macrotiles (16MT) - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y3_SHIFT 0
+#define ROGUE_CR_TE_MTILE2_Y3_CLRMSK 0xFFFFFE00
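+
+/*
+ * Illustrative only: combining the stated field defaults (X1/Y1 = 0x4,
+ * X2/Y2 = 0x8, X3/Y3 = 0xc) gives
+ * (0x4 << 18) | (0x8 << 9) | 0xc = 0x0010100c for both MTILE registers.
+ */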
+
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must be configured in terms of the number of tiles in the X & Y axes.
+ */
+
+/* Register ROGUE_CR_TE_SCREEN */
+#define ROGUE_CR_TE_SCREEN 0x0C18
+#define ROGUE_CR_TE_SCREEN_MASKFULL 0x00000000001FF1FFull
+/* YMAX default: 0x00000010
+ * Maximum Y tile address visible on screen, 32 pixel tile height, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_YMAX_SHIFT 12
+#define ROGUE_CR_TE_SCREEN_YMAX_CLRMSK 0xFFE00FFF
+/* XMAX default: 0x00000010
+ * Maximum X tile address visible on screen, 32 pixel tile width, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_XMAX_SHIFT 0
+#define ROGUE_CR_TE_SCREEN_XMAX_CLRMSK 0xFFFFFE00
+
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must be configured in terms of the number of pixels in the X & Y axes, since this may not be the
+ * same as the number of tiles defined in the ROGUE_CR_TE_SCREEN register.
+ */
+/* Register ROGUE_CR_PPP_SCREEN */
+#define ROGUE_CR_PPP_SCREEN 0x0C98
+#define ROGUE_CR_PPP_SCREEN_MASKFULL 0x000000007FFF7FFFull
+/* PIXYMAX
+ * Screen height in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT 16
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK 0x8000FFFF
+/* PIXXMAX
+ * Screen width in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT 0
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK 0xFFFF8000
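+
+/*
+ * Illustrative sketch only (not part of this patch): packing a maximum
+ * screen size into the two registers above. The helper name is an
+ * assumption, as is reading the *MAX fields as a last address (count - 1);
+ * DIV_ROUND_UP() is from <linux/math.h>.
+ */
+static inline void rogue_pack_screen_size(u32 width, u32 height,
+					   u32 *te_screen, u32 *ppp_screen)
+{
+	/* TE_SCREEN works in 32-pixel tiles... */
+	u32 xtiles = DIV_ROUND_UP(width, 32);
+	u32 ytiles = DIV_ROUND_UP(height, 32);
+
+	*te_screen = ((ytiles - 1) << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) |
+		     ((xtiles - 1) << ROGUE_CR_TE_SCREEN_XMAX_SHIFT);
+
+	/* ...while PPP_SCREEN works in pixels. */
+	*ppp_screen = ((height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) |
+		      ((width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT);
+}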
+
+/* Register ROGUE_CR_ISP_MTILE_SIZE */
+#define ROGUE_CR_ISP_MTILE_SIZE 0x0F18
+#define ROGUE_CR_ISP_MTILE_SIZE_MASKFULL 0x0000000003FF03FFull
+/* X
+ * Macrotile width, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT 16
+#define ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK 0xFC00FFFF
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSIZE 1
+/* Y
+ * Macrotile height, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK 0xFFFFFC00
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSIZE 1
+
+/* clang-format on */
+
+#endif /* __PVR_ROGUE_CR_DEFS_CLIENT_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
new file mode 100644
index 000000000000..392fc674c5c3
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
@@ -0,0 +1,179 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_DEFS_H__
+#define __PVR_ROGUE_DEFS_H__
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bits.h>
+
+/*
+ ******************************************************************************
+ * ROGUE Defines
+ ******************************************************************************
+ */
+
+#define ROGUE_FW_MAX_NUM_OS (8U)
+#define ROGUE_FW_HOST_OS (0U)
+#define ROGUE_FW_GUEST_OSID_START (1U)
+
+#define ROGUE_FW_THREAD_0 (0U)
+#define ROGUE_FW_THREAD_1 (1U)
+
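+/*
+ * The SLC cache line size is reported in bits; this yields bytes, or 0 if
+ * the value is unset.
+ */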
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((s32)(x)) > 0) ? ((x) / 8) : (0))
+
+#define MAX_HW_GEOM_FRAG_CONTEXTS 2U
+
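+/*
+ * Each clock-gating field in the CLK_CTRL/CLK_CTRL2 registers is two bits
+ * wide, with ON = 0b01 and AUTO = 0b10 (see e.g. the ROGUE_CR_CLK_CTRL2_*
+ * values above), so the 0x5555.../0xaaaa... patterns set every field to ON
+ * or AUTO respectively; ANDing with MASKFULL discards the unused bits.
+ */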
+#define ROGUE_CR_CLK_CTRL_ALL_ON \
+	(0x5555555555555555ull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL_ALL_AUTO \
+	(0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_ON \
+	(0x5555555555555555ull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_AUTO \
+	(0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+
+#define ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN    \
+	(ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN    \
+	(ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+/* SOFT_RESET steps as defined in the TRM */
+#define ROGUE_S7_SOFT_RESET_DUSTS (ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES                                 \
+	(ROGUE_CR_SOFT_RESET_PM_EN | ROGUE_CR_SOFT_RESET_VDM_EN | \
+	 ROGUE_CR_SOFT_RESET_ISP_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES_ALL                             \
+	(ROGUE_S7_SOFT_RESET_JONES | ROGUE_CR_SOFT_RESET_BIF_EN | \
+	 ROGUE_CR_SOFT_RESET_SLC_EN | ROGUE_CR_SOFT_RESET_GARTEN_EN)
+
+#define ROGUE_S7_SOFT_RESET2                                                  \
+	(ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN | ROGUE_CR_SOFT_RESET2_PIXEL_EN | \
+	 ROGUE_CR_SOFT_RESET2_CDM_EN | ROGUE_CR_SOFT_RESET2_VERTEX_EN)
+
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U)
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE \
+	BIT(ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U)
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_SIZE BIT(ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U)
+
+/*
+ * To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up
+ */
+#define ROGUE_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U)
+
+/*
+ * To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up
+ */
+#define ROGUE_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+
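+/*
+ * Worked example (illustrative only): a 6-cluster core needs
+ * ROGUE_REQ_NUM_DUSTS(6) == 3 Dusts and ROGUE_REQ_NUM_PHANTOMS(6) == 2
+ * Phantoms, matching the divide-and-round-up rules above.
+ */
+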
+/*
+ * FW MMU contexts
+ */
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */
+#define MMU_CONTEXT_MAPPING_FWIF (0x0) /* Host/FW data */
+
+/*
+ * Utility macros to calculate CAT_BASE register addresses
+ */
+#define BIF_CAT_BASEX(n)          \
+	(ROGUE_CR_BIF_CAT_BASE0 + \
+	 (n) * (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0))
+
+#define FWCORE_MEM_CAT_BASEX(n)                 \
+	(ROGUE_CR_FWCORE_MEM_CAT_BASE0 +        \
+	 (n) * (ROGUE_CR_FWCORE_MEM_CAT_BASE1 - \
+		ROGUE_CR_FWCORE_MEM_CAT_BASE0))
+
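+/*
+ * Illustrative expansion only: BIF_CAT_BASEX(0) resolves to
+ * ROGUE_CR_BIF_CAT_BASE0, and each increment of n advances by the register
+ * stride (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0), so
+ * BIF_CAT_BASEX(2) == ROGUE_CR_BIF_CAT_BASE0 + 2 * stride.
+ */
+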
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT \
+	ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK \
+	ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
+
+#define ROGUE_MAX_COMPUTE_SHARED_REGISTERS (2 * 1024)
+#define ROGUE_MAX_VERTEX_SHARED_REGISTERS 1024
+#define ROGUE_MAX_PIXEL_SHARED_REGISTERS 1024
+#define ROGUE_CSRM_LINE_SIZE_IN_DWORDS (64 * 4 * 4)
+
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE 64
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER 256
+
+/*
+ * The maximum amount of local memory which can be allocated by a single kernel
+ * (in dwords/32-bit registers).
+ *
+ * ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE is in bytes so we divide by four.
+ */
+#define ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS ((ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE * \
+						   ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER) >> 2)
+
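+/*
+ * Worked arithmetic (illustrative only): (64 * 256) bytes >> 2 gives 4096
+ * dwords as the per-kernel local memory ceiling.
+ */
+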
+/*
+ ******************************************************************************
+ * WA HWBRNs
+ ******************************************************************************
+ */
+
+/* GPU CR timer tick in GPU cycles */
+#define ROGUE_CRTIME_TICK_IN_CYCLES (256U)
+
+/* For no-HW multicore, return the maximum possible core count to the client */
+#define ROGUE_MULTICORE_MAX_NOHW_CORES (4U)
+
+/*
+ * If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define ROGUE_TPU_CACHED_SLC_SIZE_THRESHOLD (128U * 1024U)
+
+/*
+ * If the size of the SLC is bigger than this value then the TCU must not be
+ * bypassed in the SLC.
+ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default.
+ */
+#define ROGUE_TCU_CACHED_SLC_SIZE_THRESHOLD (32U * 1024U)
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define ROGUE_FW_BOOT_STAGE_REGISTER (ROGUE_CR_POWER_ESTIMATE_RESULT)
+
+/*
+ * Virtualisation definitions
+ */
+#define ROGUE_VIRTUALISATION_REG_SIZE_PER_OS \
+	(ROGUE_CR_MTS_SCHEDULE1 - ROGUE_CR_MTS_SCHEDULE)
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define ROGUE_FEATURE_HWPERF_ROGUE
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define ROGUE_TRP_MAX_NUM_CORES (4U)
+
+#endif /* __PVR_ROGUE_DEFS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
new file mode 100644
index 000000000000..367da927fd26
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
@@ -0,0 +1,2271 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_H__
+#define __PVR_ROGUE_FWIF_H__
+
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ ****************************************************************************
+ * Logging type
+ ****************************************************************************
+ */
+#define ROGUE_FWIF_LOG_TYPE_NONE 0x00000000U
+#define ROGUE_FWIF_LOG_TYPE_TRACE 0x00000001U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MTS 0x00000004U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CSW 0x00000010U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_BIF 0x00000020U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_PM 0x00000040U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RTD 0x00000080U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_SPM 0x00000100U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_POW 0x00000200U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWR 0x00000400U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWP 0x00000800U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RPM 0x00001000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DMA 0x00002000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MISC 0x00004000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
+#define ROGUE_FWIF_LOG_TYPE_MASK 0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define ROGUE_FWIF_LOG_GROUPS_STRING_LIST \
+	"main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+struct rogue_fwif_log_group_map_entry {
+	const char *log_group_name;
+	u32 log_group_type;
+};
+
+/* clang-format off */
+/*
+ * Macro for use with the rogue_fwif_log_group_map_entry type to create a lookup
+ * table where needed. Keep log group names short, no more than 20 chars.
+ */
+#define ROGUE_FWIF_LOG_GROUP_NAME_VALUE_MAP		\
+	{ "none", ROGUE_FWIF_LOG_TYPE_NONE },		\
+	{ "main", ROGUE_FWIF_LOG_TYPE_GROUP_MAIN },	\
+	{ "mts", ROGUE_FWIF_LOG_TYPE_GROUP_MTS },		\
+	{ "cleanup", ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP },	\
+	{ "csw", ROGUE_FWIF_LOG_TYPE_GROUP_CSW },		\
+	{ "bif", ROGUE_FWIF_LOG_TYPE_GROUP_BIF },		\
+	{ "pm", ROGUE_FWIF_LOG_TYPE_GROUP_PM },		\
+	{ "rtd", ROGUE_FWIF_LOG_TYPE_GROUP_RTD },		\
+	{ "spm", ROGUE_FWIF_LOG_TYPE_GROUP_SPM },		\
+	{ "pow", ROGUE_FWIF_LOG_TYPE_GROUP_POW },		\
+	{ "hwr", ROGUE_FWIF_LOG_TYPE_GROUP_HWR },		\
+	{ "hwp", ROGUE_FWIF_LOG_TYPE_GROUP_HWP },		\
+	{ "rpm", ROGUE_FWIF_LOG_TYPE_GROUP_RPM },		\
+	{ "dma", ROGUE_FWIF_LOG_TYPE_GROUP_DMA },		\
+	{ "misc", ROGUE_FWIF_LOG_TYPE_GROUP_MISC },	\
+	{ "debug", ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG }
+/* clang-format on */
+
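+/*
+ * Illustrative use only (hypothetical table name): a lookup table can be
+ * instantiated directly from the map, e.g.
+ *
+ *   static const struct rogue_fwif_log_group_map_entry log_group_map[] = {
+ *           ROGUE_FWIF_LOG_GROUP_NAME_VALUE_MAP
+ *   };
+ */
+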
+/*
+ * Used in print statements to display log group state, one %s per group defined
+ */
+#define ROGUE_FWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC \
+	"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* clang-format off */
+/* Used in a print statement to display log group state, one per group */
+#define ROGUE_FWIF_LOG_ENABLED_GROUPS_LIST(types)				  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_MAIN) ? ("main ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_MTS) ? ("mts ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP) ? ("cleanup ") : ("")), \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_CSW) ? ("csw ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_BIF) ? ("bif ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_PM) ? ("pm ") : ("")),		  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_RTD) ? ("rtd ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_SPM) ? ("spm ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_POW) ? ("pow ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_HWR) ? ("hwr ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_HWP) ? ("hwp ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_RPM) ? ("rpm ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_DMA) ? ("dma ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_MISC) ? ("misc ") : ("")),	  \
+	(((types) & ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG) ? ("debug ") : (""))
+/* clang-format on */
+
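+/*
+ * Illustrative pairing only (hypothetical call site): the format-spec and
+ * argument-list macros above are intended to be used together, e.g.
+ *
+ *   pr_info("groups: " ROGUE_FWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "\n",
+ *           ROGUE_FWIF_LOG_ENABLED_GROUPS_LIST(log_type));
+ */
+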
+/*
+ ****************************************************************************
+ * ROGUE FW signature checks
+ ****************************************************************************
+ */
+#define ROGUE_FW_SIG_BUFFER_SIZE_MIN (8192)
+
+#define ROGUE_FWIF_TIMEDIFF_ID ((0x1UL << 28) | ROGUE_CR_TIMER)
+
+/*
+ ****************************************************************************
+ * Trace Buffer
+ ****************************************************************************
+ */
+
+/* Default size of ROGUE_FWIF_TRACEBUF_SPACE in DWords */
+#define ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE 200U
+#define ROGUE_FW_THREAD_NUM 1U
+#define ROGUE_FW_THREAD_MAX 2U
+
+#define ROGUE_FW_POLL_TYPE_SET 0x80000000U
+
+struct rogue_fwif_file_info_buf {
+	char path[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+	char info[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+	u32 line_num;
+	u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_tracebuf_space {
+	u32 trace_pointer;
+
+	u32 trace_buffer_fw_addr;
+
+	/* To be used by host when reading from trace buffer */
+	u32 *trace_buffer;
+
+	struct rogue_fwif_file_info_buf assert_buf;
+} __aligned(8);
+
+/* Total number of FW fault logs stored */
+#define ROGUE_FWIF_FWFAULTINFO_MAX (8U)
+
+struct rogue_fw_fault_info {
+	aligned_u64 cr_timer;
+	aligned_u64 os_timer;
+	u32 data __aligned(8);
+	u32 reserved;
+	struct rogue_fwif_file_info_buf fault_buf;
+} __aligned(8);
+
+/* clang-format off */
+#define ROGUE_FWIF_POW_STATES                                               \
+	X(ROGUE_FWIF_POW_OFF) /* idle and ready to full power down */       \
+	X(ROGUE_FWIF_POW_ON) /* running HW commands */                      \
+	X(ROGUE_FWIF_POW_FORCED_IDLE) /* forced idle */                     \
+	X(ROGUE_FWIF_POW_IDLE) /* idle waiting for host handshake */
+/* clang-format on */
+
+enum rogue_fwif_pow_state {
+#define X(NAME) NAME,
+	ROGUE_FWIF_POW_STATES
+#undef X
+};
+
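+/*
+ * Illustrative expansion only (hypothetical helper): redefining X() turns
+ * the same list into a matching name table, e.g.
+ *
+ *   #define X(NAME) #NAME,
+ *   static const char *const pow_state_names[] = { ROGUE_FWIF_POW_STATES };
+ *   #undef X
+ */
+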
+/* Firmware HWR states */
+/* The HW state is ok or locked up */
+#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
+/* Tells if a HWR reset is in progress */
+#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
+/* A DM unrelated lockup has been detected */
+#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
+/* At least one DM is running without being close to a lockup */
+#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
+/* At least one DM is close to lockup */
+#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
+/* The FW has faulted and needs to restart */
+#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
+/* The FW has requested the host to restart it */
+#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)
+
+#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
+/* The FW has requested the host to restart it, per PHR configuration */
+#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
+/* A PHR triggered GPU reset has just finished */
+#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
+#define ROGUE_FWIF_PHR_RESTART_MASK \
+	(ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)
+
+#define ROGUE_FWIF_PHR_MODE_OFF (0UL)
+#define ROGUE_FWIF_PHR_MODE_RD_RESET (1UL)
+#define ROGUE_FWIF_PHR_MODE_FULL_RESET (2UL)
+
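+/*
+ * Illustrative decode only (assuming the flags live in a word such as
+ * hwr_state_flags): a pending PHR restart can be tested with
+ *
+ *   (hwr_state_flags & ROGUE_FWIF_PHR_RESTART_MASK) ==
+ *           ROGUE_FWIF_PHR_RESTART_REQUESTED
+ */
+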
+/* Firmware per-DM HWR states */
+/* DM is working if all flags are cleared */
+#define ROGUE_FWIF_DM_STATE_WORKING (0)
+/* DM is idle and ready for HWR */
+#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
+/* DM need to skip to next cmd before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
+/* DM need partial render cleanup before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
+/* DM need to increment Recovery Count once fully recovered */
+#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
+/* DM was identified as locking up and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
+/* DM was innocently affected by another lockup which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
+/* DM was identified as over-running and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
+/* DM was innocently affected by another DM over-running which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
+/* DM was forced into HWR as it delayed more important workloads */
+#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
+/* DM was forced into HWR due to an uncorrected GPU ECC error */
+#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)
+
+/* Firmware's connection state */
+enum rogue_fwif_connection_fw_state {
+	/* Firmware is offline */
+	ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
+	/* Firmware is initialised */
+	ROGUE_FW_CONNECTION_FW_READY,
+	/* Firmware connection is fully established */
+	ROGUE_FW_CONNECTION_FW_ACTIVE,
+	/* Firmware is clearing up connection data */
+	ROGUE_FW_CONNECTION_FW_OFFLOADING,
+	ROGUE_FW_CONNECTION_FW_STATE_COUNT
+};
+
+/* OS' connection state */
+enum rogue_fwif_connection_os_state {
+	/* OS is offline */
+	ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
+	/* OS's KM driver is setup and waiting */
+	ROGUE_FW_CONNECTION_OS_READY,
+	/* OS connection is fully established */
+	ROGUE_FW_CONNECTION_OS_ACTIVE,
+	ROGUE_FW_CONNECTION_OS_STATE_COUNT
+};
+
+struct rogue_fwif_os_runtime_flags {
+	int os_state : 3;
+	int fl_ok : 1;
+	int fl_grow_pending : 1;
+	int isolated_os : 1;
+	int reserved : 26;
+};
+
+#define PVR_SLR_LOG_ENTRIES 10
+/* MAX_CLIENT_CCB_NAME not visible to this header */
+#define PVR_SLR_LOG_STRLEN 30
+
+struct rogue_fwif_slr_entry {
+	aligned_u64 timestamp;
+	u32 fw_ctx_addr;
+	u32 num_ufos;
+	char ccb_name[PVR_SLR_LOG_STRLEN];
+	char padding[2];
+} __aligned(8);
+
+#define MAX_THREAD_NUM 2
+
+/* firmware trace control data */
+struct rogue_fwif_tracebuf {
+	u32 log_type;
+	struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
+	/*
+	 * Member initialised only when the trace buffer is actually allocated
+	 * (in ROGUETraceBufferInitOnDemandResources)
+	 */
+	u32 tracebuf_size_in_dwords;
+	/* Compatibility and other flags */
+	u32 tracebuf_flags;
+} __aligned(8);
+
+/* firmware system data shared with the Host driver */
+struct rogue_fwif_sysdata {
+	/* Configuration flags from host */
+	u32 config_flags;
+	/* Extended configuration flags from host */
+	u32 config_flags_ext;
+	volatile enum rogue_fwif_pow_state pow_state;
+	volatile u32 hw_perf_ridx;
+	volatile u32 hw_perf_widx;
+	volatile u32 hw_perf_wrap_count;
+	/* Constant after setup, needed in FW */
+	u32 hw_perf_size;
+	/* The number of times the FW drops a packet due to buffer full */
+	u32 hw_perf_drop_count;
+
+	/*
+	 * hw_perf_ut, first_drop_ordinal and last_drop_ordinal are only valid
+	 * when the FW is built with ROGUE_HWPERF_UTILIZATION &
+	 * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
+	 */
+	/* Buffer utilisation, high watermark of bytes in use */
+	u32 hw_perf_ut;
+	/* The ordinal of the first packet the FW dropped */
+	u32 first_drop_ordinal;
+	/* The ordinal of the last packet the FW dropped */
+	u32 last_drop_ordinal;
+	/* State flags for each Operating System mirrored from Fw coremem */
+	struct rogue_fwif_os_runtime_flags
+		os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];
+
+	struct rogue_fw_fault_info fault_info[ROGUE_FWIF_FWFAULTINFO_MAX];
+	u32 fw_faults;
+	u32 cr_poll_addr[MAX_THREAD_NUM];
+	u32 cr_poll_mask[MAX_THREAD_NUM];
+	u32 cr_poll_count[MAX_THREAD_NUM];
+	aligned_u64 start_idle_time;
+
+#if defined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK)
+#	define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8)
+#	define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
+		(2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
+	u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8);
+#endif
+	u32 hwr_state_flags;
+	u32 hwr_recovery_flags[PVR_FWIF_DM_MAX];
+	/* Compatibility and other flags */
+	u32 fw_sys_data_flags;
+	/* Identify whether MC config is P-P or P-S */
+	u32 mc_config;
+} __aligned(8);
+
+/* per-os firmware shared data */
+struct rogue_fwif_osdata {
+	/* Configuration flags from an OS */
+	u32 fw_os_config_flags;
+	/* Markers to signal that the host should perform a full sync check */
+	u32 fw_sync_check_mark;
+	u32 host_sync_check_mark;
+
+	u32 forced_updates_requested;
+	u8 slr_log_wp;
+	struct rogue_fwif_slr_entry slr_log_first;
+	struct rogue_fwif_slr_entry slr_log[PVR_SLR_LOG_ENTRIES];
+	aligned_u64 last_forced_update_time;
+
+	/* Interrupt count from Threads */
+	volatile u32 interrupt_count[MAX_THREAD_NUM];
+	u32 kccb_cmds_executed;
+	u32 power_sync_fw_addr;
+	/* Compatibility and other flags */
+	u32 fw_os_data_flags;
+	u32 padding;
+} __aligned(8);
+
+/* Firmware trace time-stamp field breakup */
+
+/* ROGUE_CR_TIMER register read (48 bits) value */
+#define ROGUE_FWT_TIMESTAMP_TIME_SHIFT (0U)
+#define ROGUE_FWT_TIMESTAMP_TIME_CLRMSK (0xFFFF000000000000ull)
+
+/* Extra debug-info (16 bits) */
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK
+
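+/*
+ * Illustrative decode only: for a 64-bit trace timestamp ts,
+ *
+ *   time       = ts & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK;
+ *   debug_info = (ts & ~ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >>
+ *                ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT;
+ */
+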
+/* Debug-info sub-fields */
+/*
+ * Bit 0: ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from ROGUE_CR_EVENT_STATUS
+ * register
+ */
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET \
+	BIT(ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)
+
+/* Bit 1: ROGUE_CR_BIF_MMU_ENTRY_PENDING bit from ROGUE_CR_BIF_MMU_ENTRY register */
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET \
+	BIT(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)
+
+/* Bit 2: ROGUE_CR_SLAVE_EVENT register is non-zero */
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U)
+#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET \
+	BIT(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)
+
+/* Bit 3-15: Unused bits */
+
+#define ROGUE_FWT_DEBUG_INFO_STR_MAXLEN 64
+#define ROGUE_FWT_DEBUG_INFO_STR_PREPEND " (debug info: "
+#define ROGUE_FWT_DEBUG_INFO_STR_APPEND ")"
+
+/*
+ * Table of debug info sub-field's masks and corresponding message strings
+ * to be appended to firmware trace
+ *
+ * Mask     : 16 bit mask to be applied to debug-info field
+ * String   : debug info message string
+ */
+
+#define ROGUE_FWT_DEBUG_INFO_MSKSTRLIST                              \
+	/*Mask,                                           String*/   \
+	X(ROGUE_FWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf")         \
+	X(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \
+	X(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events")
+
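+/*
+ * Illustrative expansion only (hypothetical table): with a suitable X()
+ * definition the list yields a mask/string table, e.g.
+ *
+ *   #define X(mask, str) { mask, str },
+ *   static const struct { u32 mask; const char *str; } dbg_info[] = {
+ *           ROGUE_FWT_DEBUG_INFO_MSKSTRLIST
+ *   };
+ *   #undef X
+ */
+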
+/*
+ ******************************************************************************
+ * HWR Data
+ ******************************************************************************
+ */
+enum rogue_hwrtype {
+	ROGUE_HWRTYPE_UNKNOWNFAILURE = 0,
+	ROGUE_HWRTYPE_OVERRUN = 1,
+	ROGUE_HWRTYPE_POLLFAILURE = 2,
+	ROGUE_HWRTYPE_BIF0FAULT = 3,
+	ROGUE_HWRTYPE_BIF1FAULT = 4,
+	ROGUE_HWRTYPE_TEXASBIF0FAULT = 5,
+	ROGUE_HWRTYPE_MMUFAULT = 6,
+	ROGUE_HWRTYPE_MMUMETAFAULT = 7,
+	ROGUE_HWRTYPE_MIPSTLBFAULT = 8,
+	ROGUE_HWRTYPE_ECCFAULT = 9,
+	ROGUE_HWRTYPE_MMURISCVFAULT = 10,
+};
+
+#define ROGUE_FWIF_HWRTYPE_BIF_BANK_GET(hwr_type) \
+	(((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define ROGUE_FWIF_HWRTYPE_PAGE_FAULT_GET(hwr_type)       \
+	((((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ||      \
+	  ((hwr_type) == ROGUE_HWRTYPE_BIF1FAULT) ||      \
+	  ((hwr_type) == ROGUE_HWRTYPE_TEXASBIF0FAULT) || \
+	  ((hwr_type) == ROGUE_HWRTYPE_MMUFAULT) ||       \
+	  ((hwr_type) == ROGUE_HWRTYPE_MMUMETAFAULT) ||   \
+	  ((hwr_type) == ROGUE_HWRTYPE_MIPSTLBFAULT) ||   \
+	  ((hwr_type) == ROGUE_HWRTYPE_MMURISCVFAULT))    \
+		 ? true                                   \
+		 : false)
+
+struct rogue_bifinfo {
+	aligned_u64 bif_req_status;
+	aligned_u64 bif_mmu_status;
+	aligned_u64 pc_address; /* phys address of the page catalogue */
+	aligned_u64 reserved;
+};
+
+struct rogue_eccinfo {
+	u32 fault_gpu;
+};
+
+struct rogue_mmuinfo {
+	aligned_u64 mmu_status[2];
+	aligned_u64 pc_address; /* phys address of the page catalogue */
+	aligned_u64 reserved;
+};
+
+struct rogue_pollinfo {
+	u32 thread_num;
+	u32 cr_poll_addr;
+	u32 cr_poll_mask;
+	u32 cr_poll_last_value;
+	aligned_u64 reserved;
+} __aligned(8);
+
+struct rogue_tlbinfo {
+	u32 bad_addr;
+	u32 entry_lo;
+};
+
+struct rogue_hwrinfo {
+	union {
+		struct rogue_bifinfo bif_info;
+		struct rogue_mmuinfo mmu_info;
+		struct rogue_pollinfo poll_info;
+		struct rogue_tlbinfo tlb_info;
+		struct rogue_eccinfo ecc_info;
+	} hwr_data;
+
+	aligned_u64 cr_timer;
+	aligned_u64 os_timer;
+	u32 frame_num;
+	u32 pid;
+	u32 active_hwrt_data;
+	u32 hwr_number;
+	u32 event_status;
+	u32 hwr_recovery_flags;
+	enum rogue_hwrtype hwr_type;
+	u32 dm;
+	u32 core_id;
+	aligned_u64 cr_time_of_kick;
+	aligned_u64 cr_time_hw_reset_start;
+	aligned_u64 cr_time_hw_reset_finish;
+	aligned_u64 cr_time_freelist_ready;
+	aligned_u64 reserved[2];
+} __aligned(8);
+
+/* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U
+/* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define ROGUE_FWIF_HWINFO_MAX_LAST 8U
+/* Total number of HWR logs stored in a buffer */
+#define ROGUE_FWIF_HWINFO_MAX \
+	(ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST)
+/* Index of the last log in the HWR log buffer */
+#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U)
+
+struct rogue_fwif_hwrinfobuf {
+	struct rogue_hwrinfo hwr_info[ROGUE_FWIF_HWINFO_MAX];
+	u32 hwr_counter;
+	u32 write_index;
+	u32 dd_req_count;
+	u32 hwr_info_buf_flags; /* Compatibility and other flags */
+	u32 hwr_dm_locked_up_count[PVR_FWIF_DM_MAX];
+	u32 hwr_dm_overran_count[PVR_FWIF_DM_MAX];
+	u32 hwr_dm_recovered_count[PVR_FWIF_DM_MAX];
+	u32 hwr_dm_false_detect_count[PVR_FWIF_DM_MAX];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN (1)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
+#define ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)
+
+#define ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN (1)
+#define ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (2)
+
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP (1)
+#define ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP (2)
+/*
+ ******************************************************************************
+ * ROGUE firmware Init Config Data
+ ******************************************************************************
+ */
+
+/* Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_MODE_RAND BIT(0)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_SRESET_EN BIT(1)
+#define ROGUE_FWIF_INICFG_HWPERF_EN BIT(2)
+#define ROGUE_FWIF_INICFG_DM_KILL_MODE_RAND_EN BIT(3)
+#define ROGUE_FWIF_INICFG_POW_RASCALDUST BIT(4)
+/* Bit 5 is reserved. */
+#define ROGUE_FWIF_INICFG_FBCDC_V3_1_EN BIT(6)
+#define ROGUE_FWIF_INICFG_CHECK_MLIST_EN BIT(7)
+#define ROGUE_FWIF_INICFG_DISABLE_CLKGATING_EN BIT(8)
+/* Bit 9 is reserved. */
+/* Bit 10 is reserved. */
+/* Bit 11 is reserved. */
+#define ROGUE_FWIF_INICFG_REGCONFIG_EN BIT(12)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_OUTOFMEMORY BIT(13)
+#define ROGUE_FWIF_INICFG_HWP_DISABLE_FILTER BIT(14)
+/* Bit 15 is reserved. */
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_FAST \
+	(ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN    \
+	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM \
+	(ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN    \
+	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SLOW \
+	(ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN    \
+	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_NODELAY \
+	(ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN    \
+	 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK \
+	(7 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP BIT(19)
+#define ROGUE_FWIF_INICFG_ASSERT_ON_HWR_TRIGGER BIT(20)
+#define ROGUE_FWIF_INICFG_FABRIC_COHERENCY_ENABLED BIT(21)
+#define ROGUE_FWIF_INICFG_VALIDATE_IRQ BIT(22)
+#define ROGUE_FWIF_INICFG_DISABLE_PDP_EN BIT(23)
+#define ROGUE_FWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN BIT(24)
+#define ROGUE_FWIF_INICFG_WORKEST BIT(25)
+#define ROGUE_FWIF_INICFG_PDVFS BIT(26)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT (27)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND \
+	(ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN    \
+	 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN \
+	(ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN    \
+	 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_MASK \
+	(3 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT (29)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_NONE (0)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP \
+	(ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP      \
+	 << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP \
+	(ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP      \
+	 << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_MASK        \
+	(ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP | \
+	 ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
+#define ROGUE_FWIF_INICFG_VALIDATE_SOCUSC_TIMER BIT(31)
+
+#define ROGUE_FWIF_INICFG_ALL (0xFFFFFFFFU)
+
+/* Extended Flag definitions affecting the firmware globally */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0)
+/* [7]   YUV10 override
+ * [6:4] Quality
+ * [3]   Quality enable
+ * [2:1] Compression scheme
+ * [0]   Lossy group
+ */
+#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK (0xFF)
+#define ROGUE_FWIF_INICFG_EXT_ALL (ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK)
+
+/* Flag definitions affecting only workloads submitted by a particular OS */
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2)
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3)
+
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_TDM BIT(4)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_GEOM BIT(5)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_FRAG BIT(6)
+#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_CDM BIT(7)
+
+#define ROGUE_FWIF_INICFG_OS_ALL (0xFF)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL     \
+	(ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN |  \
+	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
+	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN | \
+	 ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN)
+
+#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CLRMSK \
+	~(ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL)
+
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_HALF BIT(3)
+#define ROGUE_FWIF_FILTCFG_TRUNCATE_INT BIT(2)
+#define ROGUE_FWIF_FILTCFG_NEW_FILTER_MODE BIT(1)
+
+enum rogue_activepm_conf {
+	ROGUE_ACTIVEPM_FORCE_OFF = 0,
+	ROGUE_ACTIVEPM_FORCE_ON = 1,
+	ROGUE_ACTIVEPM_DEFAULT = 2
+};
+
+enum rogue_rd_power_island_conf {
+	ROGUE_RD_POWER_ISLAND_FORCE_OFF = 0,
+	ROGUE_RD_POWER_ISLAND_FORCE_ON = 1,
+	ROGUE_RD_POWER_ISLAND_DEFAULT = 2
+};
+
+#if defined(ROGUE_FW_IRQ_OS_COUNTERS)
+/* clang-format off */
+/*
+ * Unused registers re-purposed for storing counters of the Firmware's
+ * interrupts for each OS
+ */
+#	define IRQ_COUNTER_STORAGE_REGS				\
+		0x2028U, /* ROGUE_CR_PM_TA_MMU_FSTACK         */	\
+		0x2050U, /* ROGUE_CR_PM_3D_MMU_FSTACK         */	\
+		0x2030U, /* ROGUE_CR_PM_START_OF_MMU_TACONTEXT*/	\
+		0x2058U, /* ROGUE_CR_PM_START_OF_MMU_3DCONTEXT*/	\
+		0x2058U, /* ROGUE_CR_PM_START_OF_MMU_3DCONTEXT*/	\
+		0x2058U, /* ROGUE_CR_PM_START_OF_MMU_3DCONTEXT*/	\
+		0x2058U, /* ROGUE_CR_PM_START_OF_MMU_3DCONTEXT*/	\
+		0x2058U, /* ROGUE_CR_PM_START_OF_MMU_3DCONTEXT*/
+/* clang-format on */
+#endif
+
+struct rogue_fw_register_list {
+	/* Register number */
+	u16 reg_num;
+	/* Indirect register number (or 0 if not used) */
+	u16 indirect_reg_num;
+	/* Start value for indirect register */
+	u16 indirect_start_val;
+	/* End value for indirect register */
+	u16 indirect_end_val;
+};
+
+struct rogue_fwif_dllist_node {
+	u32 p;
+	u32 n;
+};
+
+/*
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/* This number is used to represent unallocated page catalog base register */
+#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU
+
+/* Firmware memory context. */
+struct rogue_fwif_fwmemcontext {
+	/* device physical address of context's page catalogue */
+	aligned_u64 pc_dev_paddr;
+	/*
+	 * associated page catalog base register (ROGUE_FW_BIF_INVALID_PCSET ==
+	 * unallocated)
+	 */
+	u32 page_cat_base_reg_set;
+	/* breakpoint address */
+	u32 breakpoint_addr;
+	/* breakpoint handler address */
+	u32 bp_handler_addr;
+	/* DM and enable control for BP */
+	u32 breakpoint_ctl;
+	/* Compatibility and other flags */
+	u32 fw_mem_ctx_flags;
+	u32 padding;
+} __aligned(8);
+
+/*
+ * FW context state flags
+ */
+#define ROGUE_FWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
+#define ROGUE_FWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
+
+#define ROGUE_NUM_GEOM_CORES_MAX 4
+
+/*
+ * FW-accessible TA state which must be written out to memory on context store
+ */
+struct rogue_fwif_geom_ctx_state_per_geom {
+	/* To store in mid-TA */
+	aligned_u64 geom_reg_vdm_call_stack_pointer;
+	/* Initial value (in case it is lost due to a lock-up) */
+	aligned_u64 geom_reg_vdm_call_stack_pointer_init;
+	u32 geom_reg_vbs_so_prim[4];
+	u16 geom_current_idx;
+	u16 padding[3];
+} __aligned(8);
+
+struct rogue_fwif_geom_ctx_state {
+	/* FW-accessible TA state which must be written out to memory on context store */
+	struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
+} __aligned(8);
+
+/*
+ * FW-accessible ISP state which must be written out to memory on context store
+ */
+struct rogue_fwif_frag_ctx_state {
+	u32 frag_reg_pm_deallocated_mask_status;
+	u32 frag_reg_dm_pds_mtilefree_status;
+	/* Compatibility and other flags */
+	u32 ctx_state_flags;
+	/*
+	 * frag_reg_isp_store should be the last element of the structure as this
+	 * is an array whose size is determined at runtime after detecting the
+	 * ROGUE core
+	 */
+	u32 frag_reg_isp_store[];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTX_USING_BUFFER_A (0)
+#define ROGUE_FWIF_CTX_USING_BUFFER_B (1U)
+
+struct rogue_fwif_compute_ctx_state {
+	u32 ctx_state_flags; /* Target buffer and other flags */
+};
+
+struct rogue_fwif_fwcommoncontext {
+	/* CCB details for this firmware context */
+	u32 ccbctl_fw_addr; /* CCB control */
+	u32 ccb_fw_addr; /* CCB base */
+	struct rogue_fwif_dma_addr ccb_meta_dma_addr;
+
+	/* Context suspend state */
+	/* geom/frag context suspend state, read/written by FW */
+	u32 context_state_addr __aligned(8);
+
+	/* Flags e.g. for context switching */
+	u32 fw_com_ctx_flags;
+	u32 priority;
+	u32 priority_seq_num;
+
+	/* Framework state */
+	/* Register updates for Framework */
+	u32 rf_cmd_addr __aligned(8);
+
+	/* Statistic updates waiting to be passed back to the host... */
+	/* True when some stats are pending */
+	bool stats_pending __aligned(4);
+	/* Number of stores on this context since last update */
+	s32 stats_num_stores;
+	/* Number of OOMs on this context since last update */
+	s32 stats_num_out_of_memory;
+	/* Number of PRs on this context since last update */
+	s32 stats_num_partial_renders;
+	/* Data Master type */
+	u32 dm;
+	/* Device Virtual Address of the signal the context is waiting on */
+	aligned_u64 wait_signal_address;
+	/* List entry for the wait-signal list */
+	struct rogue_fwif_dllist_node wait_signal_node __aligned(8);
+	/* List entry for the buffer stalled list */
+	struct rogue_fwif_dllist_node buf_stalled_node __aligned(8);
+	/* Address of the circular buffer queue pointers */
+	aligned_u64 cbuf_queue_ctrl_addr;
+
+	aligned_u64 robustness_address;
+	/* Max HWR deadline limit in ms */
+	u32 max_deadline_ms;
+	/* Following HWR circular buffer read-offset needs resetting */
+	bool read_offset_needs_reset;
+
+	/* List entry for the waiting list */
+	struct rogue_fwif_dllist_node waiting_node __aligned(8);
+	/* List entry for the run list */
+	struct rogue_fwif_dllist_node run_node __aligned(8);
+	/* UFO that last failed (or NULL) */
+	struct rogue_fwif_ufo last_failed_ufo;
+
+	/* Memory context */
+	u32 fw_mem_context_fw_addr;
+
+	/* References to the host side originators */
+	/* the Server Common Context */
+	u32 server_common_context_id;
+	/* associated process ID */
+	u32 pid;
+
+	/* True when Geom DM OOM is not allowed */
+	bool geom_oom_disabled __aligned(4);
+} __aligned(8);
+
+/* Firmware render context. */
+struct rogue_fwif_fwrendercontext {
+	/* Geometry firmware context. */
+	struct rogue_fwif_fwcommoncontext geom_context;
+	/* Fragment firmware context. */
+	struct rogue_fwif_fwcommoncontext frag_context;
+
+	struct rogue_fwif_static_rendercontext_state static_render_context_state;
+
+	/* Number of commands submitted to the WorkEst FW CCB */
+	u32 work_est_ccb_submitted;
+
+	/* Compatibility and other flags */
+	u32 fw_render_ctx_flags;
+} __aligned(8);
+
+/* Firmware compute context. */
+struct rogue_fwif_fwcomputecontext {
+	/* Firmware context for the CDM */
+	struct rogue_fwif_fwcommoncontext cdm_context;
+
+	struct rogue_fwif_static_computecontext_state
+		static_compute_context_state;
+
+	/* Number of commands submitted to the WorkEst FW CCB */
+	u32 work_est_ccb_submitted;
+
+	/* Compatibility and other flags */
+	u32 compute_ctx_flags;
+
+	u32 wgp_state;
+	u32 wgp_checksum;
+	u32 core_mask_a;
+	u32 core_mask_b;
+} __aligned(8);
+
+/* Firmware TDM context. */
+struct rogue_fwif_fwtdmcontext {
+	/* Firmware context for the TDM */
+	struct rogue_fwif_fwcommoncontext tdm_context;
+
+	/* Number of commands submitted to the WorkEst FW CCB */
+	u32 work_est_ccb_submitted;
+} __aligned(8);
+
+/* Firmware TQ3D context. */
+struct rogue_fwif_fwtransfercontext {
+	/* Firmware context for TQ3D. */
+	struct rogue_fwif_fwcommoncontext tq_context;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ ******************************************************************************
+ */
+
+/*
+ * CMD_TYPE 32bit contains:
+ * 31:16	Reserved for magic value to detect corruption (16 bits)
+ * 15		Reserved for ROGUE_CCB_TYPE_TASK (1 bit)
+ * 14:0		Bits available for CMD_TYPEs (15 bits)
+ */
+
+/* Magic value to detect corruption */
+#define ROGUE_CMD_MAGIC_DWORD (0x2ABC)
+#define ROGUE_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFT (16U)
+#define ROGUE_CMD_MAGIC_DWORD_SHIFTED \
+	(ROGUE_CMD_MAGIC_DWORD << ROGUE_CMD_MAGIC_DWORD_SHIFT)
+
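+/*
+ * Illustrative validity check only: a command type whose top 16 bits do not
+ * carry the magic value can be treated as corrupt or incompatible, e.g.
+ *
+ *   if ((cmd_type & ROGUE_CMD_MAGIC_DWORD_MASK) !=
+ *       ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+ *           return -EINVAL;
+ */
+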
+/* Kernel CCB control for ROGUE */
+struct rogue_fwif_ccb_ctl {
+	/* write offset into array of commands (MUST be aligned to 16 bytes!) */
+	volatile u32 write_offset;
+	/* read offset into array of commands */
+	volatile u32 read_offset;
+	/* Offset wrapping mask (Total capacity of the CCB - 1) */
+	u32 wrap_mask;
+	/* size of each command in bytes */
+	u32 cmd_size;
+} __aligned(8);
+
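+/*
+ * Illustrative ring arithmetic only (assuming slot-granular offsets): with
+ * wrap_mask == capacity - 1 for a power-of-two capacity,
+ *
+ *   write_offset = (write_offset + 1) & wrap_mask;
+ *
+ * and the CCB is full when that next offset equals read_offset.
+ */
+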
+/* Kernel CCB command structure for ROGUE */
+
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+
+/*
+ * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT
+ * bit from BIF_CTRL reg
+ */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10)
+/* BIF_CTRL_INVAL_TLB1_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \
+	(ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8)
+/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800)
+
+/* indicates FW should interrupt the host */
+#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U)
+
+struct rogue_fwif_mmucachedata {
+	u32 cache_flags;
+	u32 mmu_cache_sync_fw_addr;
+	u32 mmu_cache_sync_update_value;
+};
+
+#define ROGUE_FWIF_BPDATA_FLAGS_ENABLE BIT(0)
+#define ROGUE_FWIF_BPDATA_FLAGS_WRITE BIT(1)
+#define ROGUE_FWIF_BPDATA_FLAGS_CTL BIT(2)
+#define ROGUE_FWIF_BPDATA_FLAGS_REGS BIT(3)
+
+struct rogue_fwif_bpdata {
+	/* Memory context */
+	u32 fw_mem_context_fw_addr;
+	/* Breakpoint address */
+	u32 bp_addr;
+	/* Breakpoint handler */
+	u32 bp_handler_addr;
+	/* Breakpoint control */
+	u32 bp_dm;
+	u32 bp_data_flags;
+	/* Number of temporary registers to overallocate */
+	u32 temp_regs;
+	/* Number of shared registers to overallocate */
+	u32 shared_regs;
+	/* DM associated with the breakpoint */
+	u32 dm;
+};
+
+#define ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS \
+	(ROGUE_FWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
+
+struct rogue_fwif_kccb_cmd_kick_data {
+	/* address of the firmware context */
+	u32 context_fw_addr;
+	/* Client CCB woff update */
+	u32 client_woff_update;
+	/* Client CCB wrap mask update after CCCB growth */
+	u32 client_wrap_mask_update;
+	/* number of CleanupCtl pointers attached */
+	u32 num_cleanup_ctl;
+	/* CleanupCtl structures associated with command */
+	u32 cleanup_ctl_fw_addr
+		[ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS];
+	/*
+	 * offset to the CmdHeader which houses the workload estimation kick
+	 * data.
+	 */
+	u32 work_est_cmd_header_offset;
+};
+
+struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data {
+	struct rogue_fwif_kccb_cmd_kick_data geom_cmd_kick_data;
+	struct rogue_fwif_kccb_cmd_kick_data frag_cmd_kick_data;
+};
+
+struct rogue_fwif_kccb_cmd_force_update_data {
+	/* address of the firmware context */
+	u32 context_fw_addr;
+	/* Client CCB fence offset */
+	u32 ccb_fence_offset;
+};
+
+enum rogue_fwif_cleanup_type {
+	/* FW common context cleanup */
+	ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT,
+	/* FW HW RT data cleanup */
+	ROGUE_FWIF_CLEANUP_HWRTDATA,
+	/* FW freelist cleanup */
+	ROGUE_FWIF_CLEANUP_FREELIST,
+	/* FW ZS Buffer cleanup */
+	ROGUE_FWIF_CLEANUP_ZSBUFFER,
+};
+
+struct rogue_fwif_cleanup_request {
+	/* Cleanup type */
+	enum rogue_fwif_cleanup_type cleanup_type;
+	union {
+		/* FW common context to cleanup */
+		u32 context_fw_addr;
+		/* HW RT to cleanup */
+		u32 hwrt_data_fw_addr;
+		/* Freelist to cleanup */
+		u32 freelist_fw_addr;
+		/* ZS Buffer to cleanup */
+		u32 zs_buffer_fw_addr;
+	} cleanup_data;
+};
+
+enum rogue_fwif_power_type {
+	ROGUE_FWIF_POW_OFF_REQ = 1,
+	ROGUE_FWIF_POW_FORCED_IDLE_REQ,
+	ROGUE_FWIF_POW_NUM_UNITS_CHANGE,
+	ROGUE_FWIF_POW_APM_LATENCY_CHANGE
+};
+
+enum rogue_fwif_power_force_idle_type {
+	ROGUE_FWIF_POWER_FORCE_IDLE = 1,
+	ROGUE_FWIF_POWER_CANCEL_FORCED_IDLE,
+	ROGUE_FWIF_POWER_HOST_TIMEOUT,
+};
+
+struct rogue_fwif_power_request {
+	/* Type of power request */
+	enum rogue_fwif_power_type pow_type;
+	union {
+		/* Number of active Dusts */
+		u32 num_of_dusts;
+		/* If the operation is mandatory */
+		bool forced __aligned(4);
+		/*
+		 * Type of Request. Consolidating Force Idle, Cancel Forced
+		 * Idle, Host Timeout
+		 */
+		enum rogue_fwif_power_force_idle_type pow_request_type;
+	} power_req_data;
+};
+
+struct rogue_fwif_slcflushinvaldata {
+	/* Context to fence on (only useful when dm_context is true) */
+	u32 context_fw_addr;
+	/* Invalidate the cache as well as flushing */
+	bool inval __aligned(4);
+	/* The data to flush/invalidate belongs to a specific DM context */
+	bool dm_context __aligned(4);
+	/* Optional address of range (only useful when dm_context is false) */
+	aligned_u64 address;
+	/* Optional size of range (only useful when dm_context is false) */
+	aligned_u64 size;
+};
+
+enum rogue_fwif_hwperf_update_config {
+	ROGUE_FWIF_HWPERF_CTRL_TOGGLE = 0,
+	ROGUE_FWIF_HWPERF_CTRL_SET = 1,
+	ROGUE_FWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2
+};
+
+struct rogue_fwif_hwperf_ctrl {
+	enum rogue_fwif_hwperf_update_config opcode; /* Control operation code */
+	aligned_u64 mask; /* Mask of events to toggle */
+};
+
+struct rogue_fwif_hwperf_config_enable_blks {
+	/* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array */
+	u32 num_blocks;
+	/* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array */
+	u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_hwperf_config_da_blks {
+	/* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array */
+	u32 num_blocks;
+	/* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array */
+	u32 block_configs_fw_addr;
+};
+
+struct rogue_fwif_coreclkspeedchange_data {
+	u32 new_clock_speed; /* New clock speed */
+};
+
+#define ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX 16
+
+struct rogue_fwif_hwperf_ctrl_blks {
+	bool enable;
+	/* Number of block IDs in the array */
+	u32 num_blocks;
+	/* Array of ROGUE_HWPERF_CNTBLK_ID values */
+	u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX];
+};
+
+struct rogue_fwif_hwperf_select_custom_cntrs {
+	u16 custom_block;
+	u16 num_counters;
+	u32 custom_counter_ids_fw_addr;
+};
+
+struct rogue_fwif_zsbuffer_backing_data {
+	u32 zs_buffer_fw_addr; /* ZS-Buffer FW address */
+	bool done __aligned(4); /* action backing/unbacking succeeded */
+};
+
+struct rogue_fwif_freelist_gs_data {
+	/* Freelist FW address */
+	u32 freelist_fw_addr;
+	/* Amount of the Freelist change */
+	u32 delta_pages;
+	/* New amount of pages on the freelist (including ready pages) */
+	u32 new_pages;
+	/* Number of ready pages to be held in reserve until OOM */
+	u32 ready_pages;
+};
+
+#define MAX_FREELISTS_SIZE 3
+#define MAX_HW_GEOM_FRAG_CONTEXTS_SIZE 3
+
+#define ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT \
+	(MAX_HW_GEOM_FRAG_CONTEXTS_SIZE * MAX_FREELISTS_SIZE * 2U)
+#define ROGUE_FWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+struct rogue_fwif_freelists_reconstruction_data {
+	u32 freelist_count;
+	u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+struct rogue_fwif_write_offset_update_data {
+	/*
+	 * Context that may need to be resumed following a write offset update
+	 */
+	u32 context_fw_addr;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Proactive DVFS Structures
+ ******************************************************************************
+ */
+#define NUM_OPP_VALUES 16
+
+struct pdvfs_opp {
+	u32 volt; /* V  */
+	u32 freq; /* Hz */
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_opp {
+	struct pdvfs_opp opp_values[NUM_OPP_VALUES];
+	u32 min_opp_point;
+	u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_max_freq_data {
+	u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_min_freq_data {
+	u32 min_opp_point;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Register configuration structures
+ ******************************************************************************
+ */
+
+#define ROGUE_FWIF_REG_CFG_MAX_SIZE 512
+
+enum rogue_fwif_regdata_cmd_type {
+	ROGUE_FWIF_REGCFG_CMD_ADD = 101,
+	ROGUE_FWIF_REGCFG_CMD_CLEAR = 102,
+	ROGUE_FWIF_REGCFG_CMD_ENABLE = 103,
+	ROGUE_FWIF_REGCFG_CMD_DISABLE = 104
+};
+
+enum rogue_fwif_reg_cfg_type {
+	/* Sidekick power event */
+	ROGUE_FWIF_REG_CFG_TYPE_PWR_ON = 0,
+	/* Rascal / dust power event */
+	ROGUE_FWIF_REG_CFG_TYPE_DUST_CHANGE,
+	/* Geometry kick */
+	ROGUE_FWIF_REG_CFG_TYPE_GEOM,
+	/* Fragment kick */
+	ROGUE_FWIF_REG_CFG_TYPE_FRAG,
+	/* Compute kick */
+	ROGUE_FWIF_REG_CFG_TYPE_CDM,
+	/* TLA kick */
+	ROGUE_FWIF_REG_CFG_TYPE_TLA,
+	/* TDM kick */
+	ROGUE_FWIF_REG_CFG_TYPE_TDM,
+	/* Applies to all types. Keep as last element */
+	ROGUE_FWIF_REG_CFG_TYPE_ALL
+};
+
+struct rogue_fwif_reg_cfg_rec {
+	u64 sddr;
+	u64 mask;
+	u64 value;
+};
+
+struct rogue_fwif_regconfig_data {
+	enum rogue_fwif_regdata_cmd_type cmd_type;
+	enum rogue_fwif_reg_cfg_type reg_config_type;
+	struct rogue_fwif_reg_cfg_rec reg_config __aligned(8);
+};
+
+struct rogue_fwif_reg_cfg {
+	/*
+	 * PDump WRW command write granularity is 32 bits.
+	 * Add padding to ensure array size is 32 bit granular.
+	 */
+	u8 num_regs_type[ALIGN((u32)ROGUE_FWIF_REG_CFG_TYPE_ALL,
+			       sizeof(u32))] __aligned(8);
+	struct rogue_fwif_reg_cfg_rec
+		reg_configs[ROGUE_FWIF_REG_CFG_MAX_SIZE] __aligned(8);
+} __aligned(8);
+
+/* clang-format off */
+enum rogue_fwif_os_state_change {
+	ROGUE_FWIF_OS_ONLINE = 1,
+	ROGUE_FWIF_OS_OFFLINE
+};
+/* clang-format on */
+
+struct rogue_fwif_os_state_change_data {
+	u32 osid;
+	enum rogue_fwif_os_state_change new_os_state;
+} __aligned(8);
+
+enum rogue_fwif_counter_dump_request {
+	ROGUE_FWIF_PWR_COUNTER_DUMP_START = 1,
+	ROGUE_FWIF_PWR_COUNTER_DUMP_STOP,
+	ROGUE_FWIF_PWR_COUNTER_DUMP_SAMPLE,
+};
+
+struct rogue_fwif_counter_dump_data {
+	enum rogue_fwif_counter_dump_request counter_dump_request;
+} __aligned(8);
+
+enum rogue_fwif_kccb_cmd_type {
+	/* Common commands */
+	ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* SLC flush and invalidation request */
+	ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U |
+					    ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/*
+	 * Requests cleanup of a FW resource (type specified in the command
+	 * data)
+	 */
+	ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Power request */
+	ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Backing for on-demand ZS-Buffer done */
+	ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE =
+		108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Unbacking for on-demand ZS-Buffer done */
+	ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE =
+		109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Freelist Grow done */
+	ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE =
+		110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Freelists Reconstruction done */
+	ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE =
+		112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/*
+	 * Informs the firmware that the host has added more data to a CDM2
+	 * Circular Buffer
+	 */
+	ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE =
+		114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Health check request */
+	ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Forcing signalling of all unmet UFOs for a given CCB offset */
+	ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* There is a geometry and a fragment command in this single kick */
+	ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Informs the FW that a Guest OS has come online / offline. */
+	ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE	= 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Commands only permitted to the native or host OS */
+	ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+	ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+	ROGUE_FWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Core clock speed change event */
+	ROGUE_FWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/*
+	 * Ask the firmware to update its cached log_type value from the
+	 * (shared) tracebuf control structure
+	 */
+	ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Set a maximum frequency/OPP point */
+	ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/*
+	 * Changes the relative scheduling priority for a particular OSid. It can
+	 * only be serviced for the Host DDK
+	 */
+	ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Set or clear firmware state flags */
+	ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Set a minimum frequency/OPP point */
+	ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Configure Periodic Hardware Reset behaviour */
+	ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Configure Safety Firmware Watchdog */
+	ROGUE_FWIF_KCCB_CMD_WDG_CFG = 215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Controls counter dumping in the FW */
+	ROGUE_FWIF_KCCB_CMD_COUNTER_DUMP = 216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Configure, clear and enable multiple HWPerf blocks */
+	ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Configure the custom counters for HWPerf */
+	ROGUE_FWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Configure directly addressable counters for HWPerf */
+	ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+#define ROGUE_FWIF_LAST_ALLOWED_GUEST_KCCB_CMD \
+	(ROGUE_FWIF_KCCB_CMD_REGCONFIG - 1)
+
+/* Kernel CCB command packet */
+struct rogue_fwif_kccb_cmd {
+	/* Command type */
+	enum rogue_fwif_kccb_cmd_type cmd_type;
+	/* Compatibility and other flags */
+	u32 kccb_flags;
+
+	/*
+	 * NOTE: Make sure that cmd_data is the last member of this struct
+	 * This is to calculate actual command size for device mem copy.
+	 * (Refer ROGUEGetCmdMemCopySize())
+	 */
+	union {
+		/* Data for Kick command */
+		struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data;
+		/* Data for combined geom/frag Kick command */
+		struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data
+			combined_geom_frag_cmd_kick_data;
+		/* Data for MMU cache command */
+		struct rogue_fwif_mmucachedata mmu_cache_data;
+		/* Data for Breakpoint Commands */
+		struct rogue_fwif_bpdata bp_data;
+		/* Data for SLC Flush/Inval commands */
+		struct rogue_fwif_slcflushinvaldata slc_flush_inval_data;
+		/* Data for cleanup commands */
+		struct rogue_fwif_cleanup_request cleanup_data;
+		/* Data for power request commands */
+		struct rogue_fwif_power_request pow_data;
+		/* Data for HWPerf control command */
+		struct rogue_fwif_hwperf_ctrl hw_perf_ctrl;
+		/*
+		 * Data for HWPerf configure, clear and enable performance
+		 * counter block command
+		 */
+		struct rogue_fwif_hwperf_config_enable_blks
+			hw_perf_cfg_enable_blks;
+		/*
+		 * Data for HWPerf enable or disable performance counter block
+		 * commands
+		 */
+		struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks;
+		/* Data for HWPerf configure the custom counters to read */
+		struct rogue_fwif_hwperf_select_custom_cntrs
+			hw_perf_select_cstm_cntrs;
+		/* Data for HWPerf configure Directly Addressable blocks */
+		struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks;
+		/* Data for core clock speed change */
+		struct rogue_fwif_coreclkspeedchange_data
+			core_clk_speed_change_data;
+		/* Feedback for Z/S Buffer backing/unbacking */
+		struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data;
+		/* Feedback for Freelist grow/shrink */
+		struct rogue_fwif_freelist_gs_data free_list_gs_data;
+		/* Feedback for Freelists reconstruction */
+		struct rogue_fwif_freelists_reconstruction_data
+			free_lists_reconstruction_data;
+		/* Data for custom register configuration */
+		struct rogue_fwif_regconfig_data reg_config_data;
+		/* Data for informing the FW about the write offset update */
+		struct rogue_fwif_write_offset_update_data
+			write_offset_update_data;
+		/* Data for setting the max frequency/OPP */
+		struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data;
+		/* Data for setting the min frequency/OPP */
+		struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data;
+		/* Data for updating the Guest Online states */
+		struct rogue_fwif_os_state_change_data cmd_os_online_state_data;
+		/* Dev address for TBI buffer allocated on demand */
+		u32 tbi_buffer_fw_addr;
+		/* Data for dumping of register ranges */
+		struct rogue_fwif_counter_dump_data counter_dump_config_data;
+		/* Data for signalling all unmet fences for a given CCB */
+		struct rogue_fwif_kccb_cmd_force_update_data force_update_data;
+	} cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_kccb_cmd);
+
+/*
+ ******************************************************************************
+ * Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+
+struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data {
+	u32 zs_buffer_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelist_gs_data {
+	u32 freelist_id;
+};
+
+struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data {
+	u32 freelist_count;
+	u32 hwr_counter;
+	u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+/* 1 if a page fault happened */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0)
+/* 1 if applicable to all contexts */
+#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1)
+
+struct rogue_fwif_fwccb_cmd_context_reset_data {
+	/* Context affected by the reset */
+	u32 server_common_context_id;
+	/* Reason for reset */
+	enum rogue_context_reset_reason reset_reason;
+	/* Data Master affected by the reset */
+	u32 dm;
+	/* Job ref running at the time of reset */
+	u32 reset_job_ref;
+	/* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */
+	u32 flags;
+	/* At what page catalog address */
+	aligned_u64 pc_address;
+	/* Page fault address (only when applicable) */
+	aligned_u64 fault_address;
+};
+
+struct rogue_fwif_fwccb_cmd_fw_pagefault_data {
+	/* Page fault address */
+	u64 fw_fault_addr;
+};
+
+enum rogue_fwif_fwccb_cmd_type {
+	/* Requests ZSBuffer to be backed with physical pages */
+	ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U |
+						ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Requests ZSBuffer to be unbacked */
+	ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U |
+						  ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Requests an on-demand freelist grow/shrink */
+	ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW = 103U |
+					     ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Requests freelists reconstruction */
+	ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION =
+		104U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Notifies host of a HWR event on a context */
+	ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION =
+		105U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Requests an on-demand debug dump */
+	ROGUE_FWIF_FWCCB_CMD_DEBUG_DUMP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	/* Requests an on-demand update on process stats */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS = 107U |
+					    ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	ROGUE_FWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE =
+		108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+	ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART =
+		109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+
+	/* Notifies host of a FW pagefault */
+	ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION =
+		112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED,
+};
+
+enum rogue_fwif_fwccb_cmd_update_stats_type {
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32TotalNumPartialRenders stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1,
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32TotalNumOutOfMemory stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY,
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32NumGeomStores stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES,
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32NumFragStores stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES,
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32NumCDMStores stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES,
+	/*
+	 * PVRSRVStatsUpdateRenderContextStats should increase the value of the
+	 * ui32NumTDMStores stat
+	 */
+	ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES
+};
+
+struct rogue_fwif_fwccb_cmd_update_stats_data {
+	/* Element to update */
+	enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update;
+	/* The pid of the process whose stats are being updated */
+	u32 pid_owner;
+	/* Adjustment to be made to the statistic */
+	s32 adjustment_value;
+};
+
+struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data {
+	u32 core_clk_rate;
+} __aligned(8);
+
+struct rogue_fwif_fwccb_cmd {
+	/* Command type */
+	enum rogue_fwif_fwccb_cmd_type cmd_type;
+	/* Compatibility and other flags */
+	u32 fwccb_flags;
+
+	union {
+		/* Data for Z/S-Buffer on-demand (un)backing */
+		struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data
+			cmd_zs_buffer_backing;
+		/* Data for on-demand freelist grow/shrink */
+		struct rogue_fwif_fwccb_cmd_freelist_gs_data cmd_free_list_gs;
+		/* Data for freelists reconstruction */
+		struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data
+			cmd_freelists_reconstruction;
+		/* Data for context reset notification */
+		struct rogue_fwif_fwccb_cmd_context_reset_data
+			cmd_context_reset_notification;
+		/* Data for updating process stats */
+		struct rogue_fwif_fwccb_cmd_update_stats_data
+			cmd_update_stats_data;
+		struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data
+			cmd_core_clk_rate_change;
+		struct rogue_fwif_fwccb_cmd_fw_pagefault_data cmd_fw_pagefault;
+	} cmd_data __aligned(8);
+} __aligned(8);
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_fwccb_cmd);
+
+/*
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for ROGUE
+ ******************************************************************************
+ */
+struct rogue_fwif_workest_fwccb_cmd {
+	/* Index for return data array */
+	u16 return_data_index;
+	/* The cycles the workload took on the hardware */
+	u32 cycles_taken;
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands for ROGUE
+ ******************************************************************************
+ */
+
+/*
+ * Required memory alignment for 64-bit variables accessible by Meta
+ * (gcc for Meta aligns 64-bit variables to a 64-bit boundary; therefore,
+ * memory shared between the host and Meta that contains 64-bit variables has
+ * to maintain this alignment).
+ */
+#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64)
+
+#define ROGUE_CCB_TYPE_TASK BIT(15)
+#define ROGUE_CCB_FWALLOC_ALIGN(size)                \
+	(((size) + (ROGUE_FWIF_FWALLOC_ALIGN - 1)) & \
+	 ~(ROGUE_FWIF_FWALLOC_ALIGN - 1))
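+
+/*
+ * Illustrative examples (not part of the original interface): the align-up
+ * macro rounds a size to the next multiple of ROGUE_FWIF_FWALLOC_ALIGN (8),
+ * leaving already-aligned sizes unchanged:
+ *
+ *   ROGUE_CCB_FWALLOC_ALIGN(1)  == 8
+ *   ROGUE_CCB_FWALLOC_ALIGN(8)  == 8
+ *   ROGUE_CCB_FWALLOC_ALIGN(13) == 16
+ */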
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_GEOM \
+	(201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D \
+	(202U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG \
+	(203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR \
+	(204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_CDM \
+	(205U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_TDM \
+	(206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FBSC_INVALIDATE \
+	(207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_2D \
+	(208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRE_TIMESTAMP \
+	(209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_NULL \
+	(210U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+#define ROGUE_FWIF_CCB_CMD_TYPE_ABORT \
+	(211U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK)
+
+/* Leave a gap between CCB-specific commands and generic commands */
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \
+	(214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+/*
+ * Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+ * padding code with the CCB wrap upsets the FW if we don't have the task type
+ * bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+ */
+#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \
+	(217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \
+	(218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \
+	(219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+#define ROGUE_FWIF_CCB_CMD_TYPE_PADDING (221U | ROGUE_CMD_MAGIC_DWORD_SHIFTED)
+
+struct rogue_fwif_workest_kick_data {
+	/* Index for the KM Workload estimation return data array */
+	u16 return_data_index __aligned(8);
+	/* Predicted time taken to do the work in cycles */
+	u32 cycles_prediction __aligned(8);
+	/* Deadline for the workload */
+	aligned_u64 deadline;
+};
+
+struct rogue_fwif_ccb_cmd_header {
+	u32 cmd_type;
+	u32 cmd_size;
+	/*
+	 * external job reference - provided by client and used in debug for
+	 * tracking submitted work
+	 */
+	u32 ext_job_ref;
+	/*
+	 * internal job reference - generated by services and used in debug for
+	 * tracking submitted work
+	 */
+	u32 int_job_ref;
+	/* Workload Estimation - Workload Estimation Data */
+	struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8);
+};
+
+/*
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ ******************************************************************************
+ */
+struct rogue_fwif_cmd_priority {
+	s32 priority;
+};
+
+/*
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ ******************************************************************************
+ */
+struct rogue_fwif_sigbuf_ctl {
+	/* Ptr to Signature Buffer memory */
+	u32 buffer_fw_addr;
+	/* Amount of space left for storing regs in the buffer */
+	u32 left_size_in_regs;
+} __aligned(8);
+
+struct rogue_fwif_counter_dump_ctl {
+	/* Ptr to counter dump buffer */
+	u32 buffer_fw_addr;
+	/* Amount of space in the buffer, in dwords */
+	u32 size_in_dwords;
+} __aligned(8);
+
+struct rogue_fwif_firmware_gcov_ctl {
+	/* Ptr to firmware gcov buffer */
+	u32 buffer_fw_addr;
+	/* Amount of space available in the buffer */
+	u32 size;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * ROGUE Compatibility checks
+ *****************************************************************************
+ */
+
+/*
+ * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the
+ * following define should be increased by 1 to indicate to the compatibility
+ * logic that layout has changed.
+ */
+#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+struct rogue_fwif_compchecks_bvnc {
+	/* WARNING: This field must be defined as the first one in this structure */
+	u32 layout_version;
+	aligned_u64 bvnc;
+} __aligned(8);
+
+struct rogue_fwif_init_options {
+	u8 os_count_support;
+	u8 padding[7];
+} __aligned(8);
+
+#define ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+	struct rogue_fwif_compchecks_bvnc(name) = {       \
+		ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION,     \
+		0,                                        \
+	}
+#define ROGUE_FWIF_COMPCHECKS_BVNC_INIT(name)                                 \
+	do {                                                                  \
+		(name).layout_version = ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION; \
+		(name).bvnc = 0;                                              \
+	} while (0)
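+
+/*
+ * Illustrative usage (an assumption, not taken from elsewhere in this patch):
+ *
+ *   ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(hw_bvnc);
+ *       declares a struct rogue_fwif_compchecks_bvnc named hw_bvnc with
+ *       layout_version set to the current value and bvnc zeroed, while
+ *
+ *   ROGUE_FWIF_COMPCHECKS_BVNC_INIT(comp_checks.fw_bvnc);
+ *       re-initialises an existing instance in place.
+ */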
+
+struct rogue_fwif_compchecks {
+	/* hardware BVNC (from the ROGUE registers) */
+	struct rogue_fwif_compchecks_bvnc hw_bvnc;
+	/* firmware BVNC */
+	struct rogue_fwif_compchecks_bvnc fw_bvnc;
+	/* identifier of the FW processor version */
+	u32 fw_processor_version;
+	/* software DDK version */
+	u32 ddk_version;
+	/* software DDK build no. */
+	u32 ddk_build;
+	/* build options bit-field */
+	u32 build_options;
+	/* initialisation options bit-field */
+	struct rogue_fwif_init_options init_options;
+	/* Information is valid */
+	bool updated __aligned(4);
+	u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ ******************************************************************************
+ */
+struct rogue_fwif_runtime_cfg {
+	/* APM latency in ms before signalling IDLE to the host */
+	u32 active_pm_latency_ms;
+	/* Compatibility and other flags */
+	u32 runtime_cfg_flags;
+	/*
+	 * If set, APM latency does not reset to the system default on each GPU
+	 * power transition
+	 */
+	bool active_pm_latency_persistant __aligned(4);
+	/* Core clock speed, currently only used to calculate timer ticks */
+	u32 core_clock_speed;
+	/* Last number-of-dusts change requested by the host */
+	u32 default_dusts_num_init;
+	/* Periodic Hardware Reset configuration values */
+	u32 phr_mode;
+	/* New number of milliseconds a context switch (C/S) is allowed to last */
+	u32 hcs_deadline_ms;
+	/* The watchdog period in microseconds */
+	u32 wdg_period_us;
+	/* Array of priorities per OS */
+	u32 osid_priority[ROGUE_FW_MAX_NUM_OS];
+	/* On-demand allocated HWPerf buffer address, to be passed to the FW */
+	u32 hwperf_buf_fw_addr;
+	bool padding __aligned(4);
+};
+
+/*
+ *****************************************************************************
+ * Control data for ROGUE
+ *****************************************************************************
+ */
+
+#define ROGUE_FWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+enum rogue_fwif_tpu_dm {
+	ROGUE_FWIF_TPU_DM_PDM = 0,
+	ROGUE_FWIF_TPU_DM_VDM = 1,
+	ROGUE_FWIF_TPU_DM_CDM = 2,
+	ROGUE_FWIF_TPU_DM_TDM = 3,
+	ROGUE_FWIF_TPU_DM_LAST
+};
+
+enum rogue_fwif_gpio_val_mode {
+	/* No GPIO validation */
+	ROGUE_FWIF_GPIO_VAL_OFF = 0,
+	/*
+	 * Simple test case which starts by sending data via the GPIO and then
+	 * sends back any data received over the GPIO
+	 */
+	ROGUE_FWIF_GPIO_VAL_GENERAL = 1,
+	/*
+	 * More complex test case that writes and reads data across the entire
+	 * GPIO AP address range.
+	 */
+	ROGUE_FWIF_GPIO_VAL_AP = 2,
+	/* Validates the GPIO Testbench. */
+	ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5,
+	/* Send and then receive each byte in the range 0-255. */
+	ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6,
+	/* Send and then receive each power-of-2 byte in the range 0-255. */
+	ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7,
+	ROGUE_FWIF_GPIO_VAL_LAST
+};
+
+enum fw_perf_conf {
+	FW_PERF_CONF_NONE = 0,
+	FW_PERF_CONF_ICACHE = 1,
+	FW_PERF_CONF_DCACHE = 2,
+	FW_PERF_CONF_JTLB_INSTR = 5,
+	FW_PERF_CONF_INSTRUCTIONS = 6
+};
+
+enum fw_boot_stage {
+	FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+	FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+	FW_BOOT_NOT_STARTED = 0,
+	FW_BOOT_BLDR_STARTED = 1,
+	FW_BOOT_CACHE_DONE,
+	FW_BOOT_TLB_DONE,
+	FW_BOOT_MAIN_STARTED,
+	FW_BOOT_ALIGNCHECKS_DONE,
+	FW_BOOT_INIT_DONE,
+};
+
+/*
+ * Kernel CCB return slot responses. Using bit-fields instead of bare
+ * integers allows the FW to pack several responses into the return slot of
+ * each single kCCB command.
+ */
+/* Command executed (return status from FW) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0)
+/* A cleanup was requested but resource busy */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1)
+/* Poll failed in FW for a HW operation to complete */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2)
+/* Reset value of a kCCB return slot (set by host) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U
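+
+/*
+ * Illustrative usage (an assumption): since the slot is a bitfield, one
+ * return word may carry several of the statuses above at once, so callers
+ * test individual bits rather than compare whole values, e.g.
+ *
+ *   if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED)
+ *           ...the command has completed...
+ *   if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)
+ *           ...the resource was busy; retry the cleanup request...
+ */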
+
+struct rogue_fwif_connection_ctl {
+	/* FW-OS connection states */
+	volatile enum rogue_fwif_connection_fw_state connection_fw_state;
+	volatile enum rogue_fwif_connection_os_state connection_os_state;
+	volatile u32 alive_fw_token;
+	volatile u32 alive_os_token;
+} __aligned(8);
+
+struct rogue_fwif_osinit {
+	/* Kernel CCB */
+	u32 kernel_ccbctl_fw_addr;
+	u32 kernel_ccb_fw_addr;
+	u32 kernel_ccb_rtn_slots_fw_addr;
+
+	/* Firmware CCB */
+	u32 firmware_ccbctl_fw_addr;
+	u32 firmware_ccb_fw_addr;
+
+	/* Workload Estimation Firmware CCB */
+	u32 work_est_firmware_ccbctl_fw_addr;
+	u32 work_est_firmware_ccb_fw_addr;
+
+	u32 rogue_fwif_hwr_info_buf_ctl_fw_addr;
+
+	u32 hwr_debug_dump_limit;
+
+	u32 fw_os_data_fw_addr;
+
+	/* Compatibility checks to be populated by the Firmware */
+	struct rogue_fwif_compchecks rogue_comp_checks;
+} __aligned(8);
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc_block {
+	/* Counter block ID, see ROGUE_HWPERF_CNTBLK_ID */
+	u16 block_id;
+
+	/* Number of counters in this block type */
+	u16 num_counters;
+
+	/* Number of blocks of this type */
+	u16 num_blocks;
+
+	u16 reserved;
+};
+
+#define ROGUE_HWPERF_MAX_BVNC_LEN (24)
+
+#define ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc {
+	/* BVNC string */
+	char bvnc_string[ROGUE_HWPERF_MAX_BVNC_LEN];
+	/* See ROGUE_HWPERF_FEATURE_FLAGS */
+	u32 bvnc_km_feature_flags;
+	/* Number of blocks described in bvnc_blocks */
+	u16 num_bvnc_blocks;
+	/* Number of GPU cores present */
+	u16 bvnc_gpu_cores;
+	/* Supported Performance Blocks for BVNC */
+	struct rogue_hwperf_bvnc_block
+		bvnc_blocks[ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN];
+};
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_hwperf_bvnc);
+
+struct rogue_fwif_sysinit {
+	/* Fault read address */
+	aligned_u64 fault_phys_addr;
+
+	/* PDS execution base */
+	aligned_u64 pds_exec_base;
+	/* USC execution base */
+	aligned_u64 usc_exec_base;
+	/* FBCDC bindless texture state table base */
+	aligned_u64 fbcdc_state_table_base;
+	aligned_u64 fbcdc_large_state_table_base;
+	/* Texture state base */
+	aligned_u64 texture_heap_base;
+
+	/* Event filter for Firmware events */
+	u64 hw_perf_filter;
+
+	aligned_u64 slc3_fence_dev_addr;
+
+	u32 tpu_trilinear_frac_mask[ROGUE_FWIF_TPU_DM_LAST] __aligned(8);
+
+	/* Signature and Checksum Buffers for DMs */
+	struct rogue_fwif_sigbuf_ctl sigbuf_ctl[PVR_FWIF_DM_MAX];
+
+	struct rogue_fwif_pdvfs_opp pdvfs_opp_info;
+
+	struct rogue_fwif_dma_addr coremem_data_store;
+
+	struct rogue_fwif_counter_dump_ctl counter_dump_ctl;
+
+	u32 filter_flags;
+
+	u32 runtime_cfg_fw_addr;
+
+	u32 trace_buf_ctl_fw_addr;
+	u32 fw_sys_data_fw_addr;
+
+	u32 gpu_util_fw_cb_ctl_fw_addr;
+	u32 reg_cfg_fw_addr;
+	u32 hwperf_ctl_fw_addr;
+
+	u32 align_checks;
+
+	/* Core clock speed at FW boot time */
+	u32 initial_core_clock_speed;
+
+	/* APM latency in ms before signalling IDLE to the host */
+	u32 active_pm_latency_ms;
+
+	/* Flag to be set by the Firmware after successful start */
+	bool firmware_started __aligned(4);
+
+	/* Host/FW Trace synchronisation Partition Marker */
+	u32 marker_val;
+
+	/* Firmware initialization complete time */
+	u32 firmware_started_timestamp;
+
+	u32 jones_disable_mask;
+
+	/* Firmware performance counter config */
+	enum fw_perf_conf firmware_perf;
+
+	/*
+	 * FW pointer to memory containing the core clock rate in Hz.
+	 * Firmware (PDVFS) updates the memory when running on a non-primary FW
+	 * thread to communicate it to the host driver.
+	 */
+	u32 core_clock_rate_fw_addr;
+
+	enum rogue_fwif_gpio_val_mode gpio_validation_mode;
+
+	/* Used in HWPerf for decoding BVNC Features */
+	struct rogue_hwperf_bvnc bvnc_km_feature_flags;
+
+	/* Value to write into ROGUE_CR_TFBC_COMPRESSION_CONTROL */
+	u32 tfbc_compression_control;
+} __aligned(8);
+
+/*
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************
+ */
+
+struct rogue_fwif_time_corr {
+	aligned_u64 os_timestamp;
+	aligned_u64 os_mono_timestamp;
+	aligned_u64 cr_timestamp;
+
+	/*
+	 * Utility variable used to convert CR timer deltas to OS timer deltas
+	 * (nS), where the deltas are relative to the timestamps above:
+	 * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below
+	 */
+	aligned_u64 cr_delta_to_os_delta_kns;
+
+	u32 core_clock_speed;
+	u32 reserved;
+} __aligned(8);
+
+/*
+ * The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the ROGUE_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ *             deltaCR * 256                                   256 * scale
+ *  deltaOS = --------------- * scale = deltaCR * K    [ K = --------------- ]
+ *             GPUclockspeed                                  GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5 hours for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+#define ROGUE_FWIF_GET_DELTA_OSTIME_NS(delta_cr, k) \
+	(((delta_cr) * (k)) >> ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
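+
+/*
+ * Worked example (illustrative, not part of the original interface): for a
+ * GPU clock of 512 MHz with scale = 10^9 (nS),
+ *
+ *   K = (256 * 10^9 / 512000000) << 20 = 500 << 20
+ *
+ * so a deltaCR of 1000 (i.e. 256000 GPU clocks) converts to
+ *
+ *   deltaOS = (1000 * (500 << 20)) >> 20 = 500000 nS (500 uS).
+ */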
+
+/*
+ ******************************************************************************
+ * GPU Utilisation
+ ******************************************************************************
+ */
+
+/* See rogue_common.h for a list of GPU states */
+#define ROGUE_FWIF_GPU_UTIL_TIME_MASK \
+	(0xFFFFFFFFFFFFFFFFull & ~ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+#define ROGUE_FWIF_GPU_UTIL_GET_TIME(word) \
+	((word) & ROGUE_FWIF_GPU_UTIL_TIME_MASK)
+#define ROGUE_FWIF_GPU_UTIL_GET_STATE(word) \
+	((word) & ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+/*
+ * The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the
+ * Host. In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \
+	(((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
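+
+/*
+ * Illustrative example (an assumption): the clamp avoids an unsigned
+ * underflow when the FW-approximated timestamp runs slightly ahead of the
+ * real OS one:
+ *
+ *   ROGUE_FWIF_GPU_UTIL_GET_PERIOD(105, 100) == 5
+ *   ROGUE_FWIF_GPU_UTIL_GET_PERIOD(100, 105) == 0, not a huge wrapped value
+ */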
+
+#define ROGUE_FWIF_GPU_UTIL_MAKE_WORD(time, state) \
+	(ROGUE_FWIF_GPU_UTIL_GET_TIME(time) |      \
+	 ROGUE_FWIF_GPU_UTIL_GET_STATE(state))
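+
+/*
+ * Illustrative example (an assumption): a utilisation word carries the OS
+ * time in the bits covered by ROGUE_FWIF_GPU_UTIL_TIME_MASK and the GPU
+ * state in the remaining bits, so a transition is recorded and decoded as:
+ *
+ *   word  = ROGUE_FWIF_GPU_UTIL_MAKE_WORD(os_time_ns, state);
+ *   time  = ROGUE_FWIF_GPU_UTIL_GET_TIME(word);
+ *   state = ROGUE_FWIF_GPU_UTIL_GET_STATE(word);
+ */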
+
+/*
+ * The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are
+ * processed by the MISR. The update frequency of this array depends on how fast
+ * the system can change state (basically how small the APM latency is) and
+ * perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries, in the worst case the
+ * FW will read old data, which is still acceptable if the Host is updating the
+ * timer correlation at that time.
+ */
+#define ROGUE_FWIF_TIME_CORR_ARRAY_SIZE 256U
+#define ROGUE_FWIF_TIME_CORR_CURR_INDEX(seqcount) \
+	((seqcount) % ROGUE_FWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((ROGUE_FWIF_TIME_CORR_ARRAY_SIZE &
+	       (ROGUE_FWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+	      "ROGUE_FWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+struct rogue_fwif_gpu_util_fwcb {
+	struct rogue_fwif_time_corr time_corr[ROGUE_FWIF_TIME_CORR_ARRAY_SIZE];
+	u32 time_corr_seq_count;
+
+	/* Compatibility and other flags */
+	u32 gpu_util_flags;
+
+	/* Last GPU state + OS time of the last state update */
+	aligned_u64 last_word;
+
+	/* Counters for the amount of time the GPU was active/idle/blocked */
+	aligned_u64 stats_counters[PVR_FWIF_GPU_UTIL_STATE_NUM];
+} __aligned(8);
+
+struct rogue_fwif_rta_ctl {
+	/* Render number */
+	u32 render_target_index;
+	/* index in RTA */
+	u32 current_render_target;
+	/* total active RTs */
+	u32 active_render_targets;
+	/* total active RTs from the first TA kick, for OOM */
+	u32 cumul_active_render_targets;
+	/* Array of valid RT indices */
+	u32 valid_render_targets_fw_addr;
+	/* Array of number of occurred partial renders per render target */
+	u32 rta_num_partial_renders_fw_addr;
+	/* Number of render targets in the array */
+	u32 max_rts;
+	/* Compatibility and other flags */
+	u32 rta_ctl_flags;
+} __aligned(8);
+
+struct rogue_fwif_freelist {
+	aligned_u64 freelist_dev_addr;
+	aligned_u64 current_dev_addr;
+	u32 current_stack_top;
+	u32 max_pages;
+	u32 grow_pages;
+	/* HW pages */
+	u32 current_pages;
+	u32 allocated_page_count;
+	u32 allocated_mmu_page_count;
+	u32 freelist_id;
+	bool grow_pending __aligned(4);
+	/* Pages that should be used only when OOM is reached */
+	u32 ready_pages;
+	/* Compatibility and other flags */
+	u32 freelist_flags;
+	/* PM Global PB on which Freelist is loaded */
+	u32 pm_global_pb;
+	u32 padding;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * HWRTData
+ ******************************************************************************
+ */
+
+/* HWRTData flags */
+/* Deprecated flags 1:0 */
+#define HWRTDATA_HAS_LAST_GEOM BIT(2)
+#define HWRTDATA_PARTIAL_RENDERED BIT(3)
+#define HWRTDATA_DISABLE_TILE_REORDERING BIT(4)
+#define HWRTDATA_NEED_BRN65101_BLIT BIT(5)
+#define HWRTDATA_FIRST_BRN65101_STRIP BIT(6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER BIT(7)
+
+enum rogue_fwif_rtdata_state {
+	ROGUE_FWIF_RTDATA_STATE_NONE = 0,
+	ROGUE_FWIF_RTDATA_STATE_KICK_GEOM,
+	ROGUE_FWIF_RTDATA_STATE_KICK_GEOM_FIRST,
+	ROGUE_FWIF_RTDATA_STATE_GEOM_FINISHED,
+	ROGUE_FWIF_RTDATA_STATE_KICK_FRAG,
+	ROGUE_FWIF_RTDATA_STATE_FRAG_FINISHED,
+	ROGUE_FWIF_RTDATA_STATE_FRAG_CONTEXT_STORED,
+	ROGUE_FWIF_RTDATA_STATE_GEOM_OUTOFMEM,
+	ROGUE_FWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+	/*
+	 * In case of HWR, we can't set the RTDATA state to NONE, as this would
+	 * cause any TA to become a first TA. To ensure all related TAs are
+	 * skipped, we use the HWR state.
+	 */
+	ROGUE_FWIF_RTDATA_STATE_HWR,
+	ROGUE_FWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+};
+
+struct rogue_fwif_hwrtdata_common {
+	bool geom_caches_need_zeroing __aligned(4);
+
+	u32 screen_pixel_max;
+	aligned_u64 multi_sample_ctl;
+	u64 flipped_multi_sample_ctl;
+	u32 tpc_stride;
+	u32 tpc_size;
+	u32 te_screen;
+	u32 mtile_stride;
+	u32 teaa;
+	u32 te_mtile1;
+	u32 te_mtile2;
+	u32 isp_merge_lower_x;
+	u32 isp_merge_lower_y;
+	u32 isp_merge_upper_x;
+	u32 isp_merge_upper_y;
+	u32 isp_merge_scale_x;
+	u32 isp_merge_scale_y;
+	u32 rgn_header_size;
+	u32 isp_mtile_size;
+	u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_hwrtdata {
+	/* MList Data Store */
+	aligned_u64 pm_mlist_dev_addr;
+
+	aligned_u64 vce_cat_base[4];
+	aligned_u64 vce_last_cat_base[4];
+	aligned_u64 te_cat_base[4];
+	aligned_u64 te_last_cat_base[4];
+	aligned_u64 alist_cat_base;
+	aligned_u64 alist_last_cat_base;
+
+	aligned_u64 pm_alist_stack_pointer;
+	u32 pm_mlist_stack_pointer;
+
+	u32 hwrt_data_common_fw_addr;
+
+	u32 hwrt_data_flags;
+	enum rogue_fwif_rtdata_state state;
+
+	u32 freelists_fw_addr[MAX_FREELISTS_SIZE] __aligned(8);
+	u32 freelist_hwr_snapshot[MAX_FREELISTS_SIZE];
+
+	aligned_u64 vheap_table_dev_addr;
+
+	struct rogue_fwif_cleanup_ctl cleanup_state;
+
+	struct rogue_fwif_rta_ctl rta_ctl;
+
+	aligned_u64 tail_ptrs_dev_addr;
+	aligned_u64 macrotile_array_dev_addr;
+	aligned_u64 rgn_header_dev_addr;
+	aligned_u64 rtc_dev_addr;
+
+	u32 owner_geom_not_used_by_host __aligned(8);
+
+	bool geom_caches_need_zeroing __aligned(4);
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Sync checkpoints
+ ******************************************************************************
+ */
+
+#define PVR_SYNC_CHECKPOINT_UNDEF 0x000
+#define PVR_SYNC_CHECKPOINT_ACTIVE 0xac1     /* Checkpoint has not signaled. */
+#define PVR_SYNC_CHECKPOINT_SIGNALED 0x519   /* Checkpoint has signaled. */
+#define PVR_SYNC_CHECKPOINT_ERRORED 0xeff    /* Checkpoint has been errored. */
+
+#include "pvr_rogue_fwif_check.h"
+
+#endif /* __PVR_ROGUE_FWIF_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
new file mode 100644
index 000000000000..482b0490df49
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
@@ -0,0 +1,491 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_CHECK_H__
+#define __PVR_ROGUE_FWIF_CHECK_H__
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+	static_assert(offsetof(type, member) == offset, "offsetof(" #type ", " #member ") incorrect");
+
+#define SIZE_CHECK(type, size) \
+	static_assert(sizeof(type) == size, #type " is incorrect size");
+
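+/*
+ * These checks pin the layout of every structure shared with the firmware:
+ * if a field moves or a struct changes size the build fails, instead of the
+ * host/FW ABI silently going out of sync. For example, the first check below
+ * only compiles while 'path' remains the first member of
+ * struct rogue_fwif_file_info_buf.
+ */
+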
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, path, 0);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, info, 200);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, line_num, 400);
+SIZE_CHECK(struct rogue_fwif_file_info_buf, 408);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, assert_buf, 16);
+SIZE_CHECK(struct rogue_fwif_tracebuf_space, 424);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf, log_type, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_size_in_dwords, 856);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_flags, 860);
+SIZE_CHECK(struct rogue_fwif_tracebuf, 864);
+
+OFFSET_CHECK(struct rogue_fw_fault_info, cr_timer, 0);
+OFFSET_CHECK(struct rogue_fw_fault_info, os_timer, 8);
+OFFSET_CHECK(struct rogue_fw_fault_info, data, 16);
+OFFSET_CHECK(struct rogue_fw_fault_info, reserved, 20);
+OFFSET_CHECK(struct rogue_fw_fault_info, fault_buf, 24);
+SIZE_CHECK(struct rogue_fw_fault_info, 432);
+
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags_ext, 4);
+OFFSET_CHECK(struct rogue_fwif_sysdata, pow_state, 8);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ridx, 12);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_widx, 16);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_wrap_count, 20);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_size, 24);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_drop_count, 28);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ut, 32);
+OFFSET_CHECK(struct rogue_fwif_sysdata, first_drop_ordinal, 36);
+OFFSET_CHECK(struct rogue_fwif_sysdata, last_drop_ordinal, 40);
+OFFSET_CHECK(struct rogue_fwif_sysdata, os_runtime_flags_mirror, 44);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fault_info, 80);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_faults, 3536);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_addr, 3540);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_mask, 3548);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_count, 3556);
+OFFSET_CHECK(struct rogue_fwif_sysdata, start_idle_time, 3568);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_state_flags, 3576);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_recovery_flags, 3580);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_sys_data_flags, 3616);
+OFFSET_CHECK(struct rogue_fwif_sysdata, mc_config, 3620);
+SIZE_CHECK(struct rogue_fwif_sysdata, 3624);
+
+OFFSET_CHECK(struct rogue_fwif_slr_entry, timestamp, 0);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, fw_ctx_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, num_ufos, 12);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, ccb_name, 16);
+SIZE_CHECK(struct rogue_fwif_slr_entry, 48);
+
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_sync_check_mark, 4);
+OFFSET_CHECK(struct rogue_fwif_osdata, host_sync_check_mark, 8);
+OFFSET_CHECK(struct rogue_fwif_osdata, forced_updates_requested, 12);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_wp, 16);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_first, 24);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log, 72);
+OFFSET_CHECK(struct rogue_fwif_osdata, last_forced_update_time, 552);
+OFFSET_CHECK(struct rogue_fwif_osdata, interrupt_count, 560);
+OFFSET_CHECK(struct rogue_fwif_osdata, kccb_cmds_executed, 568);
+OFFSET_CHECK(struct rogue_fwif_osdata, power_sync_fw_addr, 572);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_data_flags, 576);
+SIZE_CHECK(struct rogue_fwif_osdata, 584);
+
+OFFSET_CHECK(struct rogue_bifinfo, bif_req_status, 0);
+OFFSET_CHECK(struct rogue_bifinfo, bif_mmu_status, 8);
+OFFSET_CHECK(struct rogue_bifinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_bifinfo, reserved, 24);
+SIZE_CHECK(struct rogue_bifinfo, 32);
+
+OFFSET_CHECK(struct rogue_eccinfo, fault_gpu, 0);
+SIZE_CHECK(struct rogue_eccinfo, 4);
+
+OFFSET_CHECK(struct rogue_mmuinfo, mmu_status, 0);
+OFFSET_CHECK(struct rogue_mmuinfo, pc_address, 16);
+OFFSET_CHECK(struct rogue_mmuinfo, reserved, 24);
+SIZE_CHECK(struct rogue_mmuinfo, 32);
+
+OFFSET_CHECK(struct rogue_pollinfo, thread_num, 0);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_addr, 4);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_mask, 8);
+OFFSET_CHECK(struct rogue_pollinfo, cr_poll_last_value, 12);
+OFFSET_CHECK(struct rogue_pollinfo, reserved, 16);
+SIZE_CHECK(struct rogue_pollinfo, 24);
+
+OFFSET_CHECK(struct rogue_tlbinfo, bad_addr, 0);
+OFFSET_CHECK(struct rogue_tlbinfo, entry_lo, 4);
+SIZE_CHECK(struct rogue_tlbinfo, 8);
+
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_data, 0);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_timer, 32);
+OFFSET_CHECK(struct rogue_hwrinfo, os_timer, 40);
+OFFSET_CHECK(struct rogue_hwrinfo, frame_num, 48);
+OFFSET_CHECK(struct rogue_hwrinfo, pid, 52);
+OFFSET_CHECK(struct rogue_hwrinfo, active_hwrt_data, 56);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_number, 60);
+OFFSET_CHECK(struct rogue_hwrinfo, event_status, 64);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_recovery_flags, 68);
+OFFSET_CHECK(struct rogue_hwrinfo, hwr_type, 72);
+OFFSET_CHECK(struct rogue_hwrinfo, dm, 76);
+OFFSET_CHECK(struct rogue_hwrinfo, core_id, 80);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_of_kick, 88);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_start, 96);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_finish, 104);
+OFFSET_CHECK(struct rogue_hwrinfo, cr_time_freelist_ready, 112);
+OFFSET_CHECK(struct rogue_hwrinfo, reserved, 120);
+SIZE_CHECK(struct rogue_hwrinfo, 136);
+
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_counter, 2176);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, write_index, 2180);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, dd_req_count, 2184);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info_buf_flags, 2188);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_locked_up_count, 2192);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_overran_count, 2228);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_recovered_count, 2264);
+OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_false_detect_count, 2300);
+SIZE_CHECK(struct rogue_fwif_hwrinfobuf, 2336);
+
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, pc_dev_paddr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, page_cat_base_reg_set, 8);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, bp_handler_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_ctl, 20);
+OFFSET_CHECK(struct rogue_fwif_fwmemcontext, fw_mem_ctx_flags, 24);
+SIZE_CHECK(struct rogue_fwif_fwmemcontext, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer_init, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vbs_so_prim, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_current_idx, 32);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, 40);
+
+OFFSET_CHECK(struct rogue_fwif_geom_ctx_state, geom_core, 0);
+SIZE_CHECK(struct rogue_fwif_geom_ctx_state, 160);
+
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_pm_deallocated_mask_status, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_dm_pds_mtilefree_status, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, ctx_state_flags, 8);
+OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_isp_store, 12);
+SIZE_CHECK(struct rogue_fwif_frag_ctx_state, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compute_ctx_state, ctx_state_flags, 0);
+SIZE_CHECK(struct rogue_fwif_compute_ctx_state, 4);
+
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_meta_dma_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, context_state_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_com_ctx_flags, 28);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority, 32);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority_seq_num, 36);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, rf_cmd_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_stores, 48);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_out_of_memory, 52);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_partial_renders, 56);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, dm, 60);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_address, 64);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_node, 72);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, buf_stalled_node, 80);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, cbuf_queue_ctrl_addr, 88);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, robustness_address, 96);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, max_deadline_ms, 104);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, read_offset_needs_reset, 108);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, waiting_node, 112);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, run_node, 120);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, last_failed_ufo, 128);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_mem_context_fw_addr, 136);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, server_common_context_id, 140);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, pid, 144);
+OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, geom_oom_disabled, 148);
+SIZE_CHECK(struct rogue_fwif_fwcommoncontext, 152);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, read_offset, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, wrap_mask, 8);
+OFFSET_CHECK(struct rogue_fwif_ccb_ctl, cmd_size, 12);
+SIZE_CHECK(struct rogue_fwif_ccb_ctl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_woff_update, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_wrap_mask_update, 8);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, num_cleanup_ctl, 12);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, cleanup_ctl_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, work_est_cmd_header_offset, 28);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_kick_data, 32);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, geom_cmd_kick_data, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, frag_cmd_kick_data, 32);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, 64);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, ccb_fence_offset, 4);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_type, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_data, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_power_request, pow_type, 0);
+OFFSET_CHECK(struct rogue_fwif_power_request, power_req_data, 4);
+SIZE_CHECK(struct rogue_fwif_power_request, 8);
+
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, context_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, inval, 4);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, dm_context, 8);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, address, 16);
+OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, size, 24);
+SIZE_CHECK(struct rogue_fwif_slcflushinvaldata, 32);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, opcode, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, mask, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_enable_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, num_blocks, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, block_configs_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_config_da_blks, 8);
+
+OFFSET_CHECK(struct rogue_fwif_coreclkspeedchange_data, new_clock_speed, 0);
+SIZE_CHECK(struct rogue_fwif_coreclkspeedchange_data, 4);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, enable, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, num_blocks, 4);
+OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, block_ids, 8);
+SIZE_CHECK(struct rogue_fwif_hwperf_ctrl_blks, 40);
+
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_block, 0);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, num_counters, 2);
+OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_counter_ids_fw_addr, 4);
+SIZE_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, zs_buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, done, 4);
+SIZE_CHECK(struct rogue_fwif_zsbuffer_backing_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, freelist_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, delta_pages, 4);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, ready_pages, 12);
+SIZE_CHECK(struct rogue_fwif_freelist_gs_data, 16);
+
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_count, 0);
+OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_ids, 4);
+SIZE_CHECK(struct rogue_fwif_freelists_reconstruction_data, 76);
+
+OFFSET_CHECK(struct rogue_fwif_write_offset_update_data, context_fw_addr, 0);
+SIZE_CHECK(struct rogue_fwif_write_offset_update_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, kccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_kccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, server_common_context_id, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_reason, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, dm, 8);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, flags, 16);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, pc_address, 24);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, fault_address, 32);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, 40);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, fw_fault_addr, 0);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, 8);
+
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, fwccb_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_data, 8);
+SIZE_CHECK(struct rogue_fwif_fwccb_cmd, 88);
+
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_type, 0);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_size, 4);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, ext_job_ref, 8);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, int_job_ref, 12);
+OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, work_est_kick_data, 16);
+SIZE_CHECK(struct rogue_fwif_ccb_cmd_header, 40);
+
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_ms, 0);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, runtime_cfg_flags, 4);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_persistant, 8);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, core_clock_speed, 12);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, default_dusts_num_init, 16);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, phr_mode, 20);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hcs_deadline_ms, 24);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, wdg_period_us, 28);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, osid_priority, 32);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hwperf_buf_fw_addr, 64);
+OFFSET_CHECK(struct rogue_fwif_runtime_cfg, padding, 68);
+SIZE_CHECK(struct rogue_fwif_runtime_cfg, 72);
+
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_fw_state, 0);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_os_state, 4);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_fw_token, 8);
+OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_os_token, 12);
+SIZE_CHECK(struct rogue_fwif_connection_ctl, 16);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, layout_version, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, bvnc, 8);
+SIZE_CHECK(struct rogue_fwif_compchecks_bvnc, 16);
+
+OFFSET_CHECK(struct rogue_fwif_init_options, os_count_support, 0);
+SIZE_CHECK(struct rogue_fwif_init_options, 8);
+
+OFFSET_CHECK(struct rogue_fwif_compchecks, hw_bvnc, 0);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_bvnc, 16);
+OFFSET_CHECK(struct rogue_fwif_compchecks, fw_processor_version, 32);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_version, 36);
+OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_build, 40);
+OFFSET_CHECK(struct rogue_fwif_compchecks, build_options, 44);
+OFFSET_CHECK(struct rogue_fwif_compchecks, init_options, 48);
+OFFSET_CHECK(struct rogue_fwif_compchecks, updated, 56);
+SIZE_CHECK(struct rogue_fwif_compchecks, 64);
+
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccbctl_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_rtn_slots_fw_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccbctl_fw_addr, 12);
+OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccb_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccbctl_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccb_fw_addr, 24);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_fwif_hwr_info_buf_ctl_fw_addr, 28);
+OFFSET_CHECK(struct rogue_fwif_osinit, hwr_debug_dump_limit, 32);
+OFFSET_CHECK(struct rogue_fwif_osinit, fw_os_data_fw_addr, 36);
+OFFSET_CHECK(struct rogue_fwif_osinit, rogue_comp_checks, 40);
+SIZE_CHECK(struct rogue_fwif_osinit, 104);
+
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, left_size_in_regs, 4);
+SIZE_CHECK(struct rogue_fwif_sigbuf_ctl, 8);
+
+OFFSET_CHECK(struct pdvfs_opp, volt, 0);
+OFFSET_CHECK(struct pdvfs_opp, freq, 4);
+SIZE_CHECK(struct pdvfs_opp, 8);
+
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, opp_values, 0);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, min_opp_point, 128);
+OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, max_opp_point, 132);
+SIZE_CHECK(struct rogue_fwif_pdvfs_opp, 136);
+
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, buffer_fw_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, size_in_dwords, 4);
+SIZE_CHECK(struct rogue_fwif_counter_dump_ctl, 8);
+
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_string, 0);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_km_feature_flags, 24);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, num_bvnc_blocks, 28);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_gpu_cores, 30);
+OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_blocks, 32);
+SIZE_CHECK(struct rogue_hwperf_bvnc, 160);
+
+OFFSET_CHECK(struct rogue_fwif_sysinit, fault_phys_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pds_exec_base, 8);
+OFFSET_CHECK(struct rogue_fwif_sysinit, usc_exec_base, 16);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_state_table_base, 24);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_large_state_table_base, 32);
+OFFSET_CHECK(struct rogue_fwif_sysinit, texture_heap_base, 40);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hw_perf_filter, 48);
+OFFSET_CHECK(struct rogue_fwif_sysinit, slc3_fence_dev_addr, 56);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tpu_trilinear_frac_mask, 64);
+OFFSET_CHECK(struct rogue_fwif_sysinit, sigbuf_ctl, 80);
+OFFSET_CHECK(struct rogue_fwif_sysinit, pdvfs_opp_info, 152);
+OFFSET_CHECK(struct rogue_fwif_sysinit, coremem_data_store, 288);
+OFFSET_CHECK(struct rogue_fwif_sysinit, counter_dump_ctl, 304);
+OFFSET_CHECK(struct rogue_fwif_sysinit, filter_flags, 312);
+OFFSET_CHECK(struct rogue_fwif_sysinit, runtime_cfg_fw_addr, 316);
+OFFSET_CHECK(struct rogue_fwif_sysinit, trace_buf_ctl_fw_addr, 320);
+OFFSET_CHECK(struct rogue_fwif_sysinit, fw_sys_data_fw_addr, 324);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpu_util_fw_cb_ctl_fw_addr, 328);
+OFFSET_CHECK(struct rogue_fwif_sysinit, reg_cfg_fw_addr, 332);
+OFFSET_CHECK(struct rogue_fwif_sysinit, hwperf_ctl_fw_addr, 336);
+OFFSET_CHECK(struct rogue_fwif_sysinit, align_checks, 340);
+OFFSET_CHECK(struct rogue_fwif_sysinit, initial_core_clock_speed, 344);
+OFFSET_CHECK(struct rogue_fwif_sysinit, active_pm_latency_ms, 348);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started, 352);
+OFFSET_CHECK(struct rogue_fwif_sysinit, marker_val, 356);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started_timestamp, 360);
+OFFSET_CHECK(struct rogue_fwif_sysinit, jones_disable_mask, 364);
+OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_perf, 368);
+OFFSET_CHECK(struct rogue_fwif_sysinit, core_clock_rate_fw_addr, 372);
+OFFSET_CHECK(struct rogue_fwif_sysinit, gpio_validation_mode, 376);
+OFFSET_CHECK(struct rogue_fwif_sysinit, bvnc_km_feature_flags, 380);
+OFFSET_CHECK(struct rogue_fwif_sysinit, tfbc_compression_control, 540);
+SIZE_CHECK(struct rogue_fwif_sysinit, 544);
+
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr, 0);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr_seq_count, 10240);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, gpu_util_flags, 10244);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, last_word, 10248);
+OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, stats_counters, 10256);
+SIZE_CHECK(struct rogue_fwif_gpu_util_fwcb, 10280);
+
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, render_target_index, 0);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, current_render_target, 4);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, active_render_targets, 8);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, cumul_active_render_targets, 12);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, valid_render_targets_fw_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_num_partial_renders_fw_addr, 20);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, max_rts, 24);
+OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_ctl_flags, 28);
+SIZE_CHECK(struct rogue_fwif_rta_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_dev_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_stack_top, 16);
+OFFSET_CHECK(struct rogue_fwif_freelist, max_pages, 20);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pages, 24);
+OFFSET_CHECK(struct rogue_fwif_freelist, current_pages, 28);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_page_count, 32);
+OFFSET_CHECK(struct rogue_fwif_freelist, allocated_mmu_page_count, 36);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_id, 40);
+OFFSET_CHECK(struct rogue_fwif_freelist, grow_pending, 44);
+OFFSET_CHECK(struct rogue_fwif_freelist, ready_pages, 48);
+OFFSET_CHECK(struct rogue_fwif_freelist, freelist_flags, 52);
+OFFSET_CHECK(struct rogue_fwif_freelist, pm_global_pb, 56);
+SIZE_CHECK(struct rogue_fwif_freelist, 64);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, geom_caches_need_zeroing, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, screen_pixel_max, 4);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, multi_sample_ctl, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, flipped_multi_sample_ctl, 16);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_stride, 24);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_size, 28);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_screen, 32);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, mtile_stride, 36);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, teaa, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile1, 44);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile2, 48);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_x, 52);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_y, 56);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_x, 60);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_y, 64);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_x, 68);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_y, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, rgn_header_size, 76);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_mtile_size, 80);
+SIZE_CHECK(struct rogue_fwif_hwrtdata_common, 88);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_cat_base, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_last_cat_base, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_cat_base, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_last_cat_base, 104);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_cat_base, 136);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_last_cat_base, 144);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_alist_stack_pointer, 152);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_stack_pointer, 160);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_common_fw_addr, 164);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_flags, 168);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, state, 172);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelists_fw_addr, 176);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelist_hwr_snapshot, 188);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vheap_table_dev_addr, 200);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, cleanup_state, 208);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rta_ctl, 216);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, tail_ptrs_dev_addr, 248);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, macrotile_array_dev_addr, 256);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rgn_header_dev_addr, 264);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rtc_dev_addr, 272);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, owner_geom_not_used_by_host, 280);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, geom_caches_need_zeroing, 284);
+SIZE_CHECK(struct rogue_fwif_hwrtdata, 288);
+
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, state, 0);
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, fw_ref_count, 4);
+SIZE_CHECK(struct rogue_fwif_sync_checkpoint, 8);
+
+#endif /* __PVR_ROGUE_FWIF_CHECK_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
new file mode 100644
index 000000000000..40a362cfaf32
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
@@ -0,0 +1,369 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_CLIENT_H__
+#define __PVR_ROGUE_FWIF_CLIENT_H__
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ * Page size used for Parameter Management.
+ */
+#define ROGUE_PM_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum/Maximum PB size.
+ *
+ * Base page size is dependent on core:
+ *   S6/S6XT/S7               = 50 pages
+ *   S8XE                     = 40 pages
+ *   S8XE with BRN66011 fixed = 25 pages
+ *
+ * Minimum PB = Base Pages + (NUM_TE_PIPES-1)*16K + (NUM_VCE_PIPES-1)*64K +
+ *              IF_PM_PREALLOC(NUM_TE_PIPES*16K + NUM_VCE_PIPES*16K)
+ *
+ * Maximum PB size must ensure that no PM address space can be fully used,
+ * because if the full address space was used it would wrap and corrupt itself.
+ * Since there are two freelists (local is always minimum sized) this can be
+ * described as following three conditions being met:
+ *
+ *   (Minimum PB + Maximum PB)  <  ALIST PM address space size (16GB)
+ *   (Minimum PB + Maximum PB)  <  TE PM address space size (16GB) / NUM_TE_PIPES
+ *   (Minimum PB + Maximum PB)  <  VCE PM address space size (16GB) / NUM_VCE_PIPES
+ *
+ * Since the max of NUM_TE_PIPES and NUM_VCE_PIPES is 4, we have a hard limit
+ * of 4GB minus the Minimum PB. For convenience we take the smaller power-of-2
+ * value of 2GB. This is far more than any current applications use.
+ */
+#define ROGUE_PM_MAX_FREELIST_SIZE SZ_2G
+
+/*
+ * Flags supported by the geometry DM command, i.e. &struct rogue_fwif_cmd_geom.
+ */
+
+#define ROGUE_GEOM_FLAGS_FIRSTKICK BIT_MASK(0)
+#define ROGUE_GEOM_FLAGS_LASTKICK BIT_MASK(1)
+/* Use single core in a multi core setup. */
+#define ROGUE_GEOM_FLAGS_SINGLE_CORE BIT_MASK(3)
+
+/*
+ * Flags supported by the fragment DM command, i.e. &struct rogue_fwif_cmd_frag.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_FRAG_FLAGS_SINGLE_CORE BIT_MASK(3)
+/* Indicates whether this render produces visibility results. */
+#define ROGUE_FRAG_FLAGS_GET_VIS_RESULTS BIT_MASK(5)
+/* Indicates whether a depth buffer is present. */
+#define ROGUE_FRAG_FLAGS_DEPTHBUFFER BIT_MASK(7)
+/* Indicates whether a stencil buffer is present. */
+#define ROGUE_FRAG_FLAGS_STENCILBUFFER BIT_MASK(8)
+/* Disallow compute overlapped with this render. */
+#define ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP BIT_MASK(26)
+
+/*
+ * Flags supported by the compute DM command, i.e. &struct rogue_fwif_cmd_compute.
+ */
+
+#define ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP BIT_MASK(2)
+/* Use single core in a multi core setup. */
+#define ROGUE_COMPUTE_FLAG_SINGLE_CORE BIT_MASK(5)
+
+/*
+ * Flags supported by the transfer DM command, i.e. &struct rogue_fwif_cmd_transfer.
+ */
+
+/* Use single core in a multi core setup. */
+#define ROGUE_TRANSFER_FLAGS_SINGLE_CORE BIT_MASK(1)
+
+/*
+ ************************************************
+ * Parameter/HWRTData control structures.
+ ************************************************
+ */
+
+/*
+ * Configuration registers which need to be loaded by the firmware before a geometry
+ * job can be started.
+ */
+struct rogue_fwif_geom_regs {
+	u64 vdm_ctrl_stream_base;
+	u64 tpu_border_colour_table;
+
+	/* Only used when feature VDM_DRAWINDIRECT present. */
+	u64 vdm_draw_indirect0;
+	/* Only used when feature VDM_DRAWINDIRECT present. */
+	u32 vdm_draw_indirect1;
+
+	u32 ppp_ctrl;
+	u32 te_psg;
+	/* Only used when BRN 49927 present. */
+	u32 tpu;
+
+	u32 vdm_context_resume_task0_size;
+	/* Only used when feature VDM_OBJECT_LEVEL_LLS present. */
+	u32 vdm_context_resume_task3_size;
+
+	/* Only used when BRN 56279 or BRN 67381 present. */
+	u32 pds_ctrl;
+
+	u32 view_idx;
+
+	/* Only used when feature TESSELLATION present. */
+	u32 pds_coeff_free_prog;
+
+	u32 padding;
+};
+
+/* Only used when BRN 44455 or BRN 63027 present. */
+struct rogue_fwif_dummy_rgnhdr_init_geom_regs {
+	u64 te_psgregion_addr;
+};
+
+/*
+ * Represents a geometry command that can be used to tile a whole scene's objects as
+ * per TA behavior.
+ */
+struct rogue_fwif_cmd_geom {
+	/*
+	 * The rogue_fwif_cmd_geom_frag_shared field must always be at the
+	 * beginning of the struct.
+	 *
+	 * The command struct (rogue_fwif_cmd_geom) is shared between the Client
+	 * and the Firmware. The Kernel is unable to perform read/write operations
+	 * on the command struct; the SHARED region is the only exception to this
+	 * rule. This region must be the first member so that the Kernel can
+	 * easily access it. For more info, see the definition of
+	 * rogue_fwif_cmd_geom_frag_shared; an illustrative layout guard follows
+	 * this struct.
+	 */
+	struct rogue_fwif_cmd_geom_frag_shared cmd_shared;
+
+	struct rogue_fwif_geom_regs regs __aligned(8);
+	u32 flags __aligned(8);
+
+	/*
+	 * Holds the geometry/fragment fence value to allow the fragment partial render command
+	 * to go through.
+	 */
+	struct rogue_fwif_ufo partial_render_geom_frag_fence;
+
+	/* Only used when BRN 44455 or BRN 63027 present. */
+	struct rogue_fwif_dummy_rgnhdr_init_geom_regs dummy_rgnhdr_init_geom_regs __aligned(8);
+
+	/* Only used when BRN 61484 or BRN 66333 present. */
+	u32 brn61484_66333_live_rt;
+
+	u32 padding;
+};
+
+/*
+ * Configuration registers which need to be loaded by the firmware before ISP
+ * can be started.
+ */
+struct rogue_fwif_frag_regs {
+	u32 usc_pixel_output_ctrl;
+
+#define ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL 8U
+	u32 usc_clear_register[ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL];
+
+	u32 isp_bgobjdepth;
+	u32 isp_bgobjvals;
+	u32 isp_aa;
+	/* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+	u32 isp_xtp_pipe_enable;
+
+	u32 isp_ctl;
+
+	/* Only used when BRN 49927 present. */
+	u32 tpu;
+
+	u32 event_pixel_pds_info;
+
+	/* Only used when feature CLUSTER_GROUPING present. */
+	u32 pixel_phantom;
+
+	u32 view_idx;
+
+	u32 event_pixel_pds_data;
+
+	/* Only used when BRN 65101 present. */
+	u32 brn65101_event_pixel_pds_data;
+
+	/* Only used when feature GPU_MULTICORE_SUPPORT or BRN 47217 present. */
+	u32 isp_oclqry_stride;
+
+	/* Only used when feature ZLS_SUBTILE present. */
+	u32 isp_zls_pixels;
+
+	/* Only used when feature ISP_ZLS_D24_S8_PACKING_OGL_MODE present. */
+	u32 rgx_cr_blackpearl_fix;
+
+	/* All values below the aligned_u64 must be 64 bit. */
+	aligned_u64 isp_scissor_base;
+	u64 isp_dbias_base;
+	u64 isp_oclqry_base;
+	u64 isp_zlsctl;
+	u64 isp_zload_store_base;
+	u64 isp_stencil_load_store_base;
+
+	/*
+	 * Only used when feature FBCDC_ALGORITHM present and value < 3 or feature
+	 * FB_CDC_V4 present. Additionally, BRNs 48754, 60227, 72310 and 72311 must
+	 * not be present.
+	 */
+	u64 fb_cdc_zls;
+
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS 3U
+	u64 pbe_word[8U][ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS];
+	u64 tpu_border_colour_table;
+	u64 pds_bgnd[3U];
+
+	/* Only used when BRN 65101 present. */
+	u64 pds_bgnd_brn65101[3U];
+
+	u64 pds_pr_bgnd[3U];
+
+	/* Only used when BRN 62850 or 62865 present. */
+	u64 isp_dummy_stencil_store_base;
+
+	/* Only used when BRN 66193 present. */
+	u64 isp_dummy_depth_store_base;
+
+	/* Only used when BRN 67182 present. */
+	u32 rgnhdr_single_rt_size;
+	/* Only used when BRN 67182 present. */
+	u32 rgnhdr_scratch_offset;
+};
+
+struct rogue_fwif_cmd_frag {
+	struct rogue_fwif_cmd_geom_frag_shared cmd_shared __aligned(8);
+
+	struct rogue_fwif_frag_regs regs __aligned(8);
+	/* command control flags. */
+	u32 flags;
+	/* Stride IN BYTES for Z-Buffer in case of RTAs. */
+	u32 zls_stride;
+	/* Stride IN BYTES for S-Buffer in case of RTAs. */
+	u32 sls_stride;
+
+	/* Only used if feature GPU_MULTICORE_SUPPORT present. */
+	u32 execute_count;
+};
+
+/*
+ * Configuration registers which need to be loaded by the firmware before CDM
+ * can be started.
+ */
+struct rogue_fwif_compute_regs {
+	u64 tpu_border_colour_table;
+
+	/* Only used when feature CDM_USER_MODE_QUEUE present. */
+	u64 cdm_cb_queue;
+
+	/* Only used when feature CDM_USER_MODE_QUEUE present. */
+	u64 cdm_cb_base;
+	/* Only used when feature CDM_USER_MODE_QUEUE present. */
+	u64 cdm_cb;
+
+	/* Only used when feature CDM_USER_MODE_QUEUE is not present. */
+	u64 cdm_ctrl_stream_base;
+
+	u64 cdm_context_state_base_addr;
+
+	/* Only used when BRN 49927 is present. */
+	u32 tpu;
+	u32 cdm_resume_pds1;
+
+	/* Only used when feature COMPUTE_MORTON_CAPABLE present. */
+	u32 cdm_item;
+
+	/* Only used when feature CLUSTER_GROUPING present. */
+	u32 compute_cluster;
+
+	/* Only used when feature TPU_DM_GLOBAL_REGISTERS present. */
+	u32 tpu_tag_cdm_ctrl;
+
+	u32 padding;
+};
+
+struct rogue_fwif_cmd_compute {
+	/* Common command attributes */
+	struct rogue_fwif_cmd_common common __aligned(8);
+
+	/* CDM registers */
+	struct rogue_fwif_compute_regs regs;
+
+	/* Control flags */
+	u32 flags __aligned(8);
+
+	/* Only used when feature UNIFIED_STORE_VIRTUAL_PARTITIONING present. */
+	u32 num_temp_regions;
+
+	/* Only used when feature CDM_USER_MODE_QUEUE present. */
+	u32 stream_start_offset;
+
+	/* Only used when feature GPU_MULTICORE_SUPPORT present. */
+	u32 execute_count;
+};
+
+struct rogue_fwif_transfer_regs {
+	/*
+	 * All 32 bit values should be added in the top section. This then
+	 * requires only a single alignment directive (the aligned_u64 below)
+	 * to align all the 64 bit values in the second section.
+	 */
+	u32 isp_bgobjvals;
+
+	u32 usc_pixel_output_ctrl;
+	u32 usc_clear_register0;
+	u32 usc_clear_register1;
+	u32 usc_clear_register2;
+	u32 usc_clear_register3;
+
+	u32 isp_mtile_size;
+	u32 isp_render_origin;
+	u32 isp_ctl;
+
+	/* Only used when feature S7_TOP_INFRASTRUCTURE present. */
+	u32 isp_xtp_pipe_enable;
+	u32 isp_aa;
+
+	u32 event_pixel_pds_info;
+
+	u32 event_pixel_pds_code;
+	u32 event_pixel_pds_data;
+
+	u32 isp_render;
+	u32 isp_rgn;
+
+	/* Only used when feature GPU_MULTICORE_SUPPORT present. */
+	u32 frag_screen;
+
+	/* All values below the aligned_u64 must be 64 bit. */
+	aligned_u64 pds_bgnd0_base;
+	u64 pds_bgnd1_base;
+	u64 pds_bgnd3_sizeinfo;
+
+	u64 isp_mtile_base;
+#define ROGUE_PBE_WORDS_REQUIRED_FOR_TQS 3
+	/* TQ_MAX_RENDER_TARGETS * PBE_STATE_SIZE */
+	u64 pbe_wordx_mrty[3U * ROGUE_PBE_WORDS_REQUIRED_FOR_TQS];
+};
+
+struct rogue_fwif_cmd_transfer {
+	/* Common command attributes */
+	struct rogue_fwif_cmd_common common __aligned(8);
+
+	struct rogue_fwif_transfer_regs regs __aligned(8);
+
+	u32 flags;
+
+	u32 padding;
+};
+
+#include "pvr_rogue_fwif_client_check.h"
+
+#endif /* __PVR_ROGUE_FWIF_CLIENT_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
new file mode 100644
index 000000000000..361aaffdaa44
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h
@@ -0,0 +1,133 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_CLIENT_CHECK_H__
+#define __PVR_ROGUE_FWIF_CLIENT_CHECK_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+	static_assert(offsetof(type, member) == (offset), \
+		      "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+	static_assert(sizeof(type) == (size), #type " is incorrect size")
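+
+/*
+ * For example (illustrative), the first check below:
+ *
+ *   OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
+ *
+ * expands to:
+ *
+ *   static_assert(offsetof(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base) == 0,
+ *                 "offsetof(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base) incorrect");
+ *
+ * so any change to a shared structure layout fails at compile time instead
+ * of silently breaking the firmware interface.
+ */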
+
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu_border_colour_table, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect0, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect1, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, ppp_ctrl, 28);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, te_psg, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu, 36);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task0_size, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task3_size, 44);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_ctrl, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, view_idx, 52);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_coeff_free_prog, 56);
+SIZE_CHECK(struct rogue_fwif_geom_regs, 64);
+
+OFFSET_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, te_psgregion_addr, 0);
+SIZE_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, partial_render_geom_frag_fence, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, dummy_rgnhdr_init_geom_regs, 96);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, brn61484_66333_live_rt, 104);
+SIZE_CHECK(struct rogue_fwif_cmd_geom, 112);
+
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_pixel_output_ctrl, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_clear_register, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjdepth, 36);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjvals, 40);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_aa, 44);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_xtp_pipe_enable, 48);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_ctl, 52);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu, 56);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_info, 60);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pixel_phantom, 64);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, view_idx, 68);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_data, 72);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, brn65101_event_pixel_pds_data, 76);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_stride, 80);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zls_pixels, 84);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgx_cr_blackpearl_fix, 88);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_scissor_base, 96);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dbias_base, 104);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_base, 112);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zlsctl, 120);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zload_store_base, 128);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_stencil_load_store_base, 136);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, fb_cdc_zls, 144);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pbe_word, 152);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu_border_colour_table, 344);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd, 352);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd_brn65101, 376);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_pr_bgnd, 400);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_stencil_store_base, 424);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_depth_store_base, 432);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_single_rt_size, 440);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_scratch_offset, 444);
+SIZE_CHECK(struct rogue_fwif_frag_regs, 448);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, flags, 464);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, zls_stride, 468);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, sls_stride, 472);
+OFFSET_CHECK(struct rogue_fwif_cmd_frag, execute_count, 476);
+SIZE_CHECK(struct rogue_fwif_cmd_frag, 480);
+
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_border_colour_table, 0);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_queue, 8);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_base, 16);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb, 24);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_ctrl_stream_base, 32);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_context_state_base_addr, 40);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu, 48);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_resume_pds1, 52);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_item, 56);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, compute_cluster, 60);
+OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_tag_cdm_ctrl, 64);
+SIZE_CHECK(struct rogue_fwif_compute_regs, 72);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, num_temp_regions, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, stream_start_offset, 88);
+OFFSET_CHECK(struct rogue_fwif_cmd_compute, execute_count, 92);
+SIZE_CHECK(struct rogue_fwif_cmd_compute, 96);
+
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_bgobjvals, 0);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_pixel_output_ctrl, 4);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register0, 8);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register1, 12);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register2, 16);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register3, 20);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_size, 24);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render_origin, 28);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_ctl, 32);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_xtp_pipe_enable, 36);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_aa, 40);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_info, 44);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_code, 48);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_data, 52);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render, 56);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_rgn, 60);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, frag_screen, 64);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd0_base, 72);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd1_base, 80);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd3_sizeinfo, 88);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_base, 96);
+OFFSET_CHECK(struct rogue_fwif_transfer_regs, pbe_wordx_mrty, 104);
+SIZE_CHECK(struct rogue_fwif_transfer_regs, 176);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, flags, 184);
+SIZE_CHECK(struct rogue_fwif_cmd_transfer, 192);
+
+#endif /* __PVR_ROGUE_FWIF_CLIENT_CHECK_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
new file mode 100644
index 000000000000..c18e91a464e6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
@@ -0,0 +1,60 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_COMMON_H__
+#define __PVR_ROGUE_FWIF_COMMON_H__
+
+#include <linux/build_bug.h>
+
+/*
+ * Mask of LSBs that must be zero in the sizes and offsets of data structures
+ * shared between the FW and the host driver, to guarantee 8-byte granularity.
+ */
+#define PVR_FW_ALIGNMENT_LSB 7U
+
+/* Macro to test structure size alignment. */
+#define PVR_FW_STRUCT_SIZE_ASSERT(_a)                            \
+	static_assert((sizeof(_a) & PVR_FW_ALIGNMENT_LSB) == 0U, \
+		      "Size of " #_a " is not properly aligned")
+
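+/*
+ * For example (illustrative):
+ *
+ *   PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_cmd_geom);
+ *
+ * asserts that sizeof(struct rogue_fwif_cmd_geom) has its three LSBs clear,
+ * i.e. that the structure size is a multiple of 8 bytes.
+ */
+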
+/* The master definition for data masters known to the firmware. */
+
+#define PVR_FWIF_DM_GP (0)
+/* Either TDM or 2D DM is present. */
+/* When the 'tla' feature is present in the hw (as per @pvr_device_features). */
+#define PVR_FWIF_DM_2D (1)
+/*
+ * When the 'fastrender_dm' feature is present in the hw (as per
+ * @pvr_device_features).
+ */
+#define PVR_FWIF_DM_TDM (1)
+
+#define PVR_FWIF_DM_GEOM (2)
+#define PVR_FWIF_DM_FRAG (3)
+#define PVR_FWIF_DM_CDM (4)
+#define PVR_FWIF_DM_RAY (5)
+#define PVR_FWIF_DM_GEOM2 (6)
+#define PVR_FWIF_DM_GEOM3 (7)
+#define PVR_FWIF_DM_GEOM4 (8)
+
+#define PVR_FWIF_DM_LAST PVR_FWIF_DM_GEOM4
+
+/* Maximum number of DMs in use: GP, 2D/TDM, GEOM, FRAG, CDM, RAY, GEOM2, GEOM3, GEOM4. */
+#define PVR_FWIF_DM_MAX (PVR_FWIF_DM_LAST + 1U)
+
+/* GPU Utilisation states */
+#define PVR_FWIF_GPU_UTIL_STATE_IDLE 0U
+#define PVR_FWIF_GPU_UTIL_STATE_ACTIVE 1U
+#define PVR_FWIF_GPU_UTIL_STATE_BLOCKED 2U
+#define PVR_FWIF_GPU_UTIL_STATE_NUM 3U
+#define PVR_FWIF_GPU_UTIL_STATE_MASK 0x3ULL
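+
+/*
+ * Illustrative: assuming the state occupies the two LSBs of a packed 64-bit
+ * word, it can be extracted with the mask above:
+ *
+ *   state = util_word & PVR_FWIF_GPU_UTIL_STATE_MASK;
+ */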
+
+/*
+ * Maximum amount of register writes that can be done by the register
+ * programmer (FW or META DMA). This is not a HW limitation, it is only
+ * a protection against malformed inputs to the register programmer.
+ */
+#define PVR_MAX_NUM_REGISTER_PROGRAMMER_WRITES 128U
+
+#endif /* __PVR_ROGUE_FWIF_COMMON_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
new file mode 100644
index 000000000000..3e161827e150
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
@@ -0,0 +1,29 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_RESETFRAMEWORK_H__
+#define __PVR_ROGUE_FWIF_RESETFRAMEWORK_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
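+/*
+ * The two base registers in the union below are mutually exclusive, which is
+ * why they share storage: as in &struct rogue_fwif_compute_regs,
+ * cdmreg_cdm_cb_base applies when the CDM_USER_MODE_QUEUE feature is present
+ * and cdmreg_cdm_ctrl_stream_base applies otherwise.
+ */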
+struct rogue_fwif_rf_registers {
+	union {
+		u64 cdmreg_cdm_cb_base;
+		u64 cdmreg_cdm_ctrl_stream_base;
+	};
+	u64 cdmreg_cdm_cb_queue;
+	u64 cdmreg_cdm_cb;
+};
+
+struct rogue_fwif_rf_cmd {
+	/* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+	struct rogue_fwif_rf_registers fw_registers __aligned(8);
+};
+
+#define ROGUE_FWIF_RF_CMD_SIZE sizeof(struct rogue_fwif_rf_cmd)
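+
+/*
+ * Illustrative: with the layout above this evaluates to 24 bytes (three
+ * 64-bit words), satisfying the 8-byte granularity required of structures
+ * shared with the firmware.
+ */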
+
+#endif /* __PVR_ROGUE_FWIF_RESETFRAMEWORK_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
new file mode 100644
index 000000000000..604e3dafa589
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -0,0 +1,890 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_SF_H__
+#define __PVR_ROGUE_FWIF_SF_H__
+
+/*
+ ******************************************************************************
+ * *DO NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ *          WILL BREAK fw tracing message compatibility with previous
+ *          fw versions. Only add new ones if required.
+ ******************************************************************************
+ */
+/* Available log groups. */
+#define ROGUE_FW_LOG_SFGROUPLIST       \
+	X(ROGUE_FW_GROUP_NULL, NULL)        \
+	X(ROGUE_FW_GROUP_MAIN, MAIN)        \
+	X(ROGUE_FW_GROUP_CLEANUP, CLEANUP)  \
+	X(ROGUE_FW_GROUP_CSW, CSW)          \
+	X(ROGUE_FW_GROUP_PM, PM)            \
+	X(ROGUE_FW_GROUP_RTD, RTD)          \
+	X(ROGUE_FW_GROUP_SPM, SPM)          \
+	X(ROGUE_FW_GROUP_MTS, MTS)          \
+	X(ROGUE_FW_GROUP_BIF, BIF)          \
+	X(ROGUE_FW_GROUP_MISC, MISC)        \
+	X(ROGUE_FW_GROUP_POW, POW)          \
+	X(ROGUE_FW_GROUP_HWR, HWR)          \
+	X(ROGUE_FW_GROUP_HWP, HWP)          \
+	X(ROGUE_FW_GROUP_RPM, RPM)          \
+	X(ROGUE_FW_GROUP_DMA, DMA)          \
+	X(ROGUE_FW_GROUP_DBG, DBG)
+
+enum rogue_fw_log_sfgroups {
+#define X(A, B) A,
+	ROGUE_FW_LOG_SFGROUPLIST
+#undef X
+};
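+
+/*
+ * Illustrative: the X macro above keeps only the first column of
+ * ROGUE_FW_LOG_SFGROUPLIST, so the enum expands to ROGUE_FW_GROUP_NULL = 0,
+ * ROGUE_FW_GROUP_MAIN = 1, ..., ROGUE_FW_GROUP_DBG = 15.
+ */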
+
+#define PVR_SF_STRING_MAX_SIZE 256U
+
+/* Pair of string format ID and string format. */
+struct rogue_fw_stid_fmt {
+	u32 id;
+	char name[PVR_SF_STRING_MAX_SIZE];
+};
+
+/* Pair of string format ID and string format. */
+struct rogue_km_stid_fmt {
+	u32 id;
+	const char *name;
+};
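+
+/*
+ * Illustrative sketch (hypothetical, not part of this patch): a host-side
+ * lookup table can be generated from the ID list below by redefining X, e.g.
+ *
+ *   #define X(id, gid, sym, str, args) { some_combined_id(id, gid), str },
+ *   static const struct rogue_km_stid_fmt stid_fmts[] = {
+ *           ROGUE_FW_LOG_SFIDLIST
+ *   };
+ *   #undef X
+ *
+ * where some_combined_id() stands in for however the driver packs the
+ * per-group ID and the group ID into the 32-bit "id" field.
+ */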
+
+/*
+ * Table of string format specifiers, the group each belongs to, and the
+ * number of arguments each expects. X-macro style macros are used to
+ * generate what is needed without hand editing.
+ *
+ * id       : ID within a group
+ * gid      : group ID
+ * Sym name : name of the enumeration used to identify the message string
+ * String   : actual format string
+ * #args    : number of arguments the string format requires
+ */
+#define ROGUE_FW_LOG_SFIDLIST \
+/*id, gid,              id name,        string,                           # arguments */ \
+X(  0, ROGUE_FW_GROUP_NULL, ROGUE_FW_SF_FIRST, "You should not use this string", 0) \
+\
+X(  1, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \
+X(  2, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \
+X(  3, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \
+X(  4, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \
+X(  5, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X(  6, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \
+X(  7, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \
+X(  8, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_FINISHED, "TA finished", 0) \
+X(  9, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \
+X( 10, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \
+X( 11, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \
+X( 12, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X( 13, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \
+X( 14, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \
+X( 16, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \
+X( 17, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 18, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \
+X( 19, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
+X( 20, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \
+X( 21, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x", 4) \
+X( 22, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 23, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
+X( 24, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \
+X( 25, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
+X( 26, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X( 27, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \
+X( 28, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \
+X( 29, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \
+X( 30, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \
+X( 31, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \
+X( 32, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \
+X( 33, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \
+X( 34, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \
+X( 35, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \
+X( 36, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \
+X( 37, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \
+X( 38, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \
+X( 39, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \
+X( 40, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \
+X( 41, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \
+X( 42, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \
+X( 43, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
+X( 44, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \
+X( 45, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \
+X( 46, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \
+X( 47, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \
+X( 48, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \
+X( 49, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \
+X( 50, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \
+X( 51, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \
+X( 52, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \
+X( 53, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \
+X( 54, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \
+X( 55, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \
+X( 56, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \
+X( 57, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \
+X( 58, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 59, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \
+X( 60, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 61, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \
+X( 62, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \
+X( 63, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \
+X( 64, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \
+X( 65, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \
+X( 66, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \
+X( 67, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
+X( 68, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GPU_INIT, "GPU init", 0) \
+X( 69, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \
+X( 70, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \
+X( 71, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \
+X( 72, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \
+X( 73, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \
+X( 74, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \
+X( 75, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \
+X( 76, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \
+X( 77, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \
+X( 78, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
+X( 79, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 80, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \
+X( 81, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 82, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
+X( 83, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \
+X( 84, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %d", 2) \
+X( 85, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
+X( 86, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
+X( 87, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \
+X( 88, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
+X( 89, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
+X( 90, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
+X( 91, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \
+X( 92, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \
+X( 93, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \
+X( 94, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
+X( 95, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \
+X( 96, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \
+X( 97, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \
+X( 98, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \
+X( 99, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \
+X(100, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \
+X(101, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \
+X(102, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \
+X(103, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \
+X(104, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X(105, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \
+X(106, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]:  0x%08x 0x%08x)", 4) \
+X(107, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \
+X(108, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \
+X(109, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \
+X(110, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \
+X(111, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \
+X(112, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \
+X(113, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \
+X(114, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \
+X(115, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \
+X(116, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \
+X(117, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(118, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(119, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
+X(120, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
+X(121, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(122, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \
+X(123, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(124, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(125, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \
+X(127, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \
+X(128, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \
+X(129, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \
+X(130, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
+X(131, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
+X(132, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \
+X(133, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \
+X(134, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \
+X(135, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SET_TILES_IN_FLIGHT, "  Phantom %d: USCTiles=%d", 2) \
+X(136, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \
+X(137, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \
+X(138, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \
+X(139, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \
+X(140, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \
+X(141, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
+X(142, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \
+X(143, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \
+X(144, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \
+X(145, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \
+X(146, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \
+X(147, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \
+X(148, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \
+X(149, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \
+X(150, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \
+X(151, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \
+X(152, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \
+X(153, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \
+X(154, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \
+X(155, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \
+X(156, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \
+X(157, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \
+X(158, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \
+X(159, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \
+X(160, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \
+X(161, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \
+X(162, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \
+X(163, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \
+X(164, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \
+X(165, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \
+X(166, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \
+X(167, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \
+X(168, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \
+X(169, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \
+X(170, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \
+X(171, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
+X(172, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
+X(173, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \
+X(174, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \
+X(175, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \
+X(176, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \
+X(177, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \
+X(179, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \
+X(180, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \
+X(181, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \
+X(182, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \
+X(183, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \
+X(184, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \
+X(185, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \
+X(186, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \
+X(187, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \
+X(188, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \
+X(189, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \
+X(190, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \
+X(191, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \
+X(192, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \
+X(193, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \
+X(194, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \
+X(195, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \
+X(196, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \
+X(197, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \
+X(198, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \
+X(199, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \
+X(200, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \
+X(201, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \
+X(202, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
+X(203, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \
+X(204, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
+X(205, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \
+X(206, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \
+X(207, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \
+X(208, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \
+X(209, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \
+X(210, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \
+X(211, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TEST2, "  core %u, reg = %u, mask = 0x%X)", 3) \
+X(212, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \
+X(213, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \
X(214, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event triggered by the safety watchdog.", 0) \
+X(215, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \
+X(216, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \
+X(217, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \
+X(218, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \
+X(219, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\
+X(220, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \
+X(221, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \
+X(222, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \
+X(223, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \
+X(224, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \
+X(225, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \
+X(226, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \
+X(227, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \
+X(228, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
+X(229, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \
+X(230, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
+X(231, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(232, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \
+X(233, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \
+X(234, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \
+X(235, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \
+X(236, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
+X(237, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\
+X(238, ROGUE_FW_GROUP_MAIN, ROGUE_FW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \
+\
+X(  1, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \
+X(  2, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \
+X(  3, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \
+X(  4, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \
+X(  5, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \
+X(  6, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \
+X(  7, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \
+X(  8, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \
+X(  9, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \
+X( 10, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \
+X( 11, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \
+X( 12, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \
+X( 13, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \
+X( 14, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \
+X( 15, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \
+X( 16, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \
+X( 17, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \
+X( 18, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \
+X( 19, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \
+X( 20, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \
+X( 21, ROGUE_FW_GROUP_MTS, ROGUE_FW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \
+\
+X(  1, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \
+X(  2, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \
+X(  3, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \
+X(  4, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \
+X(  5, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \
+X(  6, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \
+X(  7, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \
+X(  8, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \
+X(  9, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \
+X( 10, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \
+X( 11, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \
+X( 12, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \
+X( 13, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \
+X( 14, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \
+X( 15, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \
+X( 16, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \
+X( 17, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \
+X( 18, ROGUE_FW_GROUP_CLEANUP, ROGUE_FW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \
+\
+X(  1, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \
+X(  2, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X(  3, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \
+X(  4, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \
+X(  5, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \
+X(  6, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \
+X(  7, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \
+X(  8, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \
+X(  9, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \
+X( 10, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \
+X( 11, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \
+X( 12, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \
+X( 13, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \
+X( 14, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X( 15, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \
+X( 16, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \
+X( 17, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \
+X( 18, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \
+X( 19, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \
+X( 20, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \
+X( 21, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \
+X( 22, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \
+X( 23, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
+X( 24, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \
+X( 25, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \
+X( 26, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \
+X( 27, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \
+X( 28, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \
+X( 29, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \
+X( 30, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \
+X( 31, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \
+X( 32, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \
+X( 33, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \
+X( 34, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \
+X( 35, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \
+X( 36, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \
+X( 37, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \
+X( 38, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \
+X( 39, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \
+X( 40, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \
+X( 41, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \
+X( 42, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \
+X( 43, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 44, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \
+X( 45, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \
+X( 46, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 47, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \
+X( 48, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \
+X( 49, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \
+X( 50, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \
+X( 51, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \
+X( 52, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \
+X( 53, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \
+X( 54, ROGUE_FW_GROUP_CSW, ROGUE_FW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \
+\
+X(  1, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \
+X(  2, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \
+X(  3, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \
+X(  4, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \
+X(  5, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \
+X(  6, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X(  7, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \
+X(  8, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \
+X(  9, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \
+X( 10, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context  %d, Register's contents are now 0x%04x", 3) \
+X( 11, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \
+X( 12, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \
+X( 13, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \
+X( 14, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \
+X( 15, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X( 16, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \
+X( 17, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \
+X( 18, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \
+X( 19, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \
+X( 20, ROGUE_FW_GROUP_BIF, ROGUE_FW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \
+\
+X(  1, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \
+X(  2, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \
+X(  3, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \
+X(  4, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \
+X(  5, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \
+X(  6, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \
+X(  7, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \
+X(  8, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \
+X(  9, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \
+X( 10, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \
+X( 11, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \
+X( 12, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \
+X( 13, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \
+X( 14, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \
+X( 15, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \
+X( 16, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \
+X( 17, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \
+X( 18, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \
+X( 19, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \
+X( 20, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \
+X( 21, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \
+X( 22, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \
+X( 23, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \
+X( 24, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \
+X( 25, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \
+X( 26, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \
+X( 27, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \
+X( 28, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \
+X( 29, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
+X( 30, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \
+X( 31, ROGUE_FW_GROUP_MISC, ROGUE_FW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
+\
+X(  1, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \
+X(  2, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \
+X(  3, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
+X(  4, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
+X(  5, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
+X(  6, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \
+X(  7, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
+X(  8, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \
+X(  9, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \
+X( 10, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2) \
+X( 11, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 12, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 13, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 14, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
+X( 15, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \
+X( 16, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \
+X( 17, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 18, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 19, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \
+X( 20, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \
+X( 21, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 22, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
+X( 23, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \
+X( 24, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \
+X( 25, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
+X( 26, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
+X( 27, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \
+X( 28, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \
+X( 29, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \
+X( 30, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \
+X( 31, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \
+X( 32, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \
+X( 33, ROGUE_FW_GROUP_PM, ROGUE_FW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \
+\
+X(  1, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
+X(  2, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
+X(  3, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \
+X(  4, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. Aborting the current frame.", 0) \
+X(  5, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \
+X(  6, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \
+X(  7, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \
+X(  8, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \
+X(  9, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \
+X( 10, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \
+X( 11, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \
+X( 12, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \
+X( 13, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \
+X( 14, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \
+X( 15, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \
+X( 16, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \
+X( 17, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \
+X( 18, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \
+X( 19, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \
+X( 20, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \
+X( 21, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \
+X( 22, ROGUE_FW_GROUP_RPM, ROGUE_FW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \
+\
+X(  1, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \
+X(  2, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \
+X(  3, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \
+X(  4, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \
+X(  5, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \
+X(  6, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X(  7, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \
+X(  8, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \
+X(  9, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \
+X( 10, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
+X( 11, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
+X( 12, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \
+X( 13, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X( 14, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \
+X( 15, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \
+X( 16, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 17, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOAD_FL_DEPRECATED2, "Load  Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 18, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \
+X( 19, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \
+X( 20, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \
+X( 21, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \
+X( 22, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \
+X( 23, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \
+X( 24, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \
+X( 25, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \
+X( 26, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \
+X( 27, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \
+X( 28, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \
+X( 29, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \
+X( 30, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 31, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOAD_FL, "Load  Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 32, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \
+X( 33, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \
+X( 34, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \
+X( 35, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_LOAD_FL_V2, "Load  Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \
+X( 36, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \
+X( 37, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \
+X( 38, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \
+X( 39, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset.", 3) \
+X( 40, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_GEOM_RENDERSTATE, "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \
+X( 41, ROGUE_FW_GROUP_RTD, ROGUE_FW_SF_RTD_FRAG_RENDERSTATE, "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \
+\
+X(  1, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \
+X(  2, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \
+X(  3, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \
+X(  4, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \
+X(  5, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \
+X(  6, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \
+X(  7, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \
+X(  8, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \
+X(  9, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \
+X( 10, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \
+X( 11, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \
+X( 12, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \
+X( 13, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \
+X( 14, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \
+X( 15, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \
+X( 16, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \
+X( 17, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \
+X( 18, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \
+X( 19, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \
+X( 20, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \
+X( 21, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \
+X( 22, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \
+X( 23, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)", 1) \
+X( 24, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \
+X( 25, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \
+X( 26, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \
+X( 27, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \
+X( 28, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \
+X( 29, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \
+X( 30, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \
+X( 31, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \
+X( 32, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_NONE, "SPM State = none", 0) \
+X( 33, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \
+X( 34, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \
+X( 35, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \
+X( 36, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \
+X( 37, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \
+X( 38, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \
+X( 39, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \
+X( 40, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag", 0) \
+X( 41, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \
+X( 42, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \
+X( 43, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \
+X( 44, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \
+X( 45, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \
+X( 46, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \
+X( 47, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \
+X( 48, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \
+X( 49, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \
+X( 50, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \
+X( 51, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \
+X( 52, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \
+X( 53, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \
+X( 54, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \
+X( 66, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
+X( 67, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \
+X( 68, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \
+X( 69, ROGUE_FW_GROUP_SPM, ROGUE_FW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \
+\
+X(  1, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \
+X(  2, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X(  3, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \
+X(  4, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \
+X(  5, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
+X(  6, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X(  7, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
+X(  8, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \
+X(  9, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \
+X( 11, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \
+X( 12, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \
+X( 13, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \
+X( 14, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \
+X( 15, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
+X( 16, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \
+X( 17, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \
+X( 18, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \
+X( 19, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \
+X( 20, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \
+X( 21, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 22, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 23, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz", 2) \
+X( 24, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \
+X( 25, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \
+X( 26, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \
+X( 27, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \
+X( 28, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \
+X( 29, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \
+X( 30, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \
+X( 31, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \
+X( 32, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \
+X( 33, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \
+X( 34, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \
+X( 35, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \
+X( 36, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \
+X( 37, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \
+X( 38, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \
+X( 39, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u", 1) \
+X( 40, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \
+X( 41, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \
+X( 42, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \
+X( 43, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \
+X( 44, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \
+X( 45, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
+X( 46, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \
+X( 47, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \
+X( 48, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \
+X( 49, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \
+X( 50, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. Buffer too small.", 1) \
+X( 51, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \
+X( 52, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \
+X( 53, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
+X( 54, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
+X( 55, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \
+X( 56, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \
+X( 57, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \
+X( 58, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \
+X( 59, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \
+X( 60, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \
+X( 61, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \
+X( 62, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \
+X( 63, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \
+X( 64, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \
+X( 65, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \
+X( 66, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \
+X( 67, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \
+X( 68, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \
+X( 69, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \
+X( 70, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \
+X( 71, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \
+X( 72, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? %d", 2) \
+X( 73, ROGUE_FW_GROUP_POW, ROGUE_FW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \
+\
+X(  1, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
+X(  2, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X(  3, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \
+X(  4, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \
+X(  5, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \
+X(  6, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X(  7, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \
+X(  8, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due to lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \
+X(  9, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X( 10, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
+X( 11, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \
+X( 12, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \
+X( 13, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \
+X( 14, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \
+X( 15, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \
+X( 16, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \
+X( 17, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \
+X( 18, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \
+X( 19, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \
+X( 20, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \
+X( 21, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \
+X( 22, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \
+X( 23, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \
+X( 24, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \
+X( 25, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \
+X( 26, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \
+X( 27, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \
+X( 28, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command", 2) \
+X( 29, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \
+X( 30, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \
+X( 31, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \
+X( 32, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_READY, "DM%u ready for HWR", 1) \
+X( 33, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \
+X( 34, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x", 1) \
+X( 35, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \
+X( 36, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \
+X( 37, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \
+X( 38, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \
+X( 39, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \
+X( 40, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \
+X( 41, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \
+X( 42, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \
+X( 43, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \
+X( 44, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \
+X( 45, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \
+X( 46, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \
+X( 47, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \
+X( 48, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \
+X( 49, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \
+X( 50, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \
+X( 51, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \
+X( 52, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \
+X( 53, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \
+X( 54, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \
+X( 55, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \
+X( 56, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
+X( 57, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
+X( 58, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \
+X( 59, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \
+X( 60, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \
+X( 61, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \
+X( 62, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \
+X( 63, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \
+X( 64, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \
+X( 65, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \
+X( 66, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \
+X( 67, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \
+X( 68, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \
+X( 69, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \
+X( 70, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \
+X( 71, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \
+X( 72, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \
+X( 73, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \
+X( 74, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \
+X( 75, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \
+X( 76, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \
+X( 77, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \
+X( 78, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \
+X( 79, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \
+X( 80, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \
+X( 81, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \
+X( 82, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \
+X( 83, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \
+X( 84, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 85, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 86, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \
+X( 87, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \
+X( 88, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \
+X( 89, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \
+X( 90, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \
+X( 91, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \
+X( 92, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \
+X( 93, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \
+X( 94, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \
+X( 95, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \
+X( 96, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
+X( 97, ROGUE_FW_GROUP_HWR, ROGUE_FW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \
+\
+X(  1, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \
+X(  2, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \
+X(  3, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \
+X(  4, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \
+X(  5, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \
+X(  6, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \
+X(  7, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \
+X(  8, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \
+X(  9, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \
+X( 10, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \
+X( 11, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \
+X( 12, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \
+X( 13, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \
+X( 14, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \
+X( 15, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \
+X( 16, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x  value: 0x%x", 2) \
+X( 17, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u  ID:0x%x", 2) \
+X( 18, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \
+X( 19, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \
+X( 20, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \
+X( 21, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \
+X( 22, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \
+X( 23, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \
+X( 24, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \
+X( 25, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
+X( 26, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
+X( 27, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \
+X( 28, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \
+X( 29, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \
+X( 30, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \
+X( 31, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \
+X( 32, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \
+X( 33, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \
+X( 34, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \
+X( 35, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \
+X( 36, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \
+X( 37, ROGUE_FW_GROUP_HWP, ROGUE_FW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \
+\
+X(  1, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \
+X(  2, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \
+X(  3, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \
+X(  4, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \
+X(  5, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \
+X(  6, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \
+X(  7, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \
+X(  8, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \
+X(  9, ROGUE_FW_GROUP_DMA, ROGUE_FW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. %u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \
+\
+X(  1, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \
+X(  2, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_1HEX, "0x%08x", 1) \
+X(  3, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \
+X(  4, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \
+X(  5, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \
+X(  6, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \
+X(  7, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \
+X(  8, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \
+X(  9, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \
+X( 10, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_1SIGNED, "%d", 1) \
+X( 11, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_2SIGNED, "%d %d", 2) \
+X( 12, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_3SIGNED, "%d %d %d", 3) \
+X( 13, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \
+X( 14, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \
+X( 15, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \
+X( 16, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \
+X( 17, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \
+X( 18, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_1UNSIGNED, "%u", 1) \
+X( 19, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_2UNSIGNED, "%u %u", 2) \
+X( 20, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \
+X( 21, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \
+X( 22, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \
+X( 23, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \
+X( 24, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \
+X( 25, ROGUE_FW_GROUP_DBG, ROGUE_FW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \
+\
+X(65535, ROGUE_FW_GROUP_NULL, ROGUE_FW_SF_LAST, "You should not use this string", 15)
+
+
+/*
+ *  The symbolic names found in the table above are assigned a u32 value of
+ *  the following format:
+ *
+ *     0-11: id number
+ *    12-15: group id number
+ *    16-19: number of parameters
+ *    20-27: unused
+ *    28-30: active: identifies an SF packet, otherwise a regular int32
+ *       31: reserved for signed/unsigned compatibility
+ *
+ *   The following macro assigns those values to the enum generated SF ids list.
+ */
+#define ROGUE_FW_LOG_IDMARKER (0x70000000U)
+#define ROGUE_FW_LOG_CREATESFID(a, b, e) \
+	((u32)(a) | ((u32)(b) << 12U) | ((u32)(e) << 16U) | ROGUE_FW_LOG_IDMARKER)
+
+#define ROGUE_FW_LOG_IDMASK (0xFFF00000)
+#define ROGUE_FW_LOG_VALIDID(I) (((I) & ROGUE_FW_LOG_IDMASK) == ROGUE_FW_LOG_IDMARKER)
+
+enum rogue_fw_log_sfids {
+#define X(a, b, c, d, e) c = ROGUE_FW_LOG_CREATESFID(a, b, e),
+	ROGUE_FW_LOG_SFIDLIST
+#undef X
+};
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define ROGUE_FW_SF_GID(x) (((u32)(x)>>12) & 0xfU)
+/* Returns how many arguments the SF (string format) for the given (enum generated) id requires. */
+#define ROGUE_FW_SF_PARAMNUM(x) (((u32)(x)>>16) & 0xfU)
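+
+/*
+ * Illustrative decode sketch (an assumption about typical host-side use, not
+ * part of the interface): given a 32-bit word from the FW trace stream, a
+ * consumer could classify and unpack it like this:
+ *
+ *	if (ROGUE_FW_LOG_VALIDID(word)) {
+ *		u32 group = ROGUE_FW_SF_GID(word);
+ *		u32 nr_args = ROGUE_FW_SF_PARAMNUM(word);
+ *		...
+ *	}
+ *
+ * A word that fails ROGUE_FW_LOG_VALIDID() is a regular int32 payload rather
+ * than an SF id.
+ */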
+
+#endif /* __PVR_ROGUE_FWIF_SF_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
new file mode 100644
index 000000000000..3cecf8513fc5
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h
@@ -0,0 +1,258 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_SHARED_H__
+#define __PVR_ROGUE_FWIF_SHARED_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#define ROGUE_FWIF_NUM_RTDATAS 2U
+#define ROGUE_FWIF_NUM_GEOMDATAS 1U
+#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
+#define ROGUE_NUM_GEOM_CORES 1U
+
+#define ROGUE_NUM_GEOM_CORES_SIZE 2U
+
+/*
+ * Maximum number of UFOs in a CCB command.
+ * The number is based on having 32 sync prims (as originally), plus 32 sync
+ * checkpoints.
+ * Once the use of sync prims is no longer supported, the same total (64)
+ * will be retained: the number of sync checkpoints which may be backing a
+ * fence is not visible to the client driver, so it has to allow for the
+ * number of different timelines involved in fence merges.
+ */
+#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)
+
+/*
+ * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM,
+ * 2D, TRANSFER) command passed through the bridge.
+ * Just across the bridge in the server, any incoming kick command size is
+ * checked against this maximum limit.
+ * If the incoming command size is larger than the specified limit, the
+ * bridge call is rejected with an error.
+ */
+#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
+
+#define ROGUE_FWIF_PRBUFFER_START (0)
+#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
+#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
+#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)
+
+struct rogue_fwif_dma_addr {
+	aligned_u64 dev_addr;
+	u32 fw_addr;
+	u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_ufo {
+	u32 addr;
+	u32 value;
+};
+
+#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)
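+
+/*
+ * Illustrative sketch (assumed usage, not normative): since the flag above
+ * occupies the low bit of the UFO address, a consumer would test and strip
+ * it along these lines:
+ *
+ *	bool is_checkpoint = ufo->addr & ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT;
+ *	u32 addr = ufo->addr & ~ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT;
+ */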
+
+struct rogue_fwif_sync_checkpoint {
+	u32 state;
+	u32 fw_ref_count;
+};
+
+struct rogue_fwif_cleanup_ctl {
+	/* Number of commands received by the FW */
+	u32 submitted_commands;
+	/* Number of commands executed by the FW */
+	u32 executed_commands;
+} __aligned(8);
+
+/*
+ * Used to share frame numbers across UM-KM-FW. The frame number is set in UM
+ * and is required in both KM (for HTB) and FW (for the FW trace).
+ *
+ * May be used to house Kick flags in the future.
+ */
+struct rogue_fwif_cmd_common {
+	/* associated frame number */
+	u32 frame_num;
+};
+
+/*
+ * Geometry and fragment commands require a set of firmware addresses that are stored in the
+ * Kernel. The Client has handle(s) to the Kernel containers storing these addresses, instead of
+ * raw addresses. We have to patch/write these addresses in KM to prevent UM from controlling FW
+ * addresses directly. Typedefs for geometry and fragment commands are shared between Client and
+ * Firmware (both single-BVNC). The Kernel is implemented in a multi-BVNC manner, so it can't use
+ * the geometry/fragment CMD type definitions directly. Therefore we have a SHARED block that is
+ * shared between UM-KM-FW across all BVNC configurations.
+ */
+struct rogue_fwif_cmd_geom_frag_shared {
+	/* Common command attributes */
+	struct rogue_fwif_cmd_common cmn;
+
+	/*
+	 * RTData associated with this command. This is used for context
+	 * selection and for storing out the HW context when the TA is
+	 * switched out, so it can continue later.
+	 */
+	u32 hwrt_data_fw_addr;
+
+	/* Supported PR Buffers like Z/S/MSAA Scratch */
+	u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
+};
+
+/*
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ *          Roff                           Doff                 Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ *            <      runnable commands       ><   !ready to run   >
+ *
+ * Cmd 1    : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+struct rogue_fwif_cccb_ctl {
+	/* Host write offset into CCB. This must be aligned to 16 bytes. */
+	u32 write_offset;
+	/*
+	 * Firmware read offset into CCB. Points to the command that is runnable
+	 * on GPU, if R!=W
+	 */
+	u32 read_offset;
+	/*
+	 * Firmware fence dependency offset. Points to commands not ready, i.e.
+	 * fence dependencies are not met.
+	 */
+	u32 dep_offset;
+	/* Offset wrapping mask (total capacity of the CCB in bytes, minus 1). */
+	u32 wrap_mask;
+
+	/* Only used if SUPPORT_AGP is present. */
+	u32 read_offset2;
+
+	/* Only used if SUPPORT_AGP4 is present. */
+	u32 read_offset3;
+	/* Only used if SUPPORT_AGP4 is present. */
+	u32 read_offset4;
+
+	u32 padding;
+} __aligned(8);
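+
+/*
+ * Illustrative sketch, assuming the power-of-two ring semantics implied by
+ * wrap_mask (capacity in bytes minus 1): the host advances its write offset
+ * by cmd_size bytes with
+ *
+ *	ctl->write_offset = (ctl->write_offset + cmd_size) & ctl->wrap_mask;
+ *
+ * and the ring is empty when read_offset == write_offset.
+ */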
+
+#define ROGUE_FW_LOCAL_FREELIST (0)
+#define ROGUE_FW_GLOBAL_FREELIST (1)
+#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
+#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)
+
+struct rogue_fwif_geom_registers_caswitch {
+	u64 geom_reg_vdm_context_state_base_addr;
+	u64 geom_reg_vdm_context_state_resume_addr;
+	u64 geom_reg_ta_context_state_base_addr;
+
+	struct {
+		u64 geom_reg_vdm_context_store_task0;
+		u64 geom_reg_vdm_context_store_task1;
+		u64 geom_reg_vdm_context_store_task2;
+
+		/* VDM resume state update controls */
+		u64 geom_reg_vdm_context_resume_task0;
+		u64 geom_reg_vdm_context_resume_task1;
+		u64 geom_reg_vdm_context_resume_task2;
+
+		u64 geom_reg_vdm_context_store_task3;
+		u64 geom_reg_vdm_context_store_task4;
+
+		u64 geom_reg_vdm_context_resume_task3;
+		u64 geom_reg_vdm_context_resume_task4;
+	} geom_state[2];
+};
+
+#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
+	sizeof(struct rogue_fwif_geom_registers_caswitch)
+
+struct rogue_fwif_cdm_registers_cswitch {
+	u64 cdmreg_cdm_context_pds0;
+	u64 cdmreg_cdm_context_pds1;
+	u64 cdmreg_cdm_terminate_pds;
+	u64 cdmreg_cdm_terminate_pds1;
+
+	/* CDM resume controls */
+	u64 cdmreg_cdm_resume_pds0;
+	u64 cdmreg_cdm_context_pds0_b;
+	u64 cdmreg_cdm_resume_pds0_b;
+};
+
+struct rogue_fwif_static_rendercontext_state {
+	/* Geom registers for ctx switch */
+	struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
+		__aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
+	sizeof(struct rogue_fwif_static_rendercontext_state)
+
+struct rogue_fwif_static_computecontext_state {
+	/* CDM registers for ctx switch */
+	struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
+};
+
+#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
+	sizeof(struct rogue_fwif_static_computecontext_state)
+
+enum rogue_fwif_prbuffer_state {
+	ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
+	ROGUE_FWIF_PRBUFFER_BACKED,
+	ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
+	ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
+};
+
+struct rogue_fwif_prbuffer {
+	/* Buffer ID. */
+	u32 buffer_id;
+	/* Needs on-demand Z/S/MSAA buffer allocation. */
+	bool on_demand __aligned(4);
+	/* Z/S/MSAA buffer state. */
+	enum rogue_fwif_prbuffer_state state;
+	/* Cleanup state. */
+	struct rogue_fwif_cleanup_ctl cleanup_sate;
+	/* Compatibility and other flags. */
+	u32 prbuffer_flags;
+} __aligned(8);
+
+/* Last reset reason for a context. */
+enum rogue_context_reset_reason {
+	/* No reset reason recorded */
+	ROGUE_CONTEXT_RESET_REASON_NONE = 0,
+	/* Caused a reset due to locking up */
+	ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
+	/* Affected by another context locking up */
+	ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
+	/* Overran the global deadline */
+	ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
+	/* Affected by another context overrunning */
+	ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
+	/* Forced reset to ensure scheduling requirements */
+	ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
+	/* FW Safety watchdog triggered */
+	ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
+	/* FW page fault (no HWR) */
+	ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
+	/* FW execution error (GPU reset requested) */
+	ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
+	/* Host watchdog detected FW error */
+	ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
+	/* Geometry DM OOM event is not allowed */
+	ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
+};
+
+struct rogue_context_reset_reason_data {
+	enum rogue_context_reset_reason reset_reason;
+	u32 reset_ext_job_ref;
+};
+
+#include "pvr_rogue_fwif_shared_check.h"
+
+#endif /* __PVR_ROGUE_FWIF_SHARED_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
new file mode 100644
index 000000000000..a1d63fda161d
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h
@@ -0,0 +1,107 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_SHARED_CHECK_H__
+#define __PVR_ROGUE_FWIF_SHARED_CHECK_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+	static_assert(offsetof(type, member) == offset, \
+		      "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+	static_assert(sizeof(type) == size, #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_dma_addr, dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_dma_addr, fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_dma_addr, 16);
+
+OFFSET_CHECK(struct rogue_fwif_ufo, addr, 0);
+OFFSET_CHECK(struct rogue_fwif_ufo, value, 4);
+SIZE_CHECK(struct rogue_fwif_ufo, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, submitted_commands, 0);
+OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, executed_commands, 4);
+SIZE_CHECK(struct rogue_fwif_cleanup_ctl, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, write_offset, 0);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset, 4);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, dep_offset, 8);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, wrap_mask, 12);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset2, 16);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset3, 20);
+OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset4, 24);
+SIZE_CHECK(struct rogue_fwif_cccb_ctl, 32);
+
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_reg_vdm_context_state_base_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_reg_vdm_context_state_resume_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_reg_ta_context_state_base_addr, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_store_task0, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_store_task1, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_store_task2, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_resume_task0, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_resume_task1, 56);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_resume_task2, 64);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_store_task3, 72);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_store_task4, 80);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_resume_task3, 88);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[0].geom_reg_vdm_context_resume_task4, 96);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_store_task0, 104);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_store_task1, 112);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_store_task2, 120);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_resume_task0, 128);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_resume_task1, 136);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_resume_task2, 144);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_store_task3, 152);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_store_task4, 160);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_resume_task3, 168);
+OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch,
+	     geom_state[1].geom_reg_vdm_context_resume_task4, 176);
+SIZE_CHECK(struct rogue_fwif_geom_registers_caswitch, 184);
+
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 0);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 8);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 16);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 24);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 32);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 40);
+OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 48);
+SIZE_CHECK(struct rogue_fwif_cdm_registers_cswitch, 56);
+
+OFFSET_CHECK(struct rogue_fwif_static_rendercontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_rendercontext_state, 368);
+
+OFFSET_CHECK(struct rogue_fwif_static_computecontext_state, ctxswitch_regs, 0);
+SIZE_CHECK(struct rogue_fwif_static_computecontext_state, 56);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_common, frame_num, 0);
+SIZE_CHECK(struct rogue_fwif_cmd_common, 4);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, cmn, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, hwrt_data_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, pr_buffer_fw_addr, 8);
+SIZE_CHECK(struct rogue_fwif_cmd_geom_frag_shared, 16);
+
+#endif /* __PVR_ROGUE_FWIF_SHARED_CHECK_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
new file mode 100644
index 000000000000..7e97a981c865
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h
@@ -0,0 +1,78 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_FWIF_STREAM_H__
+#define __PVR_ROGUE_FWIF_STREAM_H__
+
+/**
+ * DOC: Streams
+ *
+ * Commands are submitted to the kernel driver in the form of streams.
+ *
+ * A command stream has the following layout:
+ *  - A 64-bit header containing:
+ *    * A u32 containing the length of the main stream inclusive of the length of the header.
+ *    * A u32 for padding.
+ *  - The main stream data.
+ *  - The extension stream (optional), which is composed of:
+ *    * One or more headers.
+ *    * The extension stream data, corresponding to the extension headers.
+ *
+ * The main stream provides the base command data. This has a fixed layout based on the features
+ * supported by a given GPU.
+ *
+ * The extension stream provides the command parameters that are required for BRNs & ERNs for the
+ * current GPU. This stream is comprised of one or more headers, followed by data for each given
+ * BRN/ERN.
+ *
+ * Each header is a u32 containing a bitmask of quirks & enhancements in the extension stream, a
+ * "type" field determining the set of quirks & enhancements the bitmask represents, and a
+ * continuation bit determining whether any more headers are present. The headers are then followed
+ * by command data; this is specific to each quirk/enhancement. All unused / reserved bits in the
+ * header must be set to 0.
+ *
+ * All parameters and headers in the main and extension streams must be naturally aligned.
+ *
+ * If a parameter appears in both the main and extension streams, then the extension parameter is
+ * used.
+ */
+
+/*
+ * Stream extension header definition
+ */
+#define PVR_STREAM_EXTHDR_TYPE_SHIFT 29U
+#define PVR_STREAM_EXTHDR_TYPE_MASK (7U << PVR_STREAM_EXTHDR_TYPE_SHIFT)
+#define PVR_STREAM_EXTHDR_TYPE_MAX 8U
+#define PVR_STREAM_EXTHDR_CONTINUATION (1U << 28U)
+
+#define PVR_STREAM_EXTHDR_DATA_MASK ~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION)
+
+/*
+ * Stream extension header - Geometry 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_GEOM0 0U
+
+#define PVR_STREAM_EXTHDR_GEOM0_BRN49927 (1U << 0U)
+
+#define PVR_STREAM_EXTHDR_GEOM0_VALID PVR_STREAM_EXTHDR_GEOM0_BRN49927
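+
+/*
+ * Illustrative sketch (composed by hand here; assumed, not normative): a
+ * final geometry extension header carrying only BRN49927 data would be the
+ * u32
+ *
+ *	(PVR_STREAM_EXTHDR_TYPE_GEOM0 << PVR_STREAM_EXTHDR_TYPE_SHIFT) |
+ *		PVR_STREAM_EXTHDR_GEOM0_BRN49927
+ *
+ * with PVR_STREAM_EXTHDR_CONTINUATION clear (no further headers follow) and
+ * the BRN49927 parameter data placed immediately after the header.
+ */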
+
+/*
+ * Stream extension header - Fragment 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_FRAG0 0U
+
+#define PVR_STREAM_EXTHDR_FRAG0_BRN47217 (1U << 0U)
+#define PVR_STREAM_EXTHDR_FRAG0_BRN49927 (1U << 1U)
+
+#define PVR_STREAM_EXTHDR_FRAG0_VALID PVR_STREAM_EXTHDR_FRAG0_BRN49927
+
+/*
+ * Stream extension header - Compute 0
+ */
+#define PVR_STREAM_EXTHDR_TYPE_COMPUTE0 0U
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 (1U << 0U)
+
+#define PVR_STREAM_EXTHDR_COMPUTE0_VALID PVR_STREAM_EXTHDR_COMPUTE0_BRN49927
+
+#endif /* __PVR_ROGUE_FWIF_STREAM_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
new file mode 100644
index 000000000000..f854bfbd0757
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h
@@ -0,0 +1,113 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_HEAP_CONFIG_H__
+#define __PVR_ROGUE_HEAP_CONFIG_H__
+
+#include <linux/sizes.h>
+
+/*
+ * ROGUE Device Virtual Address Space Definitions
+ *
+ * This file defines the ROGUE virtual address heaps that are used in
+ * application memory contexts. It also shows where the Firmware memory heap
+ * fits into this, but the firmware heap is only ever created in the
+ * kernel driver and never exposed to userspace.
+ *
+ * ROGUE_PDSCODEDATA_HEAP_BASE and ROGUE_USCCODE_HEAP_BASE will be programmed,
+ * on a global basis, into ROGUE_CR_PDS_EXEC_BASE and ROGUE_CR_USC_CODE_BASE_*
+ * respectively. Therefore if client drivers use multiple configs they must
+ * still be consistent with their definitions for these heaps.
+ *
+ * Base addresses have to be a multiple of 4 MiB.
+ * Heaps must not start at 0x0000000000, as this is reserved for internal
+ * use within the driver.
+ * Range comments (those starting in column 0 below) act as section headings
+ * and sit above the heaps in that range. Often they give the reserved size
+ * of the heap within the range.
+ */
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 */
+/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED */
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 */
+/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : RESERVED */
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF */
+/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP */
+#define ROGUE_GENERAL_HEAP_BASE 0x8000000000ull
+#define ROGUE_GENERAL_HEAP_SIZE SZ_128G
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF */
+/* 640 GiB to 704 GiB, size of 64 GiB : FREE */
+
+/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF */
+/* 704 GiB to 736 GiB, size of 32 GiB : FREE */
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF */
+/* 736 GiB to 768 GiB, size of 32 GiB : RESERVED */
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF */
+/* 768 GiB to 872 GiB, size of 104 GiB : FREE */
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF */
+/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP */
+#define ROGUE_PDSCODEDATA_HEAP_BASE 0xDA00000000ull
+#define ROGUE_PDSCODEDATA_HEAP_SIZE SZ_4G
+
+/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF */
+/* 876 GiB to 880 GiB, size of 256 MiB (reserved 4 GiB) : BRN */
+/*
+ * The BRN63142 quirk workaround requires Region Header memory to be at the top
+ * of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the
+ * address will avoid aliasing PB addresses. Start at 879.75 GiB. Size of 256 MiB.
+ */
+#define ROGUE_RGNHDR_HEAP_BASE 0xDBF0000000ull
+#define ROGUE_RGNHDR_HEAP_SIZE SZ_256M
+
+/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF */
+/* 880 GiB to 896 GiB, size of 16 GiB : FREE */
+
+/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF */
+/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP */
+#define ROGUE_USCCODE_HEAP_BASE 0xE000000000ull
+#define ROGUE_USCCODE_HEAP_SIZE SZ_4G
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF */
+/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED */
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF */
+/* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP */
+#define ROGUE_FW_HEAP_BASE 0xE1C0000000ull
+
+/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF */
+/* 904 GiB to 912 GiB, size of 8 GiB : FREE */
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF */
+/* 912 GiB to 928 GiB, size of 16 GiB : TRANSFER_FRAG */
+#define ROGUE_TRANSFER_FRAG_HEAP_BASE 0xE400000000ull
+#define ROGUE_TRANSFER_FRAG_HEAP_SIZE SZ_16G
+
+/* 0xE8_0000_0000 - 0xF1_FFFF_FFFF */
+/* 928 GiB to 968 GiB, size of 40 GiB : RESERVED */
+
+/* 0xF2_0000_0000 - 0xF2_001F_FFFF */
+/* 968 GiB to 969 GiB, size of 2 MiB : VISTEST_HEAP */
+#define ROGUE_VISTEST_HEAP_BASE 0xF200000000ull
+#define ROGUE_VISTEST_HEAP_SIZE SZ_2M
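+
+/*
+ * Illustrative checks (a sketch; static_assert would additionally require
+ * linux/build_bug.h): each heap base above honours the 4 MiB alignment rule
+ * stated at the top of this file, e.g.
+ *
+ *	static_assert((ROGUE_GENERAL_HEAP_BASE & (SZ_4M - 1)) == 0);
+ *	static_assert((ROGUE_PDSCODEDATA_HEAP_BASE & (SZ_4M - 1)) == 0);
+ */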
+
+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF */
+/* 969 GiB to 972 GiB, size of 3 GiB : FREE */
+
+/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF */
+/* 972 GiB to 1024 GiB, size of 52 GiB : FREE */
+
+/* 0xFF_FFFF_FFFF ************************************************************/
+
+#endif /* __PVR_ROGUE_HEAP_CONFIG_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_meta.h b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
new file mode 100644
index 000000000000..f6c15889fb03
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_meta.h
@@ -0,0 +1,356 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_META_H__
+#define __PVR_ROGUE_META_H__
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/*
+ ******************************************************************************
+ * META registers and MACROS
+ *****************************************************************************
+ */
+#define META_CR_CTRLREG_BASE(t) (0x04800000U + (0x1000U * (t)))
+
+#define META_CR_TXPRIVEXT (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN BIT(7)
+
+#define META_CR_SYSC_JTAG_THREAD (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
+
+#define META_CR_PERF_COUNT0 (0x0480FFE0)
+#define META_CR_PERF_COUNT1 (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS \
+	(0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT (24)
+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT (0x04820500)
+#define META_CR_PERF_ICORE0 (0x0480FFD0)
+#define META_CR_PERF_ICORE1 (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
+
+/* Note: the META_CR_PERF_COUNT_CTRL_* values above already include the shift. */
+#define META_CR_PERF_COUNT(ctrl, thr)                \
+	(META_CR_PERF_COUNT_CTRL_##ctrl |            \
+	 ((thr) << META_CR_PERF_COUNT_THR_SHIFT))
+
+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
+
+/* Poll for done. */
+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U)
+/* Set for read. */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U)
+#define META_CR_TXUXXRXRQ_TX_S (12)
+#define META_CR_TXUXXRXRQ_RX_S (4)
+#define META_CR_TXUXXRXRQ_UXX_S (0)
+
+/* Internal ctrl regs. */
+#define META_CR_TXUIN_ID (0x0)
+/* Data unit regs. */
+#define META_CR_TXUD0_ID (0x1)
+/* Data unit regs. */
+#define META_CR_TXUD1_ID (0x2)
+/* Address unit regs. */
+#define META_CR_TXUA0_ID (0x3)
+/* Address unit regs. */
+#define META_CR_TXUA1_ID (0x4)
+/* PC registers. */
+#define META_CR_TXUPC_ID (0x5)
+
+/* Macros to calculate register access values. */
+#define META_CR_CORE_REG(thr, reg_num, unit)          \
+	(((u32)(thr) << META_CR_TXUXXRXRQ_TX_S) |     \
+	 ((u32)(reg_num) << META_CR_TXUXXRXRQ_RX_S) | \
+	 ((u32)(unit) << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUPC_ID)
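+
+/*
+ * Illustrative access sequence (a sketch under assumptions; the real
+ * accessors live in the driver sources, not in this header). Reading
+ * thread 0's program counter through the indirect register port would look
+ * roughly like:
+ *
+ *	1) write META_CR_THR0_PC | META_CR_TXUXXRXRQ_RDnWR_BIT to
+ *	   META_CR_TXUXXRXRQ_OFFSET;
+ *	2) poll META_CR_TXUXXRXRQ_OFFSET until META_CR_TXUXXRXRQ_DREADY_BIT
+ *	   is set;
+ *	3) read the value from META_CR_TXUXXRXDT_OFFSET.
+ */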
+
+#define META_CR_COREREG_ENABLE (0x0000000U)
+#define META_CR_COREREG_STATUS (0x0000010U)
+#define META_CR_COREREG_DEFR (0x00000A0U)
+#define META_CR_COREREG_PRIVEXT (0x00000E8U)
+
+#define META_CR_T0ENABLE_OFFSET \
+	(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET \
+	(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET \
+	(META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET \
+	(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET \
+	(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET \
+	(META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */
+#define META_CR_TXSTATUS_PRIV (0x00020000U)
+#define META_CR_TXPRIVEXT_MINIM (0x00000080U)
+
+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U)
+
+#define META_CR_TXCLKCTRL (0x048000B0)
+#define META_CR_TXCLKCTRL_ALL_ON (0x55111111)
+#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222)
+
+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6)
+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n) * 0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31)
+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n) * 0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7)
+#define META_CR_MMCU_DCACHE_CTRL (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1)
+
+/*
+ ******************************************************************************
+ * META LDR Format
+ ******************************************************************************
+ */
+/* Block header structure. */
+struct rogue_meta_ldr_block_hdr {
+	u32 dev_id;
+	u32 sl_code;
+	u32 sl_data;
+	u16 pc_ctrl;
+	u16 crc;
+};
+
+/* High level data stream block structure. */
+struct rogue_meta_ldr_l1_data_blk {
+	u16 cmd;
+	u16 length;
+	u32 next;
+	u32 cmd_data[4];
+};
+
+/* Second level data stream block structure. */
+struct rogue_meta_ldr_l2_data_blk {
+	u16 tag;
+	u16 length;
+	u32 block_data[4];
+};
+
+/* Config command structure. */
+struct rogue_meta_ldr_cfg_blk {
+	u32 type;
+	u32 block_data[4];
+};
+
+/* Block type definitions */
+#define ROGUE_META_LDR_COMMENT_TYPE_MASK (0x0010U)
+#define ROGUE_META_LDR_BLK_IS_COMMENT(x) (((x) & ROGUE_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/*
+ * Command definitions
+ *  Value   Name            Description
+ *  0       LoadMem         Load memory with binary data.
+ *  1       LoadCore        Load a set of core registers.
+ *  2       LoadMMReg       Load a set of memory mapped registers.
+ *  3       StartThreads    Set each thread PC and SP, then enable threads.
+ *  4       ZeroMem         Zeros a memory region.
+ *  5       Config          Perform a configuration command.
+ */
+#define ROGUE_META_LDR_CMD_MASK (0x000FU)
+
+#define ROGUE_META_LDR_CMD_LOADMEM (0x0000U)
+#define ROGUE_META_LDR_CMD_LOADCORE (0x0001U)
+#define ROGUE_META_LDR_CMD_LOADMMREG (0x0002U)
+#define ROGUE_META_LDR_CMD_START_THREADS (0x0003U)
+#define ROGUE_META_LDR_CMD_ZEROMEM (0x0004U)
+#define ROGUE_META_LDR_CMD_CONFIG (0x0005U)
+
+/*
+ * Config Command definitions
+ *  Value   Name        Description
+ *  0       Pause       Pause for x times 100 instructions
+ *  1       Read        Read a value from register - No value return needed.
+ *                      Utilises effects of issuing reads to certain registers
+ *  2       Write       Write to mem location
+ *  3       MemSet      Set mem to value
+ *  4       MemCheck    Check mem for a specific value.
+ */
+#define ROGUE_META_LDR_CFG_PAUSE (0x0000)
+#define ROGUE_META_LDR_CFG_READ (0x0001)
+#define ROGUE_META_LDR_CFG_WRITE (0x0002)
+#define ROGUE_META_LDR_CFG_MEMSET (0x0003)
+#define ROGUE_META_LDR_CFG_MEMCHECK (0x0004)
+
+/*
+ ******************************************************************************
+ * ROGUE FW segmented MMU definitions
+ ******************************************************************************
+ */
+/* All threads can access the segment. */
+#define ROGUE_FW_SEGMMU_ALLTHRS (0xf << 8U)
+/* Writable. */
+#define ROGUE_FW_SEGMMU_WRITEABLE (0x1U << 1U)
+/* Accessible and writable by all threads. */
+#define ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE \
+	(ROGUE_FW_SEGMMU_ALLTHRS | ROGUE_FW_SEGMMU_WRITEABLE)
+
+/* Direct map region 10, used for mapping GPU memory - max 8 MiB. */
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ID (10U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U)
+
+/* Segment IDs. */
+#define ROGUE_FW_SEGMMU_DATA_ID (1U)
+#define ROGUE_FW_SEGMMU_BOOTLDR_ID (2U)
+#define ROGUE_FW_SEGMMU_TEXT_ID (ROGUE_FW_SEGMMU_BOOTLDR_ID)
+
+/*
+ * The SLC caching strategy for S7 and Volcanic is configured through the
+ * segment MMU. All the segments configured through the macro
+ * ROGUE_FW_SEGMMU_OUTADDR_TOP are CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7 and Volcanic.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx)  \
+	((((u64)((pers) & 0x3)) << 52) | (((u64)((mmu_ctx) & 0xFF)) << 44) | \
+	 (((u64)((slc_policy) & 0x1)) << 40))
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) \
+	ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx)
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) \
+	ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx)
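+
+/*
+ * Illustrative expansion (worked by hand; assumed, for clarity):
+ * ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(7) places pers = 0x3 at bit 52
+ * and mmu_ctx = 7 at bit 44 with slc_policy = 0 at bit 40, giving
+ * 0x0030_7000_0000_0000.
+ */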
+
+/*
+ * Configures the Page Catalog and BIF-DM fed into the BIF for Garten
+ * accesses through this segment.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) \
+	(((u64)((u64)(pc) & 0xFU) << 44U) | ((u64)((u64)(bifdm) & 0xFU) << 40U))
+
+#define ROGUE_FW_SEGMMU_META_BIFDM_ID (0x7U)
+
+/* META segments have a 4 kB minimum size. */
+#define ROGUE_FW_SEGMMU_ALIGN (0x1000U)
+
+/* Segmented MMU registers (n = segment id). */
+#define META_CR_MMCU_SEGMENT_N_BASE(n) (0x04850000U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_LIMIT(n) (0x04850004U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA0(n) (0x04850008U + ((n) * 0x10U))
+#define META_CR_MMCU_SEGMENT_N_OUTA1(n) (0x0485000CU + ((n) * 0x10U))
+
+/*
+ * The following defines must be recalculated if the Meta MMU segments used
+ * to access Host-FW data are changed.
+ * Current combinations are:
+ * - SLC uncached, META cached,   FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached,   META cached,   FW base address 0x10000000
+ * - SLC cached,   META uncached, FW base address 0x90000000
+ */
+#define ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT)
+#define ROGUE_FW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT)
+/*
+ * For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in
+ * the PTEs for the FW data, not in the Meta Segment MMU, which means these
+ * defines have no real effect in those cases.
+ */
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U)
+#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
+
+/*
+ ******************************************************************************
+ * ROGUE FW Bootloader defaults
+ ******************************************************************************
+ */
+#define ROGUE_FW_BOOTLDR_META_ADDR (0x40000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR_1 (0x000000E1)
+#define ROGUE_FW_BOOTLDR_DEVV_ADDR                     \
+	((((u64)ROGUE_FW_BOOTLDR_DEVV_ADDR_1) << 32) | \
+	 ROGUE_FW_BOOTLDR_DEVV_ADDR_0)
+#define ROGUE_FW_BOOTLDR_LIMIT (0x1FFFF000)
+#define ROGUE_FW_MAX_BOOTLDR_OFFSET (0x1000)
+
+/* Bootloader configuration offset is in dwords (0x80 dwords = 512 bytes). */
+#define ROGUE_FW_BOOTLDR_CONF_OFFSET (0x80)
+
+/*
+ ******************************************************************************
+ * ROGUE META Stack
+ ******************************************************************************
+ */
+#define ROGUE_META_STACK_SIZE (0x1000U)
+
+/*
+ ******************************************************************************
+ * ROGUE META Core memory
+ ******************************************************************************
+ */
+/* Code and data both map to the same physical memory. */
+#define ROGUE_META_COREMEM_CODE_ADDR (0x80000000U)
+#define ROGUE_META_COREMEM_DATA_ADDR (0x82000000U)
+#define ROGUE_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#define ROGUE_META_IS_COREMEM_CODE(a, b)                                \
+	({                                                              \
+		u32 _a = (a), _b = (b);                                 \
+		((_a) >= ROGUE_META_COREMEM_CODE_ADDR) &&               \
+			((_a) < (ROGUE_META_COREMEM_CODE_ADDR + (_b))); \
+	})
+#define ROGUE_META_IS_COREMEM_DATA(a, b)                                \
+	({                                                              \
+		u32 _a = (a), _b = (b);                                 \
+		((_a) >= ROGUE_META_COREMEM_DATA_ADDR) &&               \
+			((_a) < (ROGUE_META_COREMEM_DATA_ADDR + (_b))); \
+	})
+/*
+ ******************************************************************************
+ * 2nd thread
+ ******************************************************************************
+ */
+#define ROGUE_FW_THR1_PC (0x18930000)
+#define ROGUE_FW_THR1_SP (0x78890000)
+
+/*
+ ******************************************************************************
+ * META compatibility
+ ******************************************************************************
+ */
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0xFF00FFFFU)
+
+#define ROGUE_CR_META_MTP218_CORE_ID_VALUE 0x19
+#define ROGUE_CR_META_MTP219_CORE_ID_VALUE 0x1E
+#define ROGUE_CR_META_LTP218_CORE_ID_VALUE 0x1C
+#define ROGUE_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#define ROGUE_FW_PROCESSOR_META "META"
+
+#endif /* __PVR_ROGUE_META_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips.h b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
new file mode 100644
index 000000000000..7f79ecdc48e0
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
@@ -0,0 +1,335 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_MIPS_H__
+#define __PVR_ROGUE_MIPS_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Utility defines for memory management. */
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K (12)
+#define ROGUE_MIPSFW_PAGE_SIZE_4K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_PAGE_MASK_4K (ROGUE_MIPSFW_PAGE_SIZE_4K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K (16)
+#define ROGUE_MIPSFW_PAGE_SIZE_64K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K)
+#define ROGUE_MIPSFW_PAGE_MASK_64K (ROGUE_MIPSFW_PAGE_SIZE_64K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K (18)
+#define ROGUE_MIPSFW_PAGE_SIZE_256K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K)
+#define ROGUE_MIPSFW_PAGE_MASK_256K (ROGUE_MIPSFW_PAGE_SIZE_256K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB (20)
+#define ROGUE_MIPSFW_PAGE_SIZE_1MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB)
+#define ROGUE_MIPSFW_PAGE_MASK_1MB (ROGUE_MIPSFW_PAGE_SIZE_1MB - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB (22)
+#define ROGUE_MIPSFW_PAGE_SIZE_4MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB)
+#define ROGUE_MIPSFW_PAGE_MASK_4MB (ROGUE_MIPSFW_PAGE_SIZE_4MB - 1)
+#define ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* log2 page table sizes dependent on FW heap size and page size (for each OS). */
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+						      ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K +    \
+						      ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_64K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+						       ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K +   \
+						       ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+/* Maximum number of page table pages (both Host and MIPS pages). */
+#define ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES (4)
+/* Total number of TLB entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+/* "Uncached" caching policy. */
+#define ROGUE_MIPSFW_UNCACHED_CACHE_POLICY (2)
+/* "Write-back write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY (3)
+/* "Write-through no write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY (1)
+/* Cache policy used by the MIPS core when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_CACHED_POLICY (ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cache policy used by the MIPS core when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT (ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+/* MIPS EntryLo/PTE format. */
+
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0x7FFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN (0x80000000)
+
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0xBFFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0x40000000)
+
+/* Page Frame Number */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT (6)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12)
+/* Mask used for the MIPS page table when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE (20)
+/* Mask used for the MIPS page table when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24)
+#define ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+						 ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U)
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0xFFFFFFC7)
+
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_SHIFT (2U)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_CLRMSK (0xFFFFFFFB)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_EN (0x00000004)
+
+#define ROGUE_MIPSFW_ENTRYLO_VALID_SHIFT (1U)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_CLRMSK (0xFFFFFFFD)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_EN (0x00000002)
+
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_SHIFT (0U)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_CLRMSK (0xFFFFFFFE)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN (0x00000001)
+
+#define ROGUE_MIPSFW_ENTRYLO_DVG (ROGUE_MIPSFW_ENTRYLO_DIRTY_EN | \
+				  ROGUE_MIPSFW_ENTRYLO_VALID_EN | \
+				  ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN)
+#define ROGUE_MIPSFW_ENTRYLO_UNCACHED (ROGUE_MIPSFW_UNCACHED_CACHE_POLICY << \
+				       ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED (ROGUE_MIPSFW_ENTRYLO_DVG | \
+					   ROGUE_MIPSFW_ENTRYLO_UNCACHED)
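+
+/*
+ * Illustrative sketch (hand-derived from the fields above; not a driver
+ * API): packing a 4 kB-aligned physical address into an uncached,
+ * dirty/valid/global EntryLo value:
+ *
+ *	entry_lo = ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED |
+ *		   ((u32)(phys_addr >> ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) &
+ *		    ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT);
+ */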
+
+/* Remap Range Config Addr Out. */
+/* These defines refer to the upper half of the Remap Range Config register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0)
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define ROGUE_MIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+						 ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
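+
+/*
+ * Illustrative sketch (same pattern as the EntryLo PFN conversion above;
+ * assumed usage, not a driver API): placing a 4 kB-aligned output address
+ * into the upper half of a Remap Range Config register:
+ *
+ *	upper = (u32)(phys_addr >> ROGUE_MIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT) &
+ *		ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_MASK;
+ */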
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ *   - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ *   - ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ *   - ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ *   - (benign trampoline)               : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper.
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2)
+#define ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES BIT(ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define ROGUE_MIPSFW_TRAMPOLINE_SIZE (ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES << \
+				      ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES + \
+						   ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+
+#define ROGUE_MIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define ROGUE_MIPSFW_TRAMPOLINE_OFFSET(a) ((a) - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define ROGUE_MIPSFW_SENSITIVE_ADDR(a) (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN == \
+					(~((1 << ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE) - 1) \
+					 & (a)))
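+
+/*
+ * Worked example (illustrative): ROGUE_MIPSFW_SENSITIVE_ADDR(0x1FC01234)
+ * clears the low ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (14) bits,
+ * leaving 0x1FC00000 == ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN, so this
+ * address is flagged as sensitive and must go through the trampoline.
+ */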
+
+/* Firmware virtual layout and remap configuration. */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address correspondent to the virtual base address,
+ *   used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ *   the bottom of the base input address that survive onto the output address
+ *   (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region.
+ */
+
+/* Boot remap setup. */
+#define ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000)
+#define ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup. */
+#define ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
+#define ROGUE_MIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000)
+#define ROGUE_MIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup. */
+#define ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000)
+#define ROGUE_MIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
+#define ROGUE_MIPSFW_EXCEPTIONS_VIRTUAL_BASE (ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Permanent mappings setup. */
+#define ROGUE_MIPSFW_PT_VIRTUAL_BASE (0xCF000000)
+#define ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000)
+#define ROGUE_MIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+
+/* Bootloader configuration data. */
+/*
+ * Bootloader configuration offset (where ROGUE_MIPSFW_BOOT_DATA lives)
+ * within the bootloader/NMI data page.
+ */
+#define ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET (0x0)
+
+/* NMI shared data. */
+/* Base address of the shared data within the bootloader/NMI data page. */
+#define ROGUE_MIPSFW_NMI_SHARED_DATA_BASE (0x100)
+/* Size used by Debug dump data. */
+#define ROGUE_MIPSFW_NMI_SHARED_SIZE (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words. */
+#define ROGUE_MIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
+#define ROGUE_MIPSFW_NMI_STATE_OFFSET (0x1)
+#define ROGUE_MIPSFW_NMI_ERROR_STATE_SET (0x1)
+
+/* MIPS boot stage. */
+#define ROGUE_MIPSFW_BOOT_STAGE_OFFSET (0x400)
+
+/*
+ * MIPS private data in the bootloader data page.
+ * Memory below this offset is used by the FW only, no interface data allowed.
+ */
+#define ROGUE_MIPSFW_PRIVATE_DATA_OFFSET (0x800)
+
+struct rogue_mipsfw_boot_data {
+	u64 stack_phys_addr;
+	u64 reg_base;
+	u64 pt_phys_addr[ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES];
+	u32 pt_log2_page_size;
+	u32 pt_num_pages;
+	u32 reserved1;
+	u32 reserved2;
+};
+
+#define ROGUE_MIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(u32))
+#define ROGUE_MIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(u64))
+
+/* Used for compatibility checks. */
+#define ROGUE_MIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU)
+#define ROGUE_MIPSFW_ARCHTYPE_VER_SHIFT (10U)
+#define ROGUE_MIPSFW_CORE_ID_VALUE (0x001U)
+#define ROGUE_FW_PROCESSOR_MIPS "MIPS"
+
+/* microAptivAP cache line size. */
+#define ROGUE_MIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U)
+
+/*
+ * The SOCIF transactions are identified with the top 16 bits of the physical address emitted by
+ * the MIPS.
+ */
+#define ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U)
+
+/* Values to put in the MIPS selectors for performance counters. */
+/* Icache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U)
+/* Icache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U)
+
+/* Dcache accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U)
+/* Dcache misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U)
+
+/* ITLB instruction accesses in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U)
+/* JTLB instruction access misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U)
+
+/* Instructions completed in COUNTER0. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U)
+/* JTLB data misses in COUNTER1. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U)
+
+/* Shift for the Event field in the MIPS perf ctrl registers. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U)
+
+/* Additional flags for performance counters. See MIPS manual for further reference. */
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U)
+#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U)
+
+#define ROGUE_MIPSFW_C0_NBHWIRQ	8
+
+/* Macros to decode C0_Cause register. */
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE(cause) (((cause) & 0x7c) >> 2)
+#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE_FWERROR 9
+/* Use only when Coprocessor Unusable exception. */
+#define ROGUE_MIPSFW_C0_CAUSE_UNUSABLE_UNIT(cause) (((cause) >> 28) & 0x3)
+#define ROGUE_MIPSFW_C0_CAUSE_PENDING_HWIRQ(cause) (((cause) & 0x3fc00) >> 10)
+#define ROGUE_MIPSFW_C0_CAUSE_FDCIPENDING BIT(21)
+#define ROGUE_MIPSFW_C0_CAUSE_IV BIT(23)
+#define ROGUE_MIPSFW_C0_CAUSE_IC BIT(25)
+#define ROGUE_MIPSFW_C0_CAUSE_PCIPENDING BIT(26)
+#define ROGUE_MIPSFW_C0_CAUSE_TIPENDING BIT(30)
+#define ROGUE_MIPSFW_C0_CAUSE_BRANCH_DELAY BIT(31)
+
+/* Macros to decode C0_Debug register. */
+#define ROGUE_MIPSFW_C0_DEBUG_EXCCODE(debug) (((debug) >> 10) & 0x1f)
+#define ROGUE_MIPSFW_C0_DEBUG_DSS BIT(0)
+#define ROGUE_MIPSFW_C0_DEBUG_DBP BIT(1)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBL BIT(2)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBS BIT(3)
+#define ROGUE_MIPSFW_C0_DEBUG_DIB BIT(4)
+#define ROGUE_MIPSFW_C0_DEBUG_DINT BIT(5)
+#define ROGUE_MIPSFW_C0_DEBUG_DIBIMPR BIT(6)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBLIMPR BIT(18)
+#define ROGUE_MIPSFW_C0_DEBUG_DDBSIMPR BIT(19)
+#define ROGUE_MIPSFW_C0_DEBUG_IEXI BIT(20)
+#define ROGUE_MIPSFW_C0_DEBUG_DBUSEP BIT(21)
+#define ROGUE_MIPSFW_C0_DEBUG_CACHEEP BIT(22)
+#define ROGUE_MIPSFW_C0_DEBUG_MCHECKP BIT(23)
+#define ROGUE_MIPSFW_C0_DEBUG_IBUSEP BIT(24)
+#define ROGUE_MIPSFW_C0_DEBUG_DM BIT(30)
+#define ROGUE_MIPSFW_C0_DEBUG_DBD BIT(31)
+
+/* Macros to decode TLB entries. */
+#define ROGUE_MIPSFW_TLB_GET_MASK(page_mask) (((page_mask) >> 13) & 0xFFFFU)
+/* Page size in kB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(page_mask) ((((page_mask) | 0x1FFF) + 1) >> 11)
+/* Page mask from page size in kB. */
+#define ROGUE_MIPSFW_TLB_GET_PAGE_MASK(page_size) ((((page_size) << 11) - 1) & ~0x7FF)
+#define ROGUE_MIPSFW_TLB_GET_VPN2(entry_hi) ((entry_hi) >> 13)
+#define ROGUE_MIPSFW_TLB_GET_COHERENCY(entry_lo) (((entry_lo) >> 3) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GET_PFN(entry_lo) (((entry_lo) >> 6) & 0xFFFFFU)
+/* GET_PA uses a non-standard PFN mask for 36 bit addresses. */
+#define ROGUE_MIPSFW_TLB_GET_PA(entry_lo) (((u64)(entry_lo) & \
+					    ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6)
+#define ROGUE_MIPSFW_TLB_GET_INHIBIT(entry_lo) (((entry_lo) >> 30) & 0x3U)
+#define ROGUE_MIPSFW_TLB_GET_DGV(entry_lo) ((entry_lo) & 0x7U)
+#define ROGUE_MIPSFW_TLB_GLOBAL BIT(0)
+#define ROGUE_MIPSFW_TLB_VALID BIT(1)
+#define ROGUE_MIPSFW_TLB_DIRTY BIT(2)
+#define ROGUE_MIPSFW_TLB_XI BIT(30)
+#define ROGUE_MIPSFW_TLB_RI BIT(31)
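+
+/*
+ * Worked example (illustrative): a MIPS PageMask of 0 encodes 4 kB pages,
+ * and ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(0) = ((0 | 0x1FFF) + 1) >> 11 = 4 kB;
+ * for 16 kB pages PageMask is 0x6000 and the macro yields 16.
+ */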
+
+#define ROGUE_MIPSFW_REMAP_GET_REGION_SIZE(region_size_encoding) (1 << (((region_size_encoding) \
+									+ 1) << 1))
+
+struct rogue_mips_tlb_entry {
+	u32 tlb_page_mask;
+	u32 tlb_hi;
+	u32 tlb_lo0;
+	u32 tlb_lo1;
+};
+
+struct rogue_mips_remap_entry {
+	u32 remap_addr_in;  /* Always 4k aligned. */
+	u32 remap_addr_out; /* Always 4k aligned. */
+	u32 remap_region_size;
+};
+
+struct rogue_mips_state {
+	u32 error_state; /* This must come first in the structure. */
+	u32 error_epc;
+	u32 status_register;
+	u32 cause_register;
+	u32 bad_register;
+	u32 epc;
+	u32 sp;
+	u32 debug;
+	u32 depc;
+	u32 bad_instr;
+	u32 unmapped_address;
+	struct rogue_mips_tlb_entry tlb[ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES];
+	struct rogue_mips_remap_entry remap[ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES];
+};
+
+#include "pvr_rogue_mips_check.h"
+
+#endif /* __PVR_ROGUE_MIPS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
new file mode 100644
index 000000000000..3ecaffe45a55
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h
@@ -0,0 +1,56 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_ROGUE_MIPS_CHECK_H__
+#define __PVR_ROGUE_MIPS_CHECK_H__
+
+#include <linux/build_bug.h>
+
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) == 0,
+				"offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_hi) == 4,
+				"offsetof(struct rogue_mips_tlb_entry, tlb_hi) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo0) == 8,
+				"offsetof(struct rogue_mips_tlb_entry, tlb_lo0) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo1) == 12,
+				"offsetof(struct rogue_mips_tlb_entry, tlb_lo1) incorrect");
+static_assert(sizeof(struct rogue_mips_tlb_entry) == 16,
+				"struct rogue_mips_tlb_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_in) == 0,
+				"offsetof(struct rogue_mips_remap_entry, remap_addr_in) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_out) == 4,
+				"offsetof(struct rogue_mips_remap_entry, remap_addr_out) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_region_size) == 8,
+				"offsetof(struct rogue_mips_remap_entry, remap_region_size) incorrect");
+static_assert(sizeof(struct rogue_mips_remap_entry) == 12,
+				"struct rogue_mips_remap_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_state, error_state) == 0,
+				"offsetof(struct rogue_mips_state, error_state) incorrect");
+static_assert(offsetof(struct rogue_mips_state, error_epc) == 4,
+				"offsetof(struct rogue_mips_state, error_epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, status_register) == 8,
+				"offsetof(struct rogue_mips_state, status_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, cause_register) == 12,
+				"offsetof(struct rogue_mips_state, cause_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_register) == 16,
+				"offsetof(struct rogue_mips_state, bad_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, epc) == 20,
+				"offsetof(struct rogue_mips_state, epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, sp) == 24,
+				"offsetof(struct rogue_mips_state, sp) incorrect");
+static_assert(offsetof(struct rogue_mips_state, debug) == 28,
+				"offsetof(struct rogue_mips_state, debug) incorrect");
+static_assert(offsetof(struct rogue_mips_state, depc) == 32,
+				"offsetof(struct rogue_mips_state, depc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_instr) == 36,
+				"offsetof(struct rogue_mips_state, bad_instr) incorrect");
+static_assert(offsetof(struct rogue_mips_state, unmapped_address) == 40,
+				"offsetof(struct rogue_mips_state, unmapped_address) incorrect");
+static_assert(offsetof(struct rogue_mips_state, tlb) == 44,
+				"offsetof(struct rogue_mips_state, tlb) incorrect");
+static_assert(offsetof(struct rogue_mips_state, remap) == 300,
+				"offsetof(struct rogue_mips_state, remap) incorrect");
+static_assert(sizeof(struct rogue_mips_state) == 684,
+				"struct rogue_mips_state is incorrect size");
+
+#endif /* __PVR_ROGUE_MIPS_CHECK_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
new file mode 100644
index 000000000000..9ee958401f5a
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
@@ -0,0 +1,136 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+/*  *** Autogenerated C -- do not edit ***  */
+
+#ifndef __PVR_ROGUE_MMU_DEFS_H__
+#define __PVR_ROGUE_MMU_DEFS_H__
+
+#define ROGUE_MMU_DEFS_REVISION 0
+
+#define ROGUE_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define ROGUE_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define ROGUE_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define ROGUE_BIF_DM_ENCODING_TLA (0x00000003U)
+#define ROGUE_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define ROGUE_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define ROGUE_BIF_DM_ENCODING_META (0x00000007U)
+#define ROGUE_BIF_DM_ENCODING_HOST (0x00000008U)
+#define ROGUE_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK (0xFFFFFF003FFFFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK (0xFFFFFFFFC01FFFFFULL)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK (0xFFFFFFFFFFE00FFFULL)
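
[A short illustrative sketch of how these shift/clear-mask pairs split a device-virtual address into its three page-table indices; the helper is hypothetical, and the PD and PT cases follow the same pattern with their own _SHIFT/_CLRMSK pairs.

	static u32 pvr_vaddr_pc_index(u64 device_addr)
	{
		/* Mask off everything but the PC index bits, then shift down. */
		return (u32)((device_addr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >>
			     ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT);
	}

For example, device address 0x40203000 decodes to PC index 1, PD index 1 and PT index 3.]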
+
+#define ROGUE_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+#define ROGUE_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+#define ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
+
+#define ROGUE_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define ROGUE_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (0xFFFFFF0000003FFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (0xFFFFFF000000FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (0xFFFFFF000003FFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (0xFFFFFF00000FFFFFULL)
+
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (0xFFFFFF00001FFFFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (0xFFFFFF00000003FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (0xFFFFFF00000000FFULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (0xFFFFFF000000003FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (0xFFFFFF000000001FULL)
+
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (0xBFFFFFFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (0x4000000000000000ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (0xC00000FFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define ROGUE_MMUCTRL_PT_DATA_PAGE_CLRMSK (0xFFFFFF0000000FFFULL)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (0xFFFFFFFFFFFFF03FULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFFFFFFFFDFULL)
+#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (0x0000000000000020ULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (0xFFFFFFFFFFFFFFEFULL)
+#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_EN (0x0000000000000010ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (0xFFFFFFFFFFFFFFF7ULL)
+#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define ROGUE_MMUCTRL_PT_DATA_CC_CLRMSK (0xFFFFFFFFFFFFFFFBULL)
+#define ROGUE_MMUCTRL_PT_DATA_CC_EN (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (0xFFFFFFFFFFFFFFFDULL)
+#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_EN (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PT_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFEFFFFFFFFFFULL)
+#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (0x0000010000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (0xFFFFFF000000001FULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (0xFFFFFFFFFFFFFFF1ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (0x0000000000000000ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (0x0000000000000002ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (0x0000000000000004ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (0x0000000000000006ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (0x0000000000000008ULL)
+#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (0x000000000000000aULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL)
+#define ROGUE_MMUCTRL_PD_DATA_VALID_EN (0x0000000000000001ULL)
+
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU)
+#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU)
+#define ROGUE_MMUCTRL_PC_DATA_VALID_EN (0x00000001U)
+
+#endif /* __PVR_ROGUE_MMU_DEFS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c
new file mode 100644
index 000000000000..21db9b51f6e1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.c
@@ -0,0 +1,321 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_job.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <linux/align.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <uapi/drm/pvr_drm.h>
+
+static __always_inline bool
+stream_def_is_supported(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def)
+{
+	if (stream_def->feature == PVR_FEATURE_NONE)
+		return true;
+
+	if (!(stream_def->feature & PVR_FEATURE_NOT) &&
+	    pvr_device_has_feature(pvr_dev, stream_def->feature)) {
+		return true;
+	}
+
+	if ((stream_def->feature & PVR_FEATURE_NOT) &&
+	    !pvr_device_has_feature(pvr_dev, stream_def->feature & ~PVR_FEATURE_NOT)) {
+		return true;
+	}
+
+	return false;
+}
+
+static int
+pvr_stream_get_data(u8 *stream, u32 *stream_offset, u32 stream_size, int data_size, int align_size,
+		    void *dest)
+{
+	*stream_offset = ALIGN(*stream_offset, align_size);
+
+	if ((*stream_offset + data_size) > stream_size)
+		return -EINVAL;
+
+	memcpy(dest, stream + *stream_offset, data_size);
+
+	(*stream_offset) += data_size;
+
+	return 0;
+}
+
+/**
+ * pvr_stream_process_1() - Process a single stream and fill destination structure
+ * @pvr_dev: Device pointer.
+ * @stream_def: Stream definition.
+ * @nr_entries: Number of entries in @stream_def.
+ * @stream: Pointer to stream.
+ * @stream_offset: Starting offset within stream.
+ * @stream_size: Size of input stream, in bytes.
+ * @dest: Pointer to destination structure.
+ * @dest_size: Size of destination structure.
+ * @stream_offset_out: Pointer to variable to write updated stream offset to. May be NULL.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL on malformed stream.
+ */
+static int
+pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def,
+		     u32 nr_entries, u8 *stream, u32 stream_offset, u32 stream_size,
+		     u8 *dest, u32 dest_size, u32 *stream_offset_out)
+{
+	int err = 0;
+	u32 i;
+
+	for (i = 0; i < nr_entries; i++) {
+		if (stream_def[i].offset >= dest_size) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (!stream_def_is_supported(pvr_dev, &stream_def[i]))
+			continue;
+
+		switch (stream_def[i].size) {
+		case PVR_STREAM_SIZE_8:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u8),
+						  sizeof(u8), dest + stream_def[i].offset);
+			if (err)
+				goto err_out;
+			break;
+
+		case PVR_STREAM_SIZE_16:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u16),
+						  sizeof(u16), dest + stream_def[i].offset);
+			if (err)
+				goto err_out;
+			break;
+
+		case PVR_STREAM_SIZE_32:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+						  sizeof(u32), dest + stream_def[i].offset);
+			if (err)
+				goto err_out;
+			break;
+
+		case PVR_STREAM_SIZE_64:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u64),
+						  sizeof(u64), dest + stream_def[i].offset);
+			if (err)
+				goto err_out;
+			break;
+
+		case PVR_STREAM_SIZE_ARRAY:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size,
+						  stream_def[i].array_size, sizeof(u64),
+						  dest + stream_def[i].offset);
+			if (err)
+				goto err_out;
+			break;
+		}
+	}
+
+	if (stream_offset_out && !err)
+		*stream_offset_out = stream_offset;
+
+err_out:
+	return err;
+}
+
+static int
+pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
+			      const struct pvr_stream_cmd_defs *cmd_defs, void *ext_stream,
+			      u32 stream_offset, u32 ext_stream_size, void *dest)
+{
+	u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
+	u32 ext_header;
+	int err = 0;
+	u32 i;
+
+	/* Copy "must have" mask from device. We clear this as we process the stream. */
+	memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type],
+	       sizeof(musthave_masks));
+
+	do {
+		const struct pvr_stream_ext_header *header;
+		u32 type;
+		u32 data;
+
+		err = pvr_stream_get_data(ext_stream, &stream_offset, ext_stream_size, sizeof(u32),
+					  sizeof(ext_header), &ext_header);
+		if (err)
+			goto err_out;
+
+		type = (ext_header & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT;
+		data = ext_header & PVR_STREAM_EXTHDR_DATA_MASK;
+
+		if (type >= cmd_defs->ext_nr_headers) {
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		header = &cmd_defs->ext_headers[type];
+		if (data & ~header->valid_mask) {
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		musthave_masks[type] &= ~data;
+
+		for (i = 0; i < header->ext_streams_num; i++) {
+			const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i];
+
+			if (!(ext_header & ext_def->header_mask))
+				continue;
+
+			if (!pvr_device_has_uapi_quirk(pvr_dev, ext_def->quirk)) {
+				err = -EINVAL;
+				goto err_out;
+			}
+
+			err = pvr_stream_process_1(pvr_dev, ext_def->stream, ext_def->stream_len,
+						   ext_stream, stream_offset,
+						   ext_stream_size, dest,
+						   cmd_defs->dest_size, &stream_offset);
+			if (err)
+				goto err_out;
+		}
+	} while (ext_header & PVR_STREAM_EXTHDR_CONTINUATION);
+
+	/*
+	 * Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks
+	 * for this command was not present.
+	 */
+	for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+		if (musthave_masks[i]) {
+			err = -EINVAL;
+			goto err_out;
+		}
+	}
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_stream_process() - Build FW structure from stream
+ * @pvr_dev: Device pointer.
+ * @cmd_defs: Stream definition.
+ * @stream: Pointer to command stream.
+ * @stream_size: Size of command stream, in bytes.
+ * @job: Target job; on success, @job->cmd is allocated and populated from the
+ *       stream.
+ *
+ * The caller is responsible for freeing @job->cmd.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%ENOMEM on out of memory, or
+ *  * -%EINVAL on malformed stream.
+ */
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+		   void *stream, u32 stream_size, struct pvr_job *job)
+{
+	u32 stream_offset = 0;
+	u32 main_stream_len;
+	u32 padding;
+	int err;
+
+	if (!stream || !stream_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
+	if (!job->cmd) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	job->cmd_len = cmd_defs->dest_size;
+
+	err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+				  sizeof(u32), &main_stream_len);
+	if (err)
+		goto err_free_dest;
+
+	/*
+	 * The u32 after the stream length is padding to ensure u64 alignment,
+	 * but may be used for expansion in the future. Verify that it is zero.
+	 */
+	err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+				  sizeof(u32), &padding);
+	if (err)
+		goto err_free_dest;
+
+	if (main_stream_len < stream_offset || main_stream_len > stream_size || padding) {
+		err = -EINVAL;
+		goto err_free_dest;
+	}
+
+	err = pvr_stream_process_1(pvr_dev, cmd_defs->main_stream, cmd_defs->main_stream_len,
+				   stream, stream_offset, main_stream_len, job->cmd,
+				   cmd_defs->dest_size, &stream_offset);
+	if (err)
+		goto err_free_dest;
+
+	if (stream_offset < stream_size) {
+		err = pvr_stream_process_ext_stream(pvr_dev, cmd_defs, stream, stream_offset,
+						    stream_size, job->cmd);
+		if (err)
+			goto err_free_dest;
+	} else {
+		u32 i;
+
+		/*
+		 * If we don't have an extension stream then there must not be any "must have"
+		 * quirks for this command.
+		 */
+		for (i = 0; i < cmd_defs->ext_nr_headers; i++) {
+			if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i]) {
+				err = -EINVAL;
+				goto err_free_dest;
+			}
+		}
+	}
+
+	return 0;
+
+err_free_dest:
+	kfree(job->cmd);
+	job->cmd = NULL;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_stream_create_musthave_masks() - Create "must have" masks for streams based on current device
+ *                                      quirks
+ * @pvr_dev: Device pointer.
+ */
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev)
+{
+	memset(pvr_dev->stream_musthave_quirks, 0, sizeof(pvr_dev->stream_musthave_quirks));
+
+	if (pvr_device_has_uapi_quirk(pvr_dev, 47217))
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+			PVR_STREAM_EXTHDR_FRAG0_BRN47217;
+
+	if (pvr_device_has_uapi_quirk(pvr_dev, 49927)) {
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_GEOM][0] |=
+			PVR_STREAM_EXTHDR_GEOM0_BRN49927;
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+			PVR_STREAM_EXTHDR_FRAG0_BRN49927;
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_COMPUTE][0] |=
+			PVR_STREAM_EXTHDR_COMPUTE0_BRN49927;
+	}
+}
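
[To make the expected wire format concrete: a stream begins with a u32 length (counted from the start of the stream, so it includes this 8-byte header), a u32 of zero padding for u64 alignment, and then the values in stream-definition order, each naturally aligned. A hedged sketch of how a caller might emit that header; the helper name is hypothetical.

	static void pvr_stream_write_header(u8 *stream, u32 main_stream_len)
	{
		u32 zero = 0;

		/* Length includes the 8-byte header itself. */
		memcpy(stream, &main_stream_len, sizeof(u32));
		/* Reserved word; pvr_stream_process() rejects non-zero values. */
		memcpy(stream + sizeof(u32), &zero, sizeof(u32));
	}
]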
diff --git a/drivers/gpu/drm/imagination/pvr_stream.h b/drivers/gpu/drm/imagination/pvr_stream.h
new file mode 100644
index 000000000000..77c3872f2f12
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.h
@@ -0,0 +1,74 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_STREAM_H__
+#define __PVR_STREAM_H__
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct pvr_device;
+
+struct pvr_job;
+
+enum pvr_stream_type {
+	PVR_STREAM_TYPE_GEOM = 0,
+	PVR_STREAM_TYPE_FRAG,
+	PVR_STREAM_TYPE_COMPUTE,
+	PVR_STREAM_TYPE_TRANSFER,
+
+	PVR_STREAM_TYPE_MAX
+};
+
+enum pvr_stream_size {
+	PVR_STREAM_SIZE_8 = 0,
+	PVR_STREAM_SIZE_16,
+	PVR_STREAM_SIZE_32,
+	PVR_STREAM_SIZE_64,
+	PVR_STREAM_SIZE_ARRAY,
+};
+
+#define PVR_FEATURE_NOT  BIT(31)
+#define PVR_FEATURE_NONE U32_MAX
+
+struct pvr_stream_def {
+	u32 offset;
+	enum pvr_stream_size size;
+	u32 array_size;
+	u32 feature;
+};
+
+struct pvr_stream_ext_def {
+	const struct pvr_stream_def *stream;
+	u32 stream_len;
+	u32 header_mask;
+	u32 quirk;
+};
+
+struct pvr_stream_ext_header {
+	const struct pvr_stream_ext_def *ext_streams;
+	u32 ext_streams_num;
+	u32 valid_mask;
+};
+
+struct pvr_stream_cmd_defs {
+	enum pvr_stream_type type;
+
+	const struct pvr_stream_def *main_stream;
+	u32 main_stream_len;
+
+	u32 ext_nr_headers;
+	const struct pvr_stream_ext_header *ext_headers;
+
+	size_t dest_size;
+};
+
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+		   void *stream, u32 stream_size, struct pvr_job *job);
+
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev);
+
+#endif /* __PVR_STREAM_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.c b/drivers/gpu/drm/imagination/pvr_stream_defs.c
new file mode 100644
index 000000000000..25710122b203
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.c
@@ -0,0 +1,270 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+
+#include <linux/stddef.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_STREAM_DEF_SET(owner, member, _size, _array_size, _feature) \
+	{ .offset = offsetof(struct owner, member), \
+	  .size = _size,  \
+	  .array_size = _array_size, \
+	  .feature = _feature }
+
+#define PVR_STREAM_DEF(owner, member, member_size)  \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_FEATURE(owner, member, member_size, feature) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, feature)
+
+#define PVR_STREAM_DEF_NOT_FEATURE(owner, member, member_size, feature)       \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, \
+			   (feature) | PVR_FEATURE_NOT)
+
+#define PVR_STREAM_DEF_ARRAY(owner, member)                                       \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY,                  \
+			   sizeof(((struct owner *)0)->member), PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_ARRAY_FEATURE(owner, member, feature)            \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY,         \
+			   sizeof(((struct owner *)0)->member), feature)
+
+#define PVR_STREAM_DEF_ARRAY_NOT_FEATURE(owner, member, feature)                             \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY,                             \
+			   sizeof(((struct owner *)0)->member), (feature) | PVR_FEATURE_NOT)
+
+/*
+ * When adding new parameters to the stream definition, the new parameters must go after the
+ * existing parameters, to preserve order. As parameters are naturally aligned, care must be taken
+ * with respect to implicit padding in the stream; padding should be minimised as much as possible.
+ */
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_ctrl_stream_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu_border_colour_table, 64),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect0, 64,
+			       PVR_FEATURE_VDM_DRAWINDIRECT),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect1, 32,
+			       PVR_FEATURE_VDM_DRAWINDIRECT),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.te_psg, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_context_resume_task0_size, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_context_resume_task3_size, 32,
+			       PVR_FEATURE_VDM_OBJECT_LEVEL_LLS),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.view_idx, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.pds_coeff_free_prog, 32,
+			       PVR_FEATURE_TESSELLATION),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream_brn49927[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_geom_ext_streams_0[] = {
+	{
+		.stream = rogue_fwif_cmd_geom_stream_brn49927,
+		.stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream_brn49927),
+		.header_mask = PVR_STREAM_EXTHDR_GEOM0_BRN49927,
+		.quirk = 49927,
+	},
+};
+
+static const struct pvr_stream_ext_header cmd_geom_ext_headers[] = {
+	{
+		.ext_streams = cmd_geom_ext_streams_0,
+		.ext_streams_num = ARRAY_SIZE(cmd_geom_ext_streams_0),
+		.valid_mask = PVR_STREAM_EXTHDR_GEOM0_VALID,
+	},
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_geom_stream = {
+	.type = PVR_STREAM_TYPE_GEOM,
+
+	.main_stream = rogue_fwif_cmd_geom_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream),
+
+	.ext_nr_headers = ARRAY_SIZE(cmd_geom_ext_headers),
+	.ext_headers = cmd_geom_ext_headers,
+
+	.dest_size = sizeof(struct rogue_fwif_cmd_geom),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_scissor_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_dbias_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zlsctl, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zload_store_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_stencil_load_store_base, 64),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.fb_cdc_zls, 64,
+			       PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP),
+	PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pbe_word),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu_border_colour_table, 64),
+	PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_bgnd),
+	PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_pr_bgnd),
+	PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.usc_clear_register),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.usc_pixel_output_ctrl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjdepth, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjvals, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_aa, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_xtp_pipe_enable, 32,
+			       PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_ctl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_info, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.pixel_phantom, 32,
+			       PVR_FEATURE_CLUSTER_GROUPING),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.view_idx, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_data, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32,
+			       PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_zls_pixels, 32,
+			       PVR_FEATURE_ZLS_SUBTILE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.rgx_cr_blackpearl_fix, 32,
+			       PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, zls_stride, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, sls_stride, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, execute_count, 32,
+			       PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn47217[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn49927[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_frag_ext_streams_0[] = {
+	{
+		.stream = rogue_fwif_cmd_frag_stream_brn47217,
+		.stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn47217),
+		.header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN47217,
+		.quirk = 47217,
+	},
+	{
+		.stream = rogue_fwif_cmd_frag_stream_brn49927,
+		.stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn49927),
+		.header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN49927,
+		.quirk = 49927,
+	},
+};
+
+static const struct pvr_stream_ext_header cmd_frag_ext_headers[] = {
+	{
+		.ext_streams = cmd_frag_ext_streams_0,
+		.ext_streams_num = ARRAY_SIZE(cmd_frag_ext_streams_0),
+		.valid_mask = PVR_STREAM_EXTHDR_FRAG0_VALID,
+	},
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_frag_stream = {
+	.type = PVR_STREAM_TYPE_FRAG,
+
+	.main_stream = rogue_fwif_cmd_frag_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream),
+
+	.ext_nr_headers = ARRAY_SIZE(cmd_frag_ext_headers),
+	.ext_headers = cmd_frag_ext_headers,
+
+	.dest_size = sizeof(struct rogue_fwif_cmd_frag),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu_border_colour_table, 64),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_queue, 64,
+			       PVR_FEATURE_CDM_USER_MODE_QUEUE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_base, 64,
+			       PVR_FEATURE_CDM_USER_MODE_QUEUE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb, 64,
+			       PVR_FEATURE_CDM_USER_MODE_QUEUE),
+	PVR_STREAM_DEF_NOT_FEATURE(rogue_fwif_cmd_compute, regs.cdm_ctrl_stream_base, 64,
+				   PVR_FEATURE_CDM_USER_MODE_QUEUE),
+	PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_context_state_base_addr, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_resume_pds1, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_item, 32,
+			       PVR_FEATURE_COMPUTE_MORTON_CAPABLE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.compute_cluster, 32,
+			       PVR_FEATURE_CLUSTER_GROUPING),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.tpu_tag_cdm_ctrl, 32,
+			       PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, stream_start_offset, 32,
+			       PVR_FEATURE_CDM_USER_MODE_QUEUE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, execute_count, 32,
+			       PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_compute_stream_brn49927[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_compute_ext_streams_0[] = {
+	{
+		.stream = rogue_fwif_cmd_compute_stream_brn49927,
+		.stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream_brn49927),
+		.header_mask = PVR_STREAM_EXTHDR_COMPUTE0_BRN49927,
+		.quirk = 49927,
+	},
+};
+
+static const struct pvr_stream_ext_header cmd_compute_ext_headers[] = {
+	{
+		.ext_streams = cmd_compute_ext_streams_0,
+		.ext_streams_num = ARRAY_SIZE(cmd_compute_ext_streams_0),
+		.valid_mask = PVR_STREAM_EXTHDR_COMPUTE0_VALID,
+	},
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_compute_stream = {
+	.type = PVR_STREAM_TYPE_COMPUTE,
+
+	.main_stream = rogue_fwif_cmd_compute_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream),
+
+	.ext_nr_headers = ARRAY_SIZE(cmd_compute_ext_headers),
+	.ext_headers = cmd_compute_ext_headers,
+
+	.dest_size = sizeof(struct rogue_fwif_cmd_compute),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_transfer_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd0_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd1_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd3_sizeinfo, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_base, 64),
+	PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_transfer, regs.pbe_wordx_mrty),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_bgobjvals, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_pixel_output_ctrl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register0, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register1, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register2, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register3, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_size, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render_origin, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_ctl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_aa, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_info, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_code, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_data, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_rgn, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.isp_xtp_pipe_enable, 32,
+			       PVR_FEATURE_S7_TOP_INFRASTRUCTURE),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.frag_screen, 32,
+			       PVR_FEATURE_GPU_MULTICORE_SUPPORT),
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream = {
+	.type = PVR_STREAM_TYPE_TRANSFER,
+
+	.main_stream = rogue_fwif_cmd_transfer_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_transfer_stream),
+
+	.ext_nr_headers = 0,
+
+	.dest_size = sizeof(struct rogue_fwif_cmd_transfer),
+};
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.h b/drivers/gpu/drm/imagination/pvr_stream_defs.h
new file mode 100644
index 000000000000..4ddd2912f0d6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.h
@@ -0,0 +1,14 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_STREAM_DEFS_H__
+#define __PVR_STREAM_DEFS_H__
+
+#include "pvr_stream.h"
+
+extern const struct pvr_stream_cmd_defs pvr_cmd_geom_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_frag_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_compute_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream;
+
+#endif /* __PVR_STREAM_DEFS_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_vendor.h b/drivers/gpu/drm/imagination/pvr_vendor.h
new file mode 100644
index 000000000000..fa52e21f4bbc
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vendor.h
@@ -0,0 +1,77 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_VENDOR_H__
+#define __PVR_VENDOR_H__
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/**
+ * struct pvr_vendor_callbacks - Vendor-specific callbacks
+ */
+struct pvr_vendor_callbacks {
+	/**
+	 * @init: Initialise vendor-specific functionality
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This callback should initialise any vendor-specific data in @pvr_dev->vendor.data, and
+	 * retrieve any required resources. It should not attempt to power on the GPU or interact
+	 * with the GPU hardware in any way.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 * * 0 on success, or
+	 * * Any negative error (see individual implementations for details).
+	 */
+	int (*init)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @fini: Close vendor-specific functionality
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This callback is optional.
+	 */
+	void (*fini)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @power_enable: Enable GPU power
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * On function entry the GPU clocks in device tree will be enabled, but the GPU will
+	 * otherwise be unpowered.
+	 *
+	 * On function exit the GPU will be fully powered and the register file will be accessible.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * -%EINVAL if the GPU was already powered, or
+	 *  * -%EBUSY if the GPU fails to power on.
+	 */
+	int (*power_enable)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @power_disable: Disable GPU power
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * On function entry the GPU will be fully powered.
+	 *
+	 * On function exit the GPU will be unpowered. This function will not affect the clocks
+	 * defined in device tree; the driver will disable these later.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *  * 0 on success, or
+	 *  * -%EINVAL if the GPU was already unpowered, or
+	 *  * -%EBUSY if the GPU fails to power off.
+	 */
+	int (*power_disable)(struct pvr_device *pvr_dev);
+};
+
+extern const struct pvr_vendor_callbacks pvr_mt8173_callbacks;
+
+#endif /* __PVR_VENDOR_H__ */
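
[For illustration only, a vendor implementation would be expected to take roughly this shape; the function bodies here are placeholders, and the real MT8173 callbacks live elsewhere in the series.

	static int pvr_mt8173_power_enable(struct pvr_device *pvr_dev)
	{
		/* Vendor-specific power-on sequencing goes here. */
		return 0;
	}

	static int pvr_mt8173_power_disable(struct pvr_device *pvr_dev)
	{
		/* Vendor-specific power-off sequencing goes here. */
		return 0;
	}

	const struct pvr_vendor_callbacks pvr_mt8173_callbacks = {
		/* .init and .fini are optional and omitted in this sketch. */
		.power_enable = pvr_mt8173_power_enable,
		.power_disable = pvr_mt8173_power_disable,
	};
]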
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
new file mode 100644
index 000000000000..2ddb492a5f83
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -0,0 +1,3811 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_vm.h"
+
+#include "drm/pvr_drm.h"
+#include "linux/gfp_types.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_heap_config.h"
+#include "pvr_rogue_mmu_defs.h"
+
+#include <drm/drm_gem.h>
+
+#include <linux/bitops.h>
+#include <linux/compiler_attributes.h>
+#include <linux/container_of.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/rbtree.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/*
+ * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently
+ * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both
+ * ensures that the selected host page size corresponds to a valid device page
+ * size and sets up values needed by the MMU code below.
+ */
+#if (PVR_DEVICE_PAGE_SIZE == SZ_4K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_4KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_16K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_16KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_64K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_64KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_256K)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_256KB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_1M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_1MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK
+#elif (PVR_DEVICE_PAGE_SIZE == SZ_2M)
+# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_2MB
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT
+# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK
+#else
+# error Unsupported device page size PVR_DEVICE_PAGE_SIZE
+#endif
+
+#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X   \
+	(ROGUE_MMUCTRL_ENTRIES_PT_VALUE >> \
+	 (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K)))
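
[Worked example: with 16KiB device pages the shift is 14 - 12 = 2, so this evaluates to 512 >> 2 = 128 level 0 entries, matching the L1 entry table documented further down. An equivalent compile-time cross-check, illustrative and not part of the patch, would be:

	static_assert(ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X ==
		      ROGUE_MMUCTRL_ENTRIES_PT_VALUE / (PVR_DEVICE_PAGE_SIZE / SZ_4K),
		      "L0 entry count must scale inversely with device page size");
]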
+
+/**
+ * pvr_vm_mmu_flush() - Request flush of all MMU caches.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function must be called following any possible change to the MMU page
+ * tables.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * Any error encountered while submitting the flush command via the KCCB.
+ */
+int
+pvr_vm_mmu_flush(struct pvr_device *pvr_dev)
+{
+	struct rogue_fwif_kccb_cmd cmd_mmu_cache;
+	struct rogue_fwif_mmucachedata *cmd_mmu_cache_data =
+		&cmd_mmu_cache.cmd_data.mmu_cache_data;
+	u32 slot;
+	int err;
+
+	/* Can't flush MMU if the firmware hasn't booted yet. */
+	if (!pvr_dev->fw_dev.booted) {
+		err = 0;
+		goto err_out;
+	}
+
+	cmd_mmu_cache.cmd_type = ROGUE_FWIF_KCCB_CMD_MMUCACHE;
+	/* Request a complete MMU flush, across all page table levels, TLBs and contexts. */
+	cmd_mmu_cache_data->cache_flags = ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT |
+					  ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD |
+					  ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC |
+					  ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB |
+					  ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
+	pvr_gem_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj,
+			    &cmd_mmu_cache_data->mmu_cache_sync_fw_addr);
+	cmd_mmu_cache_data->mmu_cache_sync_update_value = 0;
+
+	err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot);
+	if (err)
+		goto err_out;
+
+	err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+
+err_out:
+	return err;
+}
+
+/**
+ * DOC: PowerVR Virtual Memory Handling
+ */
+/**
+ * DOC: PowerVR Virtual Memory Handling (constants)
+ *
+ * .. c:macro:: PVR_IDX_INVALID
+ *
+ *    Default value for a u16-based index.
+ *
+ *    This value cannot be zero, since zero is a valid index value.
+ */
+#define PVR_IDX_INVALID ((u16)(-1))
+
+/**
+ * DOC: VM backing pages
+ */
+/**
+ * DOC: VM backing pages (constants)
+ *
+ * .. c:macro:: PVR_VM_BACKING_PAGE_SIZE
+ *
+ *    Page size of a PowerVR device's integrated MMU. The CPU page size must be
+ *    at least as large as this value for the current implementation; this is
+ *    checked at compile-time.
+ */
+#define PVR_VM_BACKING_PAGE_SIZE SZ_4K
+static_assert(PAGE_SIZE >= PVR_VM_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_vm_backing_page - Represents a single page used to back a page
+ *                              table of any level.
+ * @dma_addr: DMA address of this page.
+ * @host_ptr: CPU address of this page.
+ * @pvr_dev: The PowerVR device to which this page is associated. **For
+ *           internal use only.**
+ */
+struct pvr_vm_backing_page {
+	dma_addr_t dma_addr;
+	void *host_ptr;
+/* private: internal use only */
+	struct pvr_device *pvr_dev;
+};
+
+/**
+ * pvr_vm_backing_page_init() - Initialize a VM backing page.
+ * @page: Target backing page.
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This function performs three distinct operations:
+ *
+ * 1. Allocate a single page,
+ * 2. Map the page to the CPU, and
+ * 3. Map the page to DMA-space.
+ *
+ * It is expected that @page be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%ENOMEM if allocation of the backing page or mapping of the backing
+ *    page to DMA fails.
+ */
+static int
+pvr_vm_backing_page_init(struct pvr_vm_backing_page *page,
+			 struct pvr_device *pvr_dev)
+{
+	struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+	struct page *raw_page;
+	int err;
+
+	dma_addr_t dma_addr;
+	void *host_ptr;
+
+	raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL);
+	if (!raw_page) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	host_ptr = kmap(raw_page);
+
+	dma_addr = dma_map_page(dev, raw_page, 0, PVR_VM_BACKING_PAGE_SIZE,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		err = -ENOMEM;
+		goto err_unmap_free_page;
+	}
+
+	page->dma_addr = dma_addr;
+	page->host_ptr = host_ptr;
+	page->pvr_dev = pvr_dev;
+
+	return 0;
+
+err_unmap_free_page:
+	kunmap(raw_page);
+	__free_page(raw_page);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_backing_page_fini() - Teardown a VM backing page.
+ * @page: Target backing page.
+ *
+ * This function performs the mirror operations to pvr_vm_backing_page_init(),
+ * in reverse order:
+ *
+ * 1. Unmap the page from DMA-space,
+ * 2. Unmap the page from the CPU, and
+ * 3. Free the page.
+ *
+ * It also zeros @page.
+ *
+ * It is a no-op to call this function a second (or further) time on any @page.
+ */
+static void
+pvr_vm_backing_page_fini(struct pvr_vm_backing_page *page)
+{
+	struct device *dev;
+	struct page *raw_page;
+
+	/* Do nothing if no allocation is present. */
+	if (!page->pvr_dev)
+		return;
+
+	dev = from_pvr_device(page->pvr_dev)->dev;
+	raw_page = kmap_to_page(page->host_ptr);
+
+	dma_unmap_page(dev, page->dma_addr, PVR_VM_BACKING_PAGE_SIZE,
+		       DMA_TO_DEVICE);
+
+	kunmap(raw_page);
+
+	__free_page(raw_page);
+
+	memset(page, 0, sizeof(*page));
+}
+
+/**
+ * pvr_vm_backing_page_sync() - Flush a VM backing page from the CPU to the
+ *                              device.
+ * @page: Target backing page.
+ *
+ * .. caution::
+ *
+ *    **This is potentially an expensive function call.** Only call
+ *    pvr_vm_backing_page_sync() once you're sure you have no more changes to
+ *    make to the backing page in the immediate future.
+ */
+static void
+pvr_vm_backing_page_sync(struct pvr_vm_backing_page *page)
+{
+	struct device *dev;
+
+	/*
+	 * Do nothing if no allocation is present. This may be the case if
+	 * we are unmapping pages.
+	 */
+	if (!page->pvr_dev)
+		return;
+
+	dev = from_pvr_device(page->pvr_dev)->dev;
+
+	dma_sync_single_for_device(dev, page->dma_addr,
+				   PVR_VM_BACKING_PAGE_SIZE, DMA_TO_DEVICE);
+}
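
[Putting the three helpers together, the intended lifecycle is init, mutate through @host_ptr, sync, fini. A minimal hedged sketch; the function is hypothetical and elides the page-table types that actually own these pages.

	static int backing_page_lifecycle_example(struct pvr_device *pvr_dev)
	{
		struct pvr_vm_backing_page page = {};
		int err;

		err = pvr_vm_backing_page_init(&page, pvr_dev);
		if (err)
			return err;

		/* Populate the table through the CPU mapping... */
		memset(page.host_ptr, 0, PVR_VM_BACKING_PAGE_SIZE);

		/* ...and flush to the device only once all updates are done. */
		pvr_vm_backing_page_sync(&page);

		pvr_vm_backing_page_fini(&page);

		return 0;
	}
]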
+
+/**
+ * DOC: Raw page tables
+ */
+
+#define PVR_PAGE_TABLE_TYPEOF_ENTRY(level_) \
+	typeof_member(struct pvr_page_table_l##level_##_entry_raw, val)
+
+#define PVR_PAGE_TABLE_FIELD_GET(level_, name_, field_, entry_)           \
+	(((entry_).val &                                           \
+	  ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK) >> \
+	 ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT)
+
+#define PVR_PAGE_TABLE_FIELD_PREP(level_, name_, field_, val_)            \
+	((((PVR_PAGE_TABLE_TYPEOF_ENTRY(level_))(val_))            \
+	  << ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT) & \
+	 ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK)
+
+/**
+ * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ *    :widths: 1 5
+ *    :stub-columns: 1
+ *
+ *    * - 31..4
+ *      - **Level 1 Page Table Base Address:** Bits 39..12 of the L1
+ *        page table base address, which is 4KiB aligned.
+ *
+ *    * - 3..2
+ *      - *(reserved)*
+ *
+ *    * - 1
+ *      - **Pending:** When valid bit is not set, indicates that a valid
+ *        entry is pending and the MMU should wait for the driver to map
+ *        the entry. This is used to support page demand mapping of
+ *        memory.
+ *
+ *    * - 0
+ *      - **Valid:** Indicates that the entry contains a valid L1 page
+ *        table. If the valid bit is not set, then an attempted use of
+ *        the page would result in a page fault.
+ */
+struct pvr_page_table_l2_entry_raw {
+	u32 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_entry_raw) * 8 ==
+	      ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE);
+
+static __always_inline bool
+pvr_page_table_l2_entry_raw_is_valid(struct pvr_page_table_l2_entry_raw entry)
+{
+	return PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2
+ *                                     page table.
+ * @entry: Target raw level 2 page table entry.
+ * @child_table_dma_addr: DMA address of the level 1 page table to be
+ *                        associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of %ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE.
+ */
+static __always_inline void
+pvr_page_table_l2_entry_raw_set(struct pvr_page_table_l2_entry_raw *entry,
+				dma_addr_t child_table_dma_addr)
+{
+	child_table_dma_addr >>= ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+
+	entry->val =
+		PVR_PAGE_TABLE_FIELD_PREP(2, PC, VALID, true) |
+		PVR_PAGE_TABLE_FIELD_PREP(2, PC, ENTRY_PENDING, false) |
+		PVR_PAGE_TABLE_FIELD_PREP(2, PC, PD_BASE, child_table_dma_addr);
+}
+
+static __always_inline void
+pvr_page_table_l2_entry_raw_clear(struct pvr_page_table_l2_entry_raw *entry)
+{
+	entry->val = 0;
+}
+
+/**
+ * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ *    :widths: 1 5
+ *    :stub-columns: 1
+ *
+ *    * - 63..41
+ *      - *(reserved)*
+ *
+ *    * - 40
+ *      - **Pending:** When valid bit is not set, indicates that a valid entry
+ *        is pending and the MMU should wait for the driver to map the entry.
+ *        This is used to support page demand mapping of memory.
+ *
+ *    * - 39..5
+ *      - **Level 0 Page Table Base Address:** The way this value is
+ *        interpreted depends on the page size. Bits not specified in the
+ *        table below (e.g. bits 11..5 for page size 4KiB) should be
+ *        considered reserved.
+ *
+ *        This table shows the bits used in an L1 page table entry to
+ *        represent the Physical Table Base Address for a given Page Size.
+ *        Since each L1 page table entry covers 2MiB of address space, the
+ *        maximum page size is 2MiB.
+ *
+ *        .. flat-table::
+ *           :widths: 1 1 1 1
+ *           :header-rows: 1
+ *           :stub-columns: 1
+ *
+ *           * - Page size
+ *             - L0 page table base address bits
+ *             - Number of L0 page table entries
+ *             - Size of L0 page table
+ *
+ *           * - 4KiB
+ *             - 39..12
+ *             - 512
+ *             - 4KiB
+ *
+ *           * - 16KiB
+ *             - 39..10
+ *             - 128
+ *             - 1KiB
+ *
+ *           * - 64KiB
+ *             - 39..8
+ *             - 32
+ *             - 256B
+ *
+ *           * - 256KiB
+ *             - 39..6
+ *             - 8
+ *             - 64B
+ *
+ *           * - 1MiB
+ *             - 39..5 (4 = '0')
+ *             - 2
+ *             - 16B
+ *
+ *           * - 2MiB
+ *             - 39..5 (4..3 = '00')
+ *             - 1
+ *             - 8B
+ *
+ *    * - 4
+ *      - *(reserved)*
+ *
+ *    * - 3..1
+ *      - **Page Size:** Sets the page size, from 4KiB to 2MiB.
+ *
+ *    * - 0
+ *      - **Valid:** Indicates that the entry contains a valid L0 page table.
+ *        If the valid bit is not set, then an attempted use of the page would
+ *        result in a page fault.
+ */
+struct pvr_page_table_l1_entry_raw {
+	u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_entry_raw) * 8 ==
+	      ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE);
+
+static __always_inline bool
+pvr_page_table_l1_entry_raw_is_valid(struct pvr_page_table_l1_entry_raw entry)
+{
+	return PVR_PAGE_TABLE_FIELD_GET(1, PD, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1
+ *                                     page table.
+ * @entry: Target raw level 1 page table entry.
+ * @child_table_dma_addr: DMA address of the level 0 page table to be
+ *                        associated with @entry.
+ *
+ * When calling this function, @child_table_dma_addr must be a valid DMA
+ * address and a multiple of 4 KiB.
+ */
+static void
+pvr_page_table_l1_entry_raw_set(struct pvr_page_table_l1_entry_raw *entry,
+				dma_addr_t child_table_dma_addr)
+{
+	entry->val = PVR_PAGE_TABLE_FIELD_PREP(1, PD, VALID, true) |
+		     PVR_PAGE_TABLE_FIELD_PREP(1, PD, ENTRY_PENDING, false) |
+		     PVR_PAGE_TABLE_FIELD_PREP(1, PD, PAGE_SIZE,
+					       ROGUE_MMUCTRL_PAGE_SIZE_X) |
+		     /*
+		      * The use of a 4K-specific macro here is correct. It is
+		      * a future optimization to allocate sub-host-page-sized
+		      * blocks for individual tables, so the condition that any
+		      * page table address is aligned to the size of the
+		      * largest (a 4KB) table currently holds.
+		      */
+		     (child_table_dma_addr &
+		      ~ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK);
+}
+
+static __always_inline void
+pvr_page_table_l1_entry_raw_clear(struct pvr_page_table_l1_entry_raw *entry)
+{
+	entry->val = 0;
+}
+
+/**
+ * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table.
+ * @val: The raw value of this entry.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE.
+ *
+ * The value stored in this structure can be decoded using the following bitmap:
+ *
+ * .. flat-table::
+ *    :widths: 1 5
+ *    :stub-columns: 1
+ *
+ *    * - 63
+ *      - *(reserved)*
+ *
+ *    * - 62
+ *      - **PM/FW Protect:** Indicates a protected region which only the
+ *        Parameter Manager (PM) or firmware processor can write to.
+ *
+ *    * - 61..40
+ *      - **VP Page (High):** Virtual-physical page used for Parameter Manager
+ *        (PM) memory. This field is only used if the additional level of PB
+ *        virtualization is enabled. The VP Page field is needed by the PM in
+ *        order to correctly reconstitute the free lists after render
+ *        completion. This (High) field holds bits 39..18 of the value; the
+ *        Low field holds bits 17..12. Bits 11..0 are always zero because the
+ *        value is always aligned to the 4KiB page size.
+ *
+ *    * - 39..12
+ *      - **Physical Page Address:** The way this value is interpreted depends
+ *        on the page size. Bits not specified in the table below (e.g. bits
+ *        20..12 for page size 2MiB) should be considered reserved.
+ *
+ *        This table shows the bits used in an L0 page table entry to represent
+ *        the Physical Page Address for a given page size (as defined in the
+ *        associated L1 page table entry).
+ *
+ *        .. flat-table::
+ *           :widths: 1 1
+ *           :header-rows: 1
+ *           :stub-columns: 1
+ *
+ *           * - Page size
+ *             - Physical address bits
+ *
+ *           * - 4KiB
+ *             - 39..12
+ *
+ *           * - 16KiB
+ *             - 39..14
+ *
+ *           * - 64KiB
+ *             - 39..16
+ *
+ *           * - 256KiB
+ *             - 39..18
+ *
+ *           * - 1MiB
+ *             - 39..20
+ *
+ *           * - 2MiB
+ *             - 39..21
+ *
+ *    * - 11..6
+ *      - **VP Page (Low):** Continuation of VP Page (High).
+ *
+ *    * - 5
+ *      - **Pending:** When valid bit is not set, indicates that a valid entry
+ *        is pending and the MMU should wait for the driver to map the entry.
+ *        This is used to support page demand mapping of memory.
+ *
+ *    * - 4
+ *      - **PM Src:** Set on Parameter Manager (PM) allocated page table
+ *        entries when indicated by the PM. Note that this bit will only be set
+ *        by the PM, not by the device driver.
+ *
+ *    * - 3
+ *      - **SLC Bypass Control:** Specifies requests to this page should bypass
+ *        the System Level Cache (SLC), if enabled in SLC configuration.
+ *
+ *    * - 2
+ *      - **Cache Coherency:** Indicates that the page is coherent (i.e. it
+ *        does not require a cache flush between operations on the CPU and the
+ *        device).
+ *
+ *    * - 1
+ *      - **Read Only:** If set, this bit indicates that the page is read only.
+ *        An attempted write to this page would result in a write-protection
+ *        fault.
+ *
+ *    * - 0
+ *      - **Valid:** Indicates that the entry contains a valid page. If the
+ *        valid bit is not set, then an attempted use of the page would result
+ *        in a page fault.
+ */
+struct pvr_page_table_l0_entry_raw {
+	u64 val;
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_entry_raw) * 8 ==
+	      ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE);
+
+/**
+ * struct pvr_page_flags_raw - The configurable flags from a single entry in a
+ *                             level 0 page table.
+ * @val: The raw value of these flags. Since these are a strict subset of
+ *       &struct pvr_page_table_l0_entry_raw, that type is used for the member here.
+ *
+ * The flags stored in this type are: PM/FW Protect; SLC Bypass Control; Cache
+ * Coherency, and Read Only (bits 62, 3, 2 and 1 respectively).
+ *
+ * This type should never be instantiated directly; instead use
+ * pvr_page_flags_raw_create() to ensure only valid bits of @val are set.
+ */
+struct pvr_page_flags_raw {
+	struct pvr_page_table_l0_entry_raw val;
+} __packed;
+static_assert(sizeof(struct pvr_page_flags_raw) ==
+	      sizeof(struct pvr_page_table_l0_entry_raw));
+
+static __always_inline bool
+pvr_page_table_l0_entry_raw_is_valid(struct pvr_page_table_l0_entry_raw entry)
+{
+	return PVR_PAGE_TABLE_FIELD_GET(0, PT, VALID, entry);
+}
+
+/**
+ * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0
+ *                                     page table.
+ * @entry: Target raw level 0 page table entry.
+ * @dma_addr: DMA address of the physical page to be associated with @entry.
+ * @flags: Options to be set on @entry.
+ *
+ * When calling this function, @dma_addr must be a valid DMA address and a
+ * multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ * The @flags parameter is directly assigned into @entry. It is the caller's
+ * responsibility to ensure that only bits specified in
+ * &struct pvr_page_flags_raw are set in @flags.
+ */
+static void
+pvr_page_table_l0_entry_raw_set(struct pvr_page_table_l0_entry_raw *entry,
+				dma_addr_t dma_addr,
+				struct pvr_page_flags_raw flags)
+{
+	entry->val = PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) |
+		     PVR_PAGE_TABLE_FIELD_PREP(0, PT, ENTRY_PENDING, false) |
+		     (dma_addr & ~ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK) |
+		     flags.val.val;
+}
+
+static __always_inline void
+pvr_page_table_l0_entry_raw_clear(struct pvr_page_table_l0_entry_raw *entry)
+{
+	entry->val = 0;
+}
+
+/**
+ * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page
+ *                               table entry.
+ * @read_only: This page is read-only (see: Read Only).
+ * @cache_coherent: This page does not require cache flushes (see: Cache
+ *                  Coherency).
+ * @slc_bypass: This page bypasses the device cache (see: SLC Bypass Control).
+ * @pm_fw_protect: This page is only for use by the firmware or Parameter
+ *                 Manager (see: PM/FW Protect).
+ *
+ * For more details on the use of these four options, see their respective
+ * entries in the table under &struct pvr_page_table_l0_entry_raw.
+ *
+ * Return:
+ * A new &struct pvr_page_flags_raw instance which can be passed directly to
+ * pvr_page_table_l0_entry_raw_set() or pvr_page_table_l0_insert().
+ */
+static struct pvr_page_flags_raw
+pvr_page_flags_raw_create(bool read_only, bool cache_coherent, bool slc_bypass,
+			  bool pm_fw_protect)
+{
+	struct pvr_page_flags_raw flags;
+
+	flags.val.val =
+		PVR_PAGE_TABLE_FIELD_PREP(0, PT, READ_ONLY, read_only) |
+		PVR_PAGE_TABLE_FIELD_PREP(0, PT, CC, cache_coherent) |
+		PVR_PAGE_TABLE_FIELD_PREP(0, PT, SLC_BYPASS_CTRL, slc_bypass) |
+		PVR_PAGE_TABLE_FIELD_PREP(0, PT, PM_META_PROTECT, pm_fw_protect);
+
+	return flags;
+}
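+
+/*
+ * Usage sketch (illustrative only; the variables are assumed placeholders):
+ * build flags for a read-only, cache-coherent page, then write them into a
+ * level 0 entry alongside its DMA address.
+ *
+ * .. code-block:: c
+ *
+ *    struct pvr_page_flags_raw flags =
+ *            pvr_page_flags_raw_create(true, true, false, false);
+ *
+ *    pvr_page_table_l0_entry_raw_set(entry, dma_addr, flags);
+ */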
+
+/**
+ * struct pvr_page_table_l2_raw - The raw data of a level 2 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_VM_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l2_raw {
+	/** @entries: The raw values of this table. */
+	struct pvr_page_table_l2_entry_raw
+		entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l2_raw) == PVR_VM_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l1_raw - The raw data of a level 1 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_VM_BACKING_PAGE_SIZE.
+ */
+struct pvr_page_table_l1_raw {
+	/** @entries: The raw values of this table. */
+	struct pvr_page_table_l1_entry_raw
+		entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l1_raw) == PVR_VM_BACKING_PAGE_SIZE);
+
+/**
+ * struct pvr_page_table_l0_raw - The raw data of a level 0 page table.
+ *
+ * This type is a structure for type-checking purposes. At compile-time, its
+ * size is checked against %PVR_VM_BACKING_PAGE_SIZE.
+ *
+ * .. caution::
+ *
+ *    The size of level 0 page tables is variable depending on the page size
+ *    specified in the associated level 1 page table entry. Since the device
+ *    page size in use is pegged to the host page size, it cannot vary at
+ *    runtime. This structure is therefore only defined to contain the required
+ *    number of entries for the current device page size. **You should never
+ *    read or write beyond the last supported entry.**
+ */
+struct pvr_page_table_l0_raw {
+	/** @entries: The raw values of this table. */
+	struct pvr_page_table_l0_entry_raw
+		entries[ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X];
+} __packed;
+static_assert(sizeof(struct pvr_page_table_l0_raw) <= PVR_VM_BACKING_PAGE_SIZE);
+
+/**
+ * DOC: Mirror page tables
+ */
+
+/*
+ * We pre-declare these types because they reference each other through
+ * pointers.
+ */
+struct pvr_page_table_l2;
+struct pvr_page_table_l1;
+struct pvr_page_table_l0;
+
+/**
+ * struct pvr_page_table_ptr - A reference to a single physical page as indexed
+ *                             by the page table structure.
+ * @pvr_dev: The PowerVR device associated with the VM context the
+ *           pointer is traversing.
+ * @l1_free_list: List of level 1 page tables to free when the pointer is destroyed.
+ * @l0_free_list: List of level 0 page tables to free when the pointer is destroyed.
+ * @l2_table: A cached handle to the level 2 page table the pointer is
+ *            currently traversing.
+ * @l1_table: A cached handle to the level 1 page table the pointer is
+ *            currently traversing.
+ * @l0_table: A cached handle to the level 0 page table the pointer is
+ *            currently traversing.
+ * @l2_idx: Index into the level 2 page table the pointer is currently
+ *          referencing.
+ * @l1_idx: Index into the level 1 page table the pointer is currently
+ *          referencing.
+ * @l0_idx: Index into the level 0 page table the pointer is currently
+ *          referencing.
+ * @sync_level_required: The maximum level of the page table tree structure
+ *                       which has (possibly) been modified since it was last
+ *                       flushed to the device.
+ *
+ *                       This field should only be set with
+ *                       pvr_page_table_ptr_require_sync() or indirectly by
+ *                       pvr_page_table_ptr_sync_partial().
+ */
+struct pvr_page_table_ptr {
+	struct pvr_device *pvr_dev;
+	struct pvr_page_table_l1 *l1_free_list;
+	struct pvr_page_table_l0 *l0_free_list;
+	struct pvr_page_table_l2 *l2_table;
+	struct pvr_page_table_l1 *l1_table;
+	struct pvr_page_table_l0 *l0_table;
+	u16 l2_idx;
+	u16 l1_idx;
+	u16 l0_idx;
+	s8 sync_level_required;
+};
+
+/**
+ * struct pvr_page_table_l2 - A wrapped level 2 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l2_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l2_get_entry_raw().
+ *
+ * A level 2 page table forms the root of the page table tree structure, so
+ * this type has no &parent or &parent_idx members.
+ */
+struct pvr_page_table_l2 {
+	/**
+	 * @entries: The children of this node in the page table tree
+	 * structure. These are also mirror tables. The indexing of this array
+	 * is identical to that of the raw equivalent
+	 * (&pvr_page_table_l1_raw.entries).
+	 */
+	struct pvr_page_table_l1 *entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE];
+
+	/**
+	 * @backing_page: A handle to the memory which holds the raw
+	 * equivalent of this table. **For internal use only.**
+	 */
+	struct pvr_vm_backing_page backing_page;
+
+	/**
+	 * @entry_count: The current number of valid entries (that we know of)
+	 * in this table. This value is essentially a refcount - the table is
+	 * destroyed when this value is decremented to zero by
+	 * pvr_page_table_l2_remove().
+	 */
+	u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l2_init() - Initialize a level 2 page table.
+ * @table: Target level 2 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while initializing &table->backing_page using
+ *    pvr_vm_backing_page_init().
+ */
+static __always_inline int
+pvr_page_table_l2_init(struct pvr_page_table_l2 *table,
+		       struct pvr_device *pvr_dev)
+{
+	return pvr_vm_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l2_fini() - Tear down a level 2 page table.
+ * @table: Target level 2 page table.
+ *
+ * It is an error to attempt to use @table after calling this function.
+ */
+static __always_inline void
+pvr_page_table_l2_fini(struct pvr_page_table_l2 *table)
+{
+	pvr_vm_backing_page_fini(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the
+ *                            device.
+ * @table: Target level 2 page table.
+ *
+ * This is just a thin wrapper around pvr_vm_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l2_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 1 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l1_sync() *before* calling this function.
+ */
+static __always_inline void
+pvr_page_table_l2_sync(struct pvr_page_table_l2 *table)
+{
+	pvr_vm_backing_page_sync(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2
+ *                               page table.
+ * @table: Target level 2 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l2_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l2_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static __always_inline struct pvr_page_table_l2_raw *
+pvr_page_table_l2_get_raw(struct pvr_page_table_l2 *table)
+{
+	return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent
+ *                                     of a mirror level 2 page table.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 2 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l2_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 2 page table entry.
+ */
+static __always_inline struct pvr_page_table_l2_entry_raw *
+pvr_page_table_l2_get_entry_raw(struct pvr_page_table_l2 *table, u16 idx)
+{
+	return &pvr_page_table_l2_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is
+ *                                      marked as valid.
+ * @table: Target level 2 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static __always_inline bool
+pvr_page_table_l2_entry_is_valid(struct pvr_page_table_l2 *table, u16 idx)
+{
+	struct pvr_page_table_l2_entry_raw entry_raw =
+		*pvr_page_table_l2_get_entry_raw(table, idx);
+
+	return pvr_page_table_l2_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l1 - A wrapped level 1 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l1_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l1_get_entry_raw().
+ */
+struct pvr_page_table_l1 {
+	/**
+	 * @entries: The children of this node in the page table tree
+	 * structure. These are also mirror tables. The indexing of this array
+	 * is identical to that of the raw equivalent
+	 * (&pvr_page_table_l0_raw.entries).
+	 */
+	struct pvr_page_table_l0 *entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE];
+
+	/**
+	 * @backing_page: A handle to the memory which holds the raw
+	 * equivalent of this table. **For internal use only.**
+	 */
+	struct pvr_vm_backing_page backing_page;
+
+	union {
+		/**
+		 * @parent: The parent of this node in the page table tree structure.
+		 *
+		 * This is also a mirror table.
+		 *
+		 * Only valid when the L1 page table is active. When the L1 page table
+		 * has been removed and queued for destruction, the next_free field
+		 * should be used instead.
+		 */
+		struct pvr_page_table_l2 *parent;
+
+		/**
+		 * @next_free: Pointer to the next L1 page table to free.
+		 *
+		 * Used to form a linked list of L1 page tables queued for destruction.
+		 * Only valid when the page table has been removed and queued for
+		 * destruction.
+		 */
+		struct pvr_page_table_l1 *next_free;
+	};
+
+	/**
+	 * @parent_idx: The index of the entry in the parent table (see
+	 * @parent) which corresponds to this table.
+	 */
+	u16 parent_idx;
+
+	/**
+	 * @entry_count: The current number of valid entries (that we know of)
+	 * in this table. This value is essentially a refcount - the table is
+	 * destroyed when this value is decremented to zero by
+	 * pvr_page_table_l1_remove().
+	 */
+	u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l1_init() - Initialize a level 1 page table.
+ * @table: Target level 1 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l2_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while initializing &table->backing_page using
+ *    pvr_vm_backing_page_init().
+ */
+static __always_inline int
+pvr_page_table_l1_init(struct pvr_page_table_l1 *table,
+		       struct pvr_device *pvr_dev)
+{
+	table->parent_idx = PVR_IDX_INVALID;
+
+	return pvr_vm_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l1_fini() - Tear down a level 1 page table.
+ * @table: Target level 1 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l2_remove(), which must
+ * be called *before* pvr_page_table_l1_fini().
+ */
+static __always_inline void
+pvr_page_table_l1_fini(struct pvr_page_table_l1 *table)
+{
+	pvr_vm_backing_page_fini(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the
+ *                            device.
+ * @table: Target level 1 page table.
+ *
+ * This is just a thin wrapper around pvr_vm_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l1_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child level 0 page tables of @table also need to be flushed, this should
+ * be done first using pvr_page_table_l0_sync() *before* calling this function.
+ */
+static __always_inline void
+pvr_page_table_l1_sync(struct pvr_page_table_l1 *table)
+{
+	pvr_vm_backing_page_sync(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1
+ *                               page table.
+ * @table: Target level 1 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l1_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l1_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static __always_inline struct pvr_page_table_l1_raw *
+pvr_page_table_l1_get_raw(struct pvr_page_table_l1 *table)
+{
+	return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent
+ *                                     of a mirror level 1 page table.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 1 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l1_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer.
+ *
+ * Return:
+ * A pointer to the requested raw level 1 page table entry.
+ */
+static __always_inline struct pvr_page_table_l1_entry_raw *
+pvr_page_table_l1_get_entry_raw(struct pvr_page_table_l1 *table, u16 idx)
+{
+	return &pvr_page_table_l1_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is
+ *                                      marked as valid.
+ * @table: Target level 1 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static __always_inline bool
+pvr_page_table_l1_entry_is_valid(struct pvr_page_table_l1 *table, u16 idx)
+{
+	struct pvr_page_table_l1_entry_raw entry_raw =
+		*pvr_page_table_l1_get_entry_raw(table, idx);
+
+	return pvr_page_table_l1_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_page_table_l0 - A wrapped level 0 page table.
+ *
+ * To access the raw part of this table, use pvr_page_table_l0_get_raw().
+ * Alternatively to access a raw entry directly, use
+ * pvr_page_table_l0_get_entry_raw().
+ *
+ * There is no mirror representation of an individual page, so this type has no
+ * &entries member.
+ */
+struct pvr_page_table_l0 {
+	/**
+	 * @backing_page: A handle to the memory which holds the raw
+	 * equivalent of this table. **For internal use only.**
+	 */
+	struct pvr_vm_backing_page backing_page;
+
+	union {
+		/**
+		 * @parent: The parent of this node in the page table tree structure.
+		 *
+		 * This is also a mirror table.
+		 *
+		 * Only valid when the L0 page table is active. When the L0 page table
+		 * has been removed and queued for destruction, the next_free field
+		 * should be used instead.
+		 */
+		struct pvr_page_table_l1 *parent;
+
+		/**
+		 * @next_free: Pointer to the next L0 page table to free.
+		 *
+		 * Used to form a linked list of L0 page tables queued for destruction.
+		 *
+		 * Only valid when the page table has been removed and queued for
+		 * destruction.
+		 */
+		struct pvr_page_table_l0 *next_free;
+	};
+
+	/**
+	 * @parent_idx: The index of the entry in the parent table (see
+	 * @parent) which corresponds to this table.
+	 */
+	u16 parent_idx;
+
+	/**
+	 * @entry_count: The current number of valid entries (that we know of)
+	 * in this table. This value is essentially a refcount - the table is
+	 * destroyed when this value is decremented to zero by
+	 * pvr_page_table_l0_remove().
+	 */
+	u16 entry_count;
+};
+
+/**
+ * pvr_page_table_l0_init() - Initialize a level 0 page table.
+ * @table: Target level 0 page table.
+ * @pvr_dev: Target PowerVR device
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l1_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while initializing &table->backing_page using
+ *    pvr_vm_backing_page_init().
+ */
+static __always_inline int
+pvr_page_table_l0_init(struct pvr_page_table_l0 *table,
+		       struct pvr_device *pvr_dev)
+{
+	table->parent_idx = PVR_IDX_INVALID;
+
+	return pvr_vm_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l0_fini() - Tear down a level 0 page table.
+ * @table: Target level 0 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l1_remove(), which must
+ * be called *before* pvr_page_table_l0_fini().
+ */
+static __always_inline void
+pvr_page_table_l0_fini(struct pvr_page_table_l0 *table)
+{
+	pvr_vm_backing_page_fini(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
+ *                            device.
+ * @table: Target level 0 page table.
+ *
+ * This is just a thin wrapper around pvr_vm_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l0_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child pages of @table also need to be flushed, this should be done first
+ * using a DMA sync function (e.g. dma_sync_sg_for_device()) *before* calling
+ * this function.
+ */
+static __always_inline void
+pvr_page_table_l0_sync(struct pvr_page_table_l0 *table)
+{
+	pvr_vm_backing_page_sync(&table->backing_page);
+}
+
+/**
+ * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
+ *                               page table.
+ * @table: Target level 0 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l0_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l0_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static __always_inline struct pvr_page_table_l0_raw *
+pvr_page_table_l0_get_raw(struct pvr_page_table_l0 *table)
+{
+	return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
+ *                                     of a mirror level 0 page table.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 0 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l0_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer. This is especially important for level 0 page tables, which
+ * can have a variable number of entries.
+ *
+ * Return:
+ * A pointer to the requested raw level 0 page table entry.
+ */
+static __always_inline struct pvr_page_table_l0_entry_raw *
+pvr_page_table_l0_get_entry_raw(struct pvr_page_table_l0 *table, u16 idx)
+{
+	return &pvr_page_table_l0_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
+ *                                      marked as valid.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static __always_inline bool
+pvr_page_table_l0_entry_is_valid(struct pvr_page_table_l0 *table, u16 idx)
+{
+	struct pvr_page_table_l0_entry_raw entry_raw =
+		*pvr_page_table_l0_get_entry_raw(table, idx);
+
+	return pvr_page_table_l0_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
+ *                              table into a level 2 page table.
+ * @ptr: Page table pointer pointing to the entry to insert the L1 page table into.
+ * @child_table: Target level 1 page table to be referenced by the new entry.
+ *
+ * The values of @ptr are not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L2 entry.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is not already a valid entry at @ptr.
+ */
+static void
+pvr_page_table_l2_insert(struct pvr_page_table_ptr *ptr,
+			 struct pvr_page_table_l1 *child_table)
+{
+	struct pvr_page_table_l2_entry_raw *entry_raw =
+		pvr_page_table_l2_get_entry_raw(ptr->l2_table, ptr->l2_idx);
+
+	pvr_page_table_l2_entry_raw_set(entry_raw,
+					child_table->backing_page.dma_addr);
+
+	child_table->parent = ptr->l2_table;
+	child_table->parent_idx = ptr->l2_idx;
+	ptr->l2_table->entries[ptr->l2_idx] = child_table;
+	++ptr->l2_table->entry_count;
+	ptr->l1_table = child_table;
+}
+
+/**
+ * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
+ *                              table.
+ * @ptr: Page table pointer pointing to the L2 entry to remove.
+ *
+ * The values of @ptr are not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L2 entry before calling this function.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is a valid entry pointed to by @ptr. It is **not** a no-op to call
+ * this function twice, and subsequent calls **will** place the table into an
+ * invalid state.
+ */
+static void
+pvr_page_table_l2_remove(struct pvr_page_table_ptr *ptr)
+{
+	struct pvr_page_table_l2_entry_raw *entry_raw =
+		pvr_page_table_l2_get_entry_raw(ptr->l2_table, ptr->l1_table->parent_idx);
+
+	WARN_ON(ptr->l1_table->parent != ptr->l2_table);
+
+	pvr_page_table_l2_entry_raw_clear(entry_raw);
+
+	ptr->l2_table->entries[ptr->l1_table->parent_idx] = NULL;
+	ptr->l1_table->parent_idx = PVR_IDX_INVALID;
+	ptr->l1_table->next_free = ptr->l1_free_list;
+	ptr->l1_free_list = ptr->l1_table;
+	ptr->l1_table = NULL;
+
+	--ptr->l2_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
+ *                              table into a level 1 page table.
+ * @ptr: Page table pointer pointing to the entry to insert the L0 page table into.
+ * @child_table: L0 page table to insert.
+ *
+ * The value of @ptr is not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L1 entry before calling this function.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is not already a valid entry at @ptr.
+ */
+static void
+pvr_page_table_l1_insert(struct pvr_page_table_ptr *ptr,
+			 struct pvr_page_table_l0 *child_table)
+{
+	struct pvr_page_table_l1_entry_raw *entry_raw =
+		pvr_page_table_l1_get_entry_raw(ptr->l1_table, ptr->l1_idx);
+
+	pvr_page_table_l1_entry_raw_set(entry_raw,
+					child_table->backing_page.dma_addr);
+
+	child_table->parent = ptr->l1_table;
+	child_table->parent_idx = ptr->l1_idx;
+	ptr->l1_table->entries[ptr->l1_idx] = child_table;
+	++ptr->l1_table->entry_count;
+	ptr->l0_table = child_table;
+}
+
+/**
+ * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page
+ *                              table.
+ * @ptr: Page table pointer pointing to the L1 entry to remove.
+ *
+ * If this function results in the L1 table becoming empty, it will be removed
+ * from its parent level 2 page table and destroyed.
+ *
+ * The values of @ptr are not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L1 entry before calling this function.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is a valid entry pointed to by @ptr. It is **not** a no-op to call
+ * this function twice, and subsequent calls **will** place the table into an
+ * invalid state.
+ */
+static void
+pvr_page_table_l1_remove(struct pvr_page_table_ptr *ptr)
+{
+	struct pvr_page_table_l1_entry_raw *entry_raw =
+		pvr_page_table_l1_get_entry_raw(ptr->l0_table->parent,
+						ptr->l0_table->parent_idx);
+
+	WARN_ON(ptr->l0_table->parent != ptr->l1_table);
+
+	pvr_page_table_l1_entry_raw_clear(entry_raw);
+
+	ptr->l1_table->entries[ptr->l0_table->parent_idx] = NULL;
+	ptr->l0_table->parent_idx = PVR_IDX_INVALID;
+	ptr->l0_table->next_free = ptr->l0_free_list;
+	ptr->l0_free_list = ptr->l0_table;
+	ptr->l0_table = NULL;
+
+	if (--ptr->l1_table->entry_count == 0) {
+		/* Clear the parent L2 page table entry. */
+		if (ptr->l1_table->parent_idx != PVR_IDX_INVALID)
+			pvr_page_table_l2_remove(ptr);
+	}
+}
+
+/**
+ * pvr_page_table_l0_insert() - Insert an entry referring to a physical page
+ *                              into a level 0 page table.
+ * @ptr: Page table pointer pointing to the L0 entry to insert.
+ * @dma_addr: Target DMA address to be referenced by the new entry.
+ * @flags: Page options to be stored in the new entry.
+ *
+ * The values of @ptr are not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L0 entry before calling this function.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is not already a valid entry in the L0 table @ptr points to.
+ */
+static void
+pvr_page_table_l0_insert(struct pvr_page_table_ptr *ptr,
+			 dma_addr_t dma_addr, struct pvr_page_flags_raw flags)
+{
+	struct pvr_page_table_l0_entry_raw *entry_raw =
+		pvr_page_table_l0_get_entry_raw(ptr->l0_table, ptr->l0_idx);
+
+	pvr_page_table_l0_entry_raw_set(entry_raw, dma_addr, flags);
+
+	/*
+	 * There is no entry to set here - we don't keep a mirror of
+	 * individual pages.
+	 */
+
+	++ptr->l0_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page
+ *                              table.
+ * @ptr: Page table pointer pointing to the L0 entry to remove.
+ *
+ * If this function results in the L0 table becoming empty, it will be removed
+ * from its parent L1 page table and destroyed.
+ *
+ * The values of @ptr are not checked here; it is the caller's responsibility to
+ * ensure @ptr points to a valid L0 entry before calling this function.
+ *
+ * This function is unchecked. Do not call it unless you're absolutely sure
+ * there is a valid entry pointed to by @ptr. It is **not** a no-op to call
+ * this function twice, and subsequent calls **will** place the table into an
+ * invalid state.
+ */
+static void
+pvr_page_table_l0_remove(struct pvr_page_table_ptr *ptr)
+{
+	struct pvr_page_table_l0_entry_raw *entry_raw =
+		pvr_page_table_l0_get_entry_raw(ptr->l0_table, ptr->l0_idx);
+
+	pvr_page_table_l0_entry_raw_clear(entry_raw);
+
+	/*
+	 * There is no entry to clear here - we don't keep a mirror of
+	 * individual pages.
+	 */
+
+	if (--ptr->l0_table->entry_count == 0) {
+		/* Clear the parent L1 page table entry. */
+		if (ptr->l0_table->parent_idx != PVR_IDX_INVALID)
+			pvr_page_table_l1_remove(ptr);
+	}
+}
+
+/**
+ * DOC: Page table index utilities
+ */
+/**
+ * DOC: Page table index utilities (constants)
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_SPACE_SIZE
+ *
+ *    Size of device-virtual address space which can be represented in the page
+ *    table structure.
+ *
+ *    This value is checked at runtime against
+ *    &pvr_device_features.virtual_address_space_bits by
+ *    pvr_vm_create_context(), which will return an error if the feature value
+ *    does not match this constant.
+ *
+ *    .. admonition:: Future work
+ *
+ *       It should be possible to support other values of
+ *       &pvr_device_features.virtual_address_space_bits, but so far no
+ *       hardware has been created which advertises an unsupported value.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_BITS
+ *
+ *    Number of bits needed to represent any value less than
+ *    %PVR_PAGE_TABLE_ADDR_SPACE_SIZE exactly.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_MASK
+ *
+ *    Bitmask of device-virtual addresses which are valid in the page table
+ *    structure.
+ *
+ *    This value is derived from %PVR_PAGE_TABLE_ADDR_SPACE_SIZE, so the same
+ *    notes on that constant apply here.
+ */
+#define PVR_PAGE_TABLE_ADDR_SPACE_SIZE SZ_1T
+#define PVR_PAGE_TABLE_ADDR_BITS __ffs(PVR_PAGE_TABLE_ADDR_SPACE_SIZE)
+#define PVR_PAGE_TABLE_ADDR_MASK (PVR_PAGE_TABLE_ADDR_SPACE_SIZE - 1)
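+
+/*
+ * Worked example (illustrative): %PVR_PAGE_TABLE_ADDR_SPACE_SIZE is 1TiB,
+ * i.e. 1 << 40, so __ffs() on that power of two gives
+ * PVR_PAGE_TABLE_ADDR_BITS = 40 and PVR_PAGE_TABLE_ADDR_MASK =
+ * 0xffffffffff, covering device-virtual address bits 39..0.
+ */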
+
+/**
+ * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a
+ *                           device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 2 page table corresponding to @device_addr.
+ */
+static __always_inline u16
+pvr_page_table_l2_idx(u64 device_addr)
+{
+	return (device_addr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >>
+	       ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a
+ *                           device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 1 page table corresponding to @device_addr.
+ */
+static __always_inline u16
+pvr_page_table_l1_idx(u64 device_addr)
+{
+	return (device_addr & ~ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK) >>
+	       ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+}
+
+/**
+ * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a
+ *                           device-virtual address.
+ * @device_addr: Target device-virtual address.
+ *
+ * This function does not perform any bounds checking - it is the caller's
+ * responsibility to ensure that @device_addr is valid before interpreting
+ * the result.
+ *
+ * Return:
+ * The index into a level 0 page table corresponding to @device_addr.
+ */
+static __always_inline u16
+pvr_page_table_l0_idx(u64 device_addr)
+{
+	return (device_addr & ~ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK) >>
+	       ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT;
+}
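+
+/*
+ * Worked example (illustrative, assuming the 4KiB device page layout in
+ * which the three indices occupy address bits 39..30, 29..21 and 20..12
+ * respectively):
+ *
+ * .. code-block:: c
+ *
+ *    u64 device_addr = 0x1234567000;
+ *
+ *    pvr_page_table_l2_idx(device_addr); // bits 39..30 -> 0x48 (72)
+ *    pvr_page_table_l1_idx(device_addr); // bits 29..21 -> 0x1a2 (418)
+ *    pvr_page_table_l0_idx(device_addr); // bits 20..12 -> 0x167 (359)
+ */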
+
+/**
+ * DOC: High-level page table operations
+ */
+
+/**
+ * pvr_page_table_l1_create_unchecked() - Create a level 1 page table and
+ *                                        insert it into a level 2 page table.
+ * @ptr: Page table pointer pointing to the entry to insert the L1 page table into.
+ *
+ * This function is unchecked. By using it, the caller is asserting that @ptr
+ * points to a valid L2 slot, and that this slot does not contain a valid entry.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENOMEM if allocation of a &struct pvr_page_table_l1 fails, or
+ *  * Any error encountered while initializing the new level 1 page table with
+ *    pvr_page_table_l1_init().
+ */
+static int
+pvr_page_table_l1_create_unchecked(struct pvr_page_table_ptr *ptr)
+{
+	struct pvr_page_table_l1 *table;
+	int err;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	err = pvr_page_table_l1_init(table, ptr->pvr_dev);
+	if (err)
+		goto err_free_table;
+
+	pvr_page_table_l2_insert(ptr, table);
+	return 0;
+
+err_free_table:
+	kfree(table);
+	return err;
+}
+
+/**
+ * pvr_page_table_l1_get_or_create() - Retrieves (optionally creating if
+ *                                     necessary) a level 1 page table from the
+ *                                     specified level 2 page table entry.
+ * @ptr: [IN] Page table pointer.
+ * @should_create: [IN] Specifies whether new page tables should be created
+ *                 when empty page table entries are encountered during
+ *                 traversal.
+ * @did_create: [OUT] Optional pointer to a flag which is set when
+ *              @should_create is %true and new page table entries are created.
+ *              In any other case, the value will not be modified.
+ *
+ * Return:
+ *  * 0 on success, or
+ *
+ *    If @should_create is %false:
+ *     * -%ENXIO if a level 1 page table would have been created.
+ *
+ *    If @should_create is %true:
+ *     * Any error encountered while creating the level 1 page table with
+ *       pvr_page_table_l1_create_unchecked() if one needs to be created.
+ */
+static int
+pvr_page_table_l1_get_or_create(struct pvr_page_table_ptr *ptr,
+				bool should_create, bool *did_create)
+{
+	int err;
+
+	if (pvr_page_table_l2_entry_is_valid(ptr->l2_table, ptr->l2_idx)) {
+		ptr->l1_table = ptr->l2_table->entries[ptr->l2_idx];
+		return 0;
+	}
+
+	if (!should_create)
+		return -ENXIO;
+
+	/* Safe, because we just verified the entry does not exist yet. */
+	err = pvr_page_table_l1_create_unchecked(ptr);
+	if (!err && did_create)
+		*did_create = true;
+
+	return err;
+}
+
+/**
+ * pvr_page_table_l0_create_unchecked() - Create a level 0 page table and
+ *                                        insert it into a level 1 page table.
+ * @ptr: Page table pointer pointing to the L1 entry to insert the L0 page table into.
+ *
+ * This function is unchecked. By using it, the caller is asserting that @ptr
+ * points to a valid L1 slot, and that slot does not contain a valid entry.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENOMEM if allocation of a &struct pvr_page_table_l0 fails, or
+ *  * Any error encountered while initializing the new level 0 page table with
+ *    pvr_page_table_l0_init().
+ */
+static int
+pvr_page_table_l0_create_unchecked(struct pvr_page_table_ptr *ptr)
+{
+	struct pvr_page_table_l0 *table;
+	int err;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	err = pvr_page_table_l0_init(table, ptr->pvr_dev);
+	if (err)
+		goto err_free_table;
+
+	pvr_page_table_l1_insert(ptr, table);
+	return 0;
+
+err_free_table:
+	kfree(table);
+	return err;
+}
+
+/**
+ * pvr_page_table_l0_get_or_create() - Retrieves (optionally creating if
+ *                                     necessary) a level 0 page table from the
+ *                                     specified level 1 page table entry.
+ * @ptr: [IN] Page table pointer.
+ * @should_create: [IN] Specifies whether new page tables should be created
+ *                 when empty page table entries are encountered during
+ *                 traversal.
+ * @did_create: [OUT] Optional pointer to a flag which is set when
+ *              @should_create is %true and new page table entries are created.
+ *              In any other case, the value will not be modified.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%ENXIO if @should_create is %false and a level 0 page table would have
+ *    been created, or
+ *  * Any error returned by pvr_page_table_l1_create_unchecked() if
+ *    @should_create is %true and a new level 0 page table needs to be created.
+ */
+static int
+pvr_page_table_l0_get_or_create(struct pvr_page_table_ptr *ptr,
+				bool should_create, bool *did_create)
+{
+	int err;
+
+	if (pvr_page_table_l1_entry_is_valid(ptr->l1_table, ptr->l1_idx)) {
+		ptr->l0_table = ptr->l1_table->entries[ptr->l1_idx];
+		return 0;
+	}
+
+	if (!should_create)
+		return -ENXIO;
+
+	/* Safe, because we just verified the entry does not exist yet. */
+	err = pvr_page_table_l0_create_unchecked(ptr);
+	if (!err && did_create)
+		*did_create = true;
+
+	return err;
+}
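+
+/*
+ * Probe sketch (illustrative): with @should_create set to %false, the
+ * get-or-create helpers act as pure lookups, so -%ENXIO distinguishes
+ * "nothing mapped here" from a genuine error. Passing %NULL for @did_create
+ * is valid since the flag is only written on creation.
+ *
+ * .. code-block:: c
+ *
+ *    int err = pvr_page_table_l0_get_or_create(ptr, false, NULL);
+ *
+ *    if (err == -ENXIO)
+ *            return false; // no L0 table exists at this address
+ */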
+
+/**
+ * DOC: Page table pointer
+ */
+/**
+ * DOC: Page table pointer (constants)
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_PTR_IN_SYNC
+ *
+ *    Negative value to indicate that a page table pointer is fully in sync
+ *    when assigned to &pvr_page_table_ptr->sync_level_required.
+ */
+#define PVR_PAGE_TABLE_PTR_IN_SYNC ((s8)(-1))
+
+/**
+ * pvr_page_table_ptr_require_sync() - Mark a page table pointer as requiring a
+ *                                     sync operation for the referenced page
+ *                                     tables up to a specified level.
+ * @ptr: Target page table pointer.
+ * @level: Maximum page table level for which a sync is required.
+ */
+static __always_inline void
+pvr_page_table_ptr_require_sync(struct pvr_page_table_ptr *ptr, s8 level)
+{
+	if (ptr->sync_level_required < level)
+		ptr->sync_level_required = level;
+}
+
+/**
+ * pvr_page_table_ptr_sync_manual() - Trigger a sync of some or all of the
+ *                                    page tables referenced by a page table
+ *                                    pointer.
+ * @ptr: Target page table pointer.
+ * @level: Maximum page table level to sync.
+ *
+ * Do not call this function directly. Instead use
+ * pvr_page_table_ptr_sync_partial() which is checked against the current
+ * value of &ptr->sync_level_required as set by
+ * pvr_page_table_ptr_require_sync().
+ */
+static void
+pvr_page_table_ptr_sync_manual(struct pvr_page_table_ptr *ptr, s8 level)
+{
+	/*
+	 * We sync the page table levels in ascending order (starting from the
+	 * leaf node) to ensure consistency.
+	 */
+
+	if (level < 0)
+		return;
+
+	if (ptr->l0_table)
+		pvr_page_table_l0_sync(ptr->l0_table);
+
+	if (level < 1)
+		return;
+
+	if (ptr->l1_table)
+		pvr_page_table_l1_sync(ptr->l1_table);
+
+	if (level < 2)
+		return;
+
+	pvr_page_table_l2_sync(ptr->l2_table);
+}
+
+/**
+ * pvr_page_table_ptr_sync_partial() - Trigger a sync of some or all of the
+ *                                     page tables referenced by a page table
+ *                                     pointer.
+ * @ptr: Target page table pointer.
+ * @level: Requested page table level to sync up to (inclusive).
+ *
+ * If @level is greater than the maximum level recorded by @ptr as requiring
+ * a sync operation, only the previously recorded maximum will be used.
+ *
+ * Additionally, if @level is greater than or equal to the maximum level
+ * recorded by @ptr as requiring a sync operation, that maximum level will be
+ * reset as a full sync will be performed. This is equivalent to calling
+ * pvr_page_table_ptr_sync().
+ */
+static void
+pvr_page_table_ptr_sync_partial(struct pvr_page_table_ptr *ptr, s8 level)
+{
+	/*
+	 * If the requested sync level is greater than or equal to the
+	 * currently required sync level, we do two things:
+	 *  * Don't waste time syncing levels we haven't previously marked as
+	 *    requiring a sync, and
+	 *  * Reset the required sync level since we are about to sync
+	 *    everything that was previously marked as requiring a sync.
+	 */
+	if (level >= ptr->sync_level_required) {
+		level = ptr->sync_level_required;
+		ptr->sync_level_required = PVR_PAGE_TABLE_PTR_IN_SYNC;
+	}
+
+	pvr_page_table_ptr_sync_manual(ptr, level);
+}
+
+/**
+ * pvr_page_table_ptr_sync() - Trigger a sync of every page table referenced by
+ *                             a page table pointer.
+ * @ptr: Target page table pointer.
+ *
+ * The maximum level marked internally as requiring a sync will be reset so
+ * that subsequent calls to this function will be no-ops unless @ptr is
+ * otherwise updated.
+ */
+static __always_inline void
+pvr_page_table_ptr_sync(struct pvr_page_table_ptr *ptr)
+{
+	pvr_page_table_ptr_sync_manual(ptr, ptr->sync_level_required);
+
+	ptr->sync_level_required = PVR_PAGE_TABLE_PTR_IN_SYNC;
+}
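+
+/*
+ * Sync protocol sketch (illustrative): callers record the highest page table
+ * level they have modified as they go, then flush once at the end. Levels
+ * are flushed leaf-first (L0, then L1, then L2).
+ *
+ * .. code-block:: c
+ *
+ *    pvr_page_table_ptr_require_sync(ptr, 0); // an L0 entry was written
+ *    pvr_page_table_ptr_require_sync(ptr, 2); // later, an L2 entry too
+ *
+ *    pvr_page_table_ptr_sync(ptr); // flushes L0, L1 and L2; resets state
+ */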
+
+/**
+ * pvr_page_table_ptr_load_tables() - Load pointers to tables in each level of
+ *                                    the page table tree structure needed to
+ *                                    reference the physical page referenced by
+ *                                    a page table pointer.
+ * @ptr: Target page table pointer.
+ * @should_create: Specifies whether new page tables should be created when
+ *                 empty page table entries are encountered during traversal.
+ * @load_level_required: Maximum page table level to load.
+ *
+ * If @should_create is %true, this function may modify the stored required
+ * sync level of @ptr as new page tables are created and inserted into their
+ * respective parents.
+ *
+ * Since there is only one root page table, it is technically incorrect to call
+ * this function with a value of @load_level_required greater than or equal to
+ * the root level number. However, this is not explicitly disallowed here.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error returned by pvr_page_table_l1_get_or_create() if
+ *    @load_level_required >= 1 except -%ENXIO, or
+ *  * Any error returned by pvr_page_table_l0_get_or_create() if
+ *    @load_level_required >= 0 except -%ENXIO.
+ */
+static int
+pvr_page_table_ptr_load_tables(struct pvr_page_table_ptr *ptr,
+			       bool should_create, s8 load_level_required)
+{
+	/* Either creation step may be skipped below, so start both clear. */
+	bool did_create_l1 = false;
+	bool did_create_l0 = false;
+	int err;
+
+	/* Clear tables we're about to fetch in case of error states. */
+	if (load_level_required >= 1)
+		ptr->l1_table = NULL;
+
+	if (load_level_required >= 0)
+		ptr->l0_table = NULL;
+
+	/* Get or create L1 page table. */
+	if (load_level_required >= 1) {
+		err = pvr_page_table_l1_get_or_create(ptr, should_create, &did_create_l1);
+		if (err) {
+			/*
+			 * If @should_create is %false and no L1 page table was
+			 * found, return early but without an error. Since
+			 * pvr_page_table_l1_get_or_create() can only return
+			 * -%ENXIO if @should_create is %false, there is no
+			 * need to check it here.
+			 */
+			if (err == -ENXIO)
+				err = 0;
+
+			goto err_out;
+		}
+	}
+
+	/* Get or create L0 page table. */
+	if (load_level_required >= 0) {
+		err = pvr_page_table_l0_get_or_create(ptr, should_create, &did_create_l0);
+		if (err) {
+			/*
+			 * If @should_create is %false and no L0 page table was
+			 * found, return early but without an error. Since
+			 * pvr_page_table_l0_get_or_create() can only return
+			 * -%ENXIO if @should_create is %false, there is no
+			 * need to check it here.
+			 */
+			if (err == -ENXIO)
+				err = 0;
+
+			/*
+			 * At this point, an L1 page table could have been
+			 * created but is now empty due to the failed attempt
+			 * at creating an L0 page table. In this instance, we
+			 * must remove the empty L1 page table ourselves as
+			 * pvr_page_table_l1_remove() is never called as part
+			 * of the error path in
+			 * pvr_page_table_l0_get_or_create().
+			 */
+			if (did_create_l1) {
+				pvr_page_table_l2_remove(ptr);
+				pvr_page_table_ptr_require_sync(ptr, 2);
+			}
+
+			goto err_out;
+		}
+	}
+
+	if (did_create_l1)
+		pvr_page_table_ptr_require_sync(ptr, 2);
+	else if (did_create_l0)
+		pvr_page_table_ptr_require_sync(ptr, 1);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_page_table_ptr_set() - Reassign a page table pointer, syncing any
+ *                            page tables previously assigned to it which are
+ *                            no longer relevant.
+ * @ptr: Target page table pointer.
+ * @device_addr: New pointer target.
+ * @should_create: Specify whether new page tables should be created when
+ *                 empty page table entries are encountered during traversal.
+ *
+ * This function performs a full sync on the pointer, regardless of which
+ * levels are modified.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_page_table_ptr_load_tables().
+ */
+static int
+pvr_page_table_ptr_set(struct pvr_page_table_ptr *ptr, u64 device_addr,
+		       bool should_create)
+{
+	pvr_page_table_ptr_sync(ptr);
+
+	ptr->l2_idx = pvr_page_table_l2_idx(device_addr);
+	ptr->l1_idx = pvr_page_table_l1_idx(device_addr);
+	ptr->l0_idx = pvr_page_table_l0_idx(device_addr);
+
+	return pvr_page_table_ptr_load_tables(ptr, should_create, 1);
+}
+
+/**
+ * pvr_page_table_ptr_init() - Initialize a page table pointer.
+ * @ptr: Target page table pointer.
+ * @pvr_dev: Target PowerVR device.
+ * @root_table: Root of the target page table tree structure.
+ * @device_addr: Pointer target.
+ * @should_create: Specify whether new page tables should be created when
+ *                 empty page table entries are encountered during traversal.
+ *
+ * This function zeroes @ptr; it must not be a valid page table pointer when it
+ * is called.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_page_table_ptr_set().
+ */
+static int
+pvr_page_table_ptr_init(struct pvr_page_table_ptr *ptr,
+			struct pvr_device *pvr_dev,
+			struct pvr_page_table_l2 *root_table, u64 device_addr,
+			bool should_create)
+{
+	memset(ptr, 0, sizeof(*ptr));
+
+	ptr->pvr_dev = pvr_dev;
+	ptr->l2_table = root_table;
+	ptr->sync_level_required = PVR_PAGE_TABLE_PTR_IN_SYNC;
+
+	return pvr_page_table_ptr_set(ptr, device_addr, should_create);
+}
+
+/**
+ * pvr_page_table_ptr_fini() - Tear down a page table pointer.
+ * @ptr: Target page table pointer.
+ */
+static void
+pvr_page_table_ptr_fini(struct pvr_page_table_ptr *ptr)
+{
+	bool flush_caches = ptr->sync_level_required != PVR_PAGE_TABLE_PTR_IN_SYNC;
+
+	if (WARN_ON(!flush_caches && (ptr->l0_free_list || ptr->l1_free_list)))
+		flush_caches = true;
+
+	pvr_page_table_ptr_sync(ptr);
+
+	if (flush_caches)
+		WARN_ON(pvr_vm_mmu_flush(ptr->pvr_dev));
+
+	while (ptr->l0_free_list) {
+		struct pvr_page_table_l0 *l0 = ptr->l0_free_list;
+
+		ptr->l0_free_list = l0->next_free;
+		pvr_page_table_l0_fini(l0);
+		kfree(l0);
+	}
+
+	while (ptr->l1_free_list) {
+		struct pvr_page_table_l1 *l1 = ptr->l1_free_list;
+
+		ptr->l1_free_list = l1->next_free;
+		pvr_page_table_l1_fini(l1);
+		kfree(l1);
+	}
+}
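+
+/*
+ * Lifecycle sketch (illustrative; @pvr_dev, @root_table and @device_addr are
+ * assumed to be in scope): a pointer is initialized against a root table and
+ * a device-virtual address, used, then torn down. Teardown performs any
+ * outstanding sync, flushes the MMU if required and frees page tables queued
+ * for destruction.
+ *
+ * .. code-block:: c
+ *
+ *    struct pvr_page_table_ptr ptr;
+ *    int err = pvr_page_table_ptr_init(&ptr, pvr_dev, root_table,
+ *                                      device_addr, true);
+ *
+ *    if (!err)
+ *            pvr_page_table_ptr_fini(&ptr);
+ */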
+
+/**
+ * pvr_page_table_ptr_next_page() - Advance a page table pointer.
+ * @ptr: Target page table pointer.
+ * @should_create: Specify whether new page tables should be created when
+ *                 empty page table entries are encountered during traversal.
+ *
+ * If @should_create is %false, it is the caller's responsibility to verify that
+ * the state of the table references in @ptr is valid on return. If -%ENXIO is
+ * returned, at least one of the table references is invalid. It should be
+ * noted that @ptr as a whole will be left in a valid state if -%ENXIO is
+ * returned, unlike other error codes. The caller should check which references
+ * are invalid by comparing them to %NULL. Only @ptr->l2_table is guaranteed
+ * to be valid, since it represents the root of the page table tree structure.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EPERM if the operation would wrap at the top of the page table
+ *    hierarchy,
+ *  * -%ENXIO if @should_create is %false and a page table of any level would
+ *    have otherwise been created, or
+ *  * Any error returned while attempting to create missing page tables if
+ *    @should_create is %true.
+ */
+static int
+pvr_page_table_ptr_next_page(struct pvr_page_table_ptr *ptr, bool should_create)
+{
+	s8 load_level_required = PVR_PAGE_TABLE_PTR_IN_SYNC;
+
+	if (++ptr->l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X)
+		goto load_tables;
+
+	ptr->l0_idx = 0;
+	load_level_required = 0;
+
+	if (++ptr->l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE)
+		goto load_tables;
+
+	ptr->l1_idx = 0;
+	load_level_required = 1;
+
+	if (++ptr->l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE)
+		goto load_tables;
+
+	/*
+	 * If the pattern continued, we would set &ptr->l2_idx to zero here.
+	 * However, that would wrap the top layer of the page table hierarchy
+	 * which is not a valid operation. Instead, we warn and return an
+	 * error.
+	 */
+	WARN(true,
+	     "%s(%p) attempted to loop the top of the page table hierarchy",
+	     __func__, ptr);
+	return -EPERM;
+
+	/* If indices have wrapped, we need to load new tables. */
+load_tables:
+	/* First, flush tables which will be unloaded. */
+	pvr_page_table_ptr_sync_partial(ptr, load_level_required);
+
+	/* Then load tables from the required level down. */
+	return pvr_page_table_ptr_load_tables(ptr, should_create,
+					      load_level_required);
+}
+
+/**
+ * pvr_page_table_ptr_copy() - Duplicate a page table pointer.
+ * @dst: [OUT] New page table pointer.
+ * @src: [IN] Original page table pointer.
+ *
+ * The pointer at @dst will be marked as "synced" so that any sync operations
+ * required on @src are not duplicated.
+ */
+static void
+pvr_page_table_ptr_copy(struct pvr_page_table_ptr *dst,
+			const struct pvr_page_table_ptr *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+
+	/*
+	 * Nothing currently in the clone requires a sync later on, since the
+	 * original will handle it either when advancing or during teardown.
+	 */
+	dst->sync_level_required = PVR_PAGE_TABLE_PTR_IN_SYNC;
+
+	/* The clone starts with an empty list of L1/L0 tables to free. */
+	dst->l1_free_list = NULL;
+	dst->l0_free_list = NULL;
+}
+
+/**
+ * DOC: Single page operations
+ */
+
+/**
+ * pvr_page_create() - Create a device-virtual memory page and insert it into
+ * a level 0 page table.
+ * @ptr: Page table pointer to the device-virtual address of the target page.
+ * @dma_addr: DMA address of the physical page backing the created page.
+ * @flags: Page options saved on the level 0 page table entry for reading by
+ *         the device.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * -%EEXIST if the requested page already exists.
+ */
+static int
+pvr_page_create(struct pvr_page_table_ptr *ptr, dma_addr_t dma_addr,
+		struct pvr_page_flags_raw flags)
+{
+	/* Do not create a new page if one already exists. */
+	if (pvr_page_table_l0_entry_is_valid(ptr->l0_table, ptr->l0_idx))
+		return -EEXIST;
+
+	pvr_page_table_l0_insert(ptr, dma_addr, flags);
+
+	pvr_page_table_ptr_require_sync(ptr, 0);
+
+	return 0;
+}
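+
+/*
+ * Mapping-loop sketch (illustrative; @ptr, @dma_addr, @flags and @page_count
+ * are assumed to be in scope): create a contiguous run of device pages by
+ * advancing the pointer one device page at a time.
+ *
+ * .. code-block:: c
+ *
+ *    for (u64 i = 0; i < page_count; ++i) {
+ *            err = pvr_page_create(ptr, dma_addr, flags);
+ *            if (err)
+ *                    break;
+ *
+ *            dma_addr += PVR_DEVICE_PAGE_SIZE;
+ *
+ *            if (i + 1 < page_count) {
+ *                    err = pvr_page_table_ptr_next_page(ptr, true);
+ *                    if (err)
+ *                            break;
+ *            }
+ *    }
+ */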
+
+/**
+ * pvr_page_destroy() - Destroy a device page after removing it from its
+ *                      parent level 0 page table.
+ * @ptr: Page table pointer to the device-virtual address of the target page.
+ */
+static void
+pvr_page_destroy(struct pvr_page_table_ptr *ptr)
+{
+	/* Do nothing if the page does not exist. */
+	if (!pvr_page_table_l0_entry_is_valid(ptr->l0_table, ptr->l0_idx))
+		return;
+
+	/* Clear the parent L0 page table entry. */
+	pvr_page_table_l0_remove(ptr);
+
+	pvr_page_table_ptr_require_sync(ptr, 0);
+}
+
+/**
+ * DOC: Mapping tree implementation
+ */
+
+static __always_inline u64
+pvr_vm_mapping_tree_compute_last(u64 start, u64 size)
+{
+	if (!size)
+		return start;
+
+	return start + size - 1;
+}
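+
+/*
+ * Worked example (illustrative): for start = 0x1000 and size = 0x2000 the
+ * last (inclusive) value is 0x2fff; a zero size degenerates to
+ * last == start.
+ */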
+
+/**
+ * struct pvr_vm_mapping_tree_node - A node in our mapping tree.
+ * @rb: Base RB-tree node. **For internal use only.**
+ * @start: The start value of the range represented by this node. **For
+ *         internal use only.** Do not access this member directly, instead
+ *         call pvr_vm_mapping_tree_node_start().
+ * @size: The size of the range represented by this node. **For internal use
+ *        only.** Do not access this member directly, instead call
+ *        pvr_vm_mapping_tree_node_size().
+ * @__subtree_last: Required for the implementation generated by
+ *                  INTERVAL_TREE_DEFINE(). **For internal use only.**
+ *
+ * Unlike the generic implementation in <linux/interval_tree.h>, we store the
+ * size of the interval instead of the last value. To access the last value (as
+ * required by the implementation behind INTERVAL_TREE_DEFINE()) use
+ * pvr_vm_mapping_tree_node_last().
+ */
+struct pvr_vm_mapping_tree_node {
+	struct rb_node rb;
+	u64 start;
+	u64 size;
+/* private: internal use only */
+	u64 __subtree_last;
+};
+
+static __always_inline bool
+pvr_vm_mapping_tree_node_is_inserted(struct pvr_vm_mapping_tree_node *node)
+{
+	return !RB_EMPTY_NODE(&node->rb);
+}
+
+static __always_inline void
+pvr_vm_mapping_tree_node_mark_removed(struct pvr_vm_mapping_tree_node *node)
+{
+	RB_CLEAR_NODE(&node->rb);
+}
+
+/**
+ * pvr_vm_mapping_tree_node_init() - Initialize a VM mapping tree node
+ * @node: Target VM mapping tree node.
+ * @start: Start value of @node.
+ * @size: Size of @node.
+ */
+static __always_inline void
+pvr_vm_mapping_tree_node_init(struct pvr_vm_mapping_tree_node *node,
+			      u64 start, u64 size)
+{
+	pvr_vm_mapping_tree_node_mark_removed(node);
+
+	node->start = start;
+	node->size = size;
+}
+
+/**
+ * pvr_vm_mapping_tree_node_fini() - Tear down a VM mapping tree node
+ * @node: Target VM mapping tree node.
+ *
+ * There are no actual teardown operations required for a
+ * &struct pvr_vm_mapping_tree_node. However, this function does verify that
+ * @node has been removed from its parent tree and emits a kernel warning if
+ * this is not the case.
+ */
+static __always_inline void
+pvr_vm_mapping_tree_node_fini(struct pvr_vm_mapping_tree_node *node)
+{
+	WARN(pvr_vm_mapping_tree_node_is_inserted(node),
+	     "%s(%p) called before removing node from tree", __func__, node);
+}
+
+/**
+ * pvr_vm_mapping_tree_node_start() - Obtain the start value of the mapping
+ *                                    represented by a VM mapping tree node
+ * @node: Target VM mapping tree node.
+ *
+ * Return:
+ * The start value of @node.
+ */
+static __always_inline u64
+pvr_vm_mapping_tree_node_start(struct pvr_vm_mapping_tree_node *node)
+{
+	return node->start;
+}
+
+/**
+ * pvr_vm_mapping_tree_node_size() - Obtain the size of the mapping
+ *                                   represented by a VM mapping tree node
+ * @node: Target VM mapping tree node.
+ *
+ * Return:
+ * The size of @node.
+ */
+static __always_inline u64
+pvr_vm_mapping_tree_node_size(struct pvr_vm_mapping_tree_node *node)
+{
+	return node->size;
+}
+
+/**
+ * pvr_vm_mapping_tree_node_last() - Obtain the last (inclusive) value of the
+ *                                   mapping represented by a VM mapping tree
+ *                                   node
+ * @node: Target VM mapping tree node.
+ *
+ * Return:
+ * The last (inclusive) value of @node.
+ */
+static __always_inline u64
+pvr_vm_mapping_tree_node_last(struct pvr_vm_mapping_tree_node *node)
+{
+	return pvr_vm_mapping_tree_compute_last(node->start, node->size);
+}
+
+INTERVAL_TREE_DEFINE(struct pvr_vm_mapping_tree_node, rb, u64, __subtree_last,
+		     pvr_vm_mapping_tree_node_start,
+		     pvr_vm_mapping_tree_node_last, static,
+		     __pvr_vm_mapping_tree)
+
+/**
+ * for_each_pvr_vm_mapping_tree_node() - Helper macro to iterate a specific
+ *                                       range of a VM mapping tree
+ * @tree_: Target mapping tree to iterate.
+ * @node_: Loop variable; a pointer to &struct pvr_vm_mapping_tree_node which
+ *         is reassigned on every iteration.
+ * @start_: First value to iterate from.
+ * @size_: Size to iterate for.
+ *
+ * Iterating an interval tree requires a fairly verbose ``for`` construction;
+ * this macro encapsulates that boilerplate in a single place.
+ */
+#define for_each_pvr_vm_mapping_tree_node(tree_, node_, start_, size_)   \
+	for ((node_) = pvr_vm_mapping_tree_iter_first((tree_), (start_), \
+						      (size_));          \
+	     (node_);                                                    \
+	     (node_) = pvr_vm_mapping_tree_iter_next((node_), (start_),  \
+						     (size_)))
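+
+/*
+ * Illustrative usage sketch (assuming @tree is a pointer to an initialized
+ * &struct pvr_vm_mapping_tree):
+ *
+ *	struct pvr_vm_mapping_tree_node *node;
+ *
+ *	for_each_pvr_vm_mapping_tree_node(tree, node, 0, U64_MAX)
+ *		pr_debug("mapping [%llx,%llx]\n",
+ *			 pvr_vm_mapping_tree_node_start(node),
+ *			 pvr_vm_mapping_tree_node_last(node));
+ */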
+
+/**
+ * struct pvr_vm_mapping_tree - Our implementation of an interval tree.
+ * @root: The underlying root of the red-black tree as used by
+ *        INTERVAL_TREE_DEFINE().
+ *
+ * The generic interval tree types in both <linux/interval_tree_generic.h> and
+ * <linux/interval_tree.h> do not contain a specific type for the root of the
+ * tree; instead using &struct rb_root_cached from the underlying red-black
+ * tree implementation.
+ */
+struct pvr_vm_mapping_tree {
+	struct rb_root_cached root;
+} __packed;
+
+/**
+ * pvr_vm_mapping_tree_iter_first() - Locate the first VM mapping tree node
+ *                                    which overlaps with the specified
+ *                                    range
+ * @tree: Target VM mapping tree.
+ * @start: Start value of the iterable range.
+ * @size: Size of the iterable range.
+ *
+ * This function forms a wrapper around __pvr_vm_mapping_tree_iter_first(),
+ * which is generated by INTERVAL_TREE_DEFINE().
+ *
+ * Return:
+ *  * The first node overlapping the range specified by @start and @size (if
+ *    one exists), or
+ *  * %NULL otherwise.
+ */
+static __always_inline struct pvr_vm_mapping_tree_node *
+pvr_vm_mapping_tree_iter_first(struct pvr_vm_mapping_tree *tree, u64 start,
+			       u64 size)
+{
+	u64 last = pvr_vm_mapping_tree_compute_last(start, size);
+
+	/* This function is generated by INTERVAL_TREE_DEFINE(). */
+	return __pvr_vm_mapping_tree_iter_first(&tree->root, start, last);
+}
+
+/**
+ * pvr_vm_mapping_tree_iter_next() - Locate the next VM mapping tree node
+ *                                   which overlaps with the specified range
+ * @node: Node to iterate from.
+ * @start: Start value of the iterable range.
+ * @size: Size of the iterable range.
+ *
+ * This function forms a wrapper around __pvr_vm_mapping_tree_iter_next(),
+ * which is generated by INTERVAL_TREE_DEFINE().
+ *
+ * Return:
+ *  * The subsequent node if one is found, or
+ *  * %NULL otherwise.
+ */
+static __always_inline struct pvr_vm_mapping_tree_node *
+pvr_vm_mapping_tree_iter_next(struct pvr_vm_mapping_tree_node *node,
+			      u64 start, u64 size)
+{
+	u64 last = pvr_vm_mapping_tree_compute_last(start, size);
+
+	/* This function is generated by INTERVAL_TREE_DEFINE(). */
+	return __pvr_vm_mapping_tree_iter_next(node, start, last);
+}
+
+/**
+ * pvr_vm_mapping_tree_init() - Initialize a mapping tree.
+ * @tree: Target VM mapping tree.
+ */
+static __always_inline void
+pvr_vm_mapping_tree_init(struct pvr_vm_mapping_tree *tree)
+{
+	tree->root = RB_ROOT_CACHED;
+}
+
+/**
+ * pvr_vm_mapping_tree_fini() - Teardown a mapping tree.
+ * @tree: Target VM mapping tree.
+ *
+ * It is an error to call this function on a non-empty mapping tree. Doing
+ * so is very likely to cause a memory leak. For this reason,
+ * pvr_vm_mapping_tree_fini() will emit a kernel warning for each entry found
+ * in the target tree before returning.
+ */
+static void
+pvr_vm_mapping_tree_fini(struct pvr_vm_mapping_tree *tree)
+{
+	struct pvr_vm_mapping_tree_node *node;
+
+	for_each_pvr_vm_mapping_tree_node(tree, node, 0, U64_MAX) {
+		WARN(true, "%s(%p) found [%llx,%llx]@%p", __func__, tree,
+		     pvr_vm_mapping_tree_node_start(node),
+		     pvr_vm_mapping_tree_node_last(node), node);
+	}
+}
+
+/**
+ * pvr_vm_mapping_tree_contains() - Check if any node in a mapping tree
+ *                                  overlaps with a specified range.
+ * @tree: Target VM mapping tree.
+ * @start: Start value of the search range.
+ * @size: Size of the search range.
+ *
+ * This function is just a call to pvr_vm_mapping_tree_iter_first() with the
+ * returned pointer coerced into a ``bool`` for convenience. It should always
+ * be inlined.
+ *
+ * Return:
+ *  * %true if any node in the target mapping tree overlaps with the range
+ *    specified by @start and @size, or
+ *  * %false otherwise.
+ */
+static __always_inline bool
+pvr_vm_mapping_tree_contains(struct pvr_vm_mapping_tree *tree, u64 start,
+			     u64 size)
+{
+	return pvr_vm_mapping_tree_iter_first(tree, start, size);
+}
+
+struct pvr_vm_mapping;
+
+static __always_inline struct pvr_vm_mapping *
+pvr_vm_mapping_from_node(struct pvr_vm_mapping_tree_node *node);
+static int
+pvr_vm_mapping_unmap(struct pvr_vm_context *vm_ctx, struct pvr_vm_mapping *mapping);
+static void
+pvr_vm_mapping_fini(struct pvr_vm_mapping *mapping);
+static __always_inline u64
+pvr_vm_mapping_start(struct pvr_vm_mapping *mapping);
+static __always_inline u64
+pvr_vm_mapping_last(struct pvr_vm_mapping *mapping);
+
+/**
+ * DOC: Memory context
+ *
+ * This is the "top level" datatype in the VM code. It's exposed in the public
+ * API as an opaque handle.
+ */
+
+/**
+ * struct pvr_vm_context - Context type which encapsulates an entire page table
+ *                         tree structure.
+ * @pvr_dev: The PowerVR device to which this context is bound.
+ *
+ *           This binding is immutable for the life of the context.
+ * @root_table: The root of the page table tree structure.
+ *
+ *              This embedded struct is our "mirror" version of the top level
+ *              page table. By definition, there can only be one of these. The
+ *              device requires this top level table to always exist, so there
+ *              is no need for it to be a pointer here.
+ * @mappings: An interval tree structure containing every currently
+ *            active mapping associated with this context.
+ * @lock: Global lock on this entire structure of page tables.
+ * @fw_mem_ctx_obj: Firmware object representing firmware memory context.
+ * @ref_count: Reference count of object.
+ * @enable_warnings: Emit warnings when tearing down memory mappings.
+ */
+struct pvr_vm_context {
+	struct pvr_device *pvr_dev;
+	struct pvr_page_table_l2 root_table;
+	struct pvr_vm_mapping_tree mappings;
+	struct mutex lock;
+	struct pvr_fw_object *fw_mem_ctx_obj;
+	struct kref ref_count;
+	bool enable_warnings;
+};
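+
+/*
+ * Locking sketch (illustrative): every operation on &pvr_vm_context.mappings
+ * below is performed with &pvr_vm_context.lock held, e.g.:
+ *
+ *	mutex_lock(&vm_ctx->lock);
+ *	... inspect or modify vm_ctx->mappings ...
+ *	mutex_unlock(&vm_ctx->lock);
+ */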
+
+/**
+ * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
+ *                                     page table structure behind a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * Return:
+ * The DMA address of the root page table.
+ */
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
+{
+	return vm_ctx->root_table.backing_page.dma_addr;
+}
+
+/**
+ * pvr_vm_context_init() - Initialize a VM context for the specified device.
+ * @vm_ctx: Target VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ *                        create a firmware memory context for the VM context
+ *                        and disable warnings when tearing down mappings.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%ENOMEM on out of memory, or
+ *  * Any error returned by pvr_fw_mem_context_create().
+ */
+static int
+pvr_vm_context_init(struct pvr_vm_context *vm_ctx, struct pvr_device *pvr_dev,
+		    bool is_userspace_context)
+{
+	int err;
+
+	err = pvr_page_table_l2_init(&vm_ctx->root_table, pvr_dev);
+	if (err)
+		goto err_out;
+
+	pvr_vm_mapping_tree_init(&vm_ctx->mappings);
+
+	mutex_init(&vm_ctx->lock);
+
+	kref_init(&vm_ctx->ref_count);
+
+	vm_ctx->pvr_dev = pvr_dev;
+
+	if (is_userspace_context) {
+		err = pvr_fw_mem_context_create(pvr_dev, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
+		if (err)
+			goto err_free;
+	} else {
+		vm_ctx->enable_warnings = true;
+	}
+
+	return 0;
+
+err_free:
+	pvr_vm_context_put(vm_ctx);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_context_teardown_mappings() - Teardown any remaining mappings on this VM context
+ * @vm_ctx: Target VM context.
+ */
+static void
+pvr_vm_context_teardown_mappings(struct pvr_vm_context *vm_ctx)
+{
+	struct pvr_vm_mapping_tree_node *node;
+
+	/* Destroy any remaining mappings. */
+	mutex_lock(&vm_ctx->lock);
+
+	while ((node = pvr_vm_mapping_tree_iter_first(&vm_ctx->mappings, 0, U64_MAX)) != NULL) {
+		struct pvr_vm_mapping *mapping = pvr_vm_mapping_from_node(node);
+
+		WARN(vm_ctx->enable_warnings, "%s(%p) found [%llx,%llx]@%p", __func__, vm_ctx,
+		     pvr_vm_mapping_start(mapping), pvr_vm_mapping_last(mapping), mapping);
+		pvr_vm_mapping_unmap(vm_ctx, mapping);
+		pvr_vm_mapping_fini(mapping);
+		kfree(mapping);
+	}
+
+	mutex_unlock(&vm_ctx->lock);
+}
+
+/**
+ * pvr_vm_context_unmap_from_ptr() - Unmap pages from a memory context starting
+ *                                   from the entry addressed by a page table
+ *                                   pointer.
+ * @ptr: Page table pointer to the first page to unmap.
+ * @nr_pages: Number of pages to unmap.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while advancing @ptr with
+ *    pvr_page_table_ptr_next_page() (except -%ENXIO).
+ */
+static int
+pvr_vm_context_unmap_from_ptr(struct pvr_page_table_ptr *ptr, u64 nr_pages)
+{
+	u64 page;
+	int err;
+
+	if (nr_pages == 0)
+		return 0;
+
+	/*
+	 * Destroy first page outside loop, as it doesn't require a pointer
+	 * increment beforehand. If the L0 page table reference in @ptr is
+	 * %NULL, there cannot be a mapped page at @ptr (so skip ahead).
+	 */
+	if (ptr->l0_table)
+		pvr_page_destroy(ptr);
+
+	for (page = 1; page < nr_pages; ++page) {
+		err = pvr_page_table_ptr_next_page(ptr, false);
+		/*
+		 * If the page table tree structure at @ptr is incomplete,
+		 * skip ahead. We don't care about unmapping pages that
+		 * cannot exist.
+		 *
+		 * FIXME: This could be made more efficient by jumping ahead
+		 * using pvr_page_table_ptr_set().
+		 */
+		if (err == -ENXIO)
+			continue;
+
+		if (err)
+			goto err_out;
+
+		pvr_page_destroy(ptr);
+	}
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_context_unmap() - Unmap pages from a memory context.
+ * @vm_ctx: Target memory context.
+ * @device_addr: First device-virtual address to unmap.
+ * @nr_pages: Number of pages to unmap.
+ *
+ * The total amount of device-virtual memory unmapped by pvr_vm_context_unmap()
+ * is @nr_pages * %PVR_DEVICE_PAGE_SIZE.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error (other than -%ENXIO) returned by pvr_page_table_ptr_init(), or
+ *  * Any error returned by pvr_vm_context_unmap_from_ptr().
+ */
+static int
+pvr_vm_context_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr,
+		     u64 nr_pages)
+{
+	struct pvr_page_table_ptr ptr;
+	int err;
+
+	err = pvr_page_table_ptr_init(&ptr, vm_ctx->pvr_dev,
+				      &vm_ctx->root_table, device_addr, false);
+	if (err && err != -ENXIO)
+		goto err_out;
+
+	err = pvr_vm_context_unmap_from_ptr(&ptr, nr_pages);
+
+	pvr_page_table_ptr_fini(&ptr);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_context_map_direct() - Map pages to a memory context starting from
+ *                               the entry addressed by a page table pointer.
+ * @vm_ctx: Target memory context.
+ * @dma_addr: Initial DMA address of the memory to be mapped.
+ * @size: Size of mapping.
+ * @ptr: Page table pointer to the first page to map.
+ * @flags: Flags to be set on every device-virtual page created.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered while creating a page with pvr_page_create(), or
+ *  * Any error encountered while advancing @ptr with
+ *    pvr_page_table_ptr_next_page().
+ */
+static int
+pvr_vm_context_map_direct(struct pvr_vm_context *vm_ctx, dma_addr_t dma_addr,
+			  u64 size, struct pvr_page_table_ptr *ptr,
+			  struct pvr_page_flags_raw flags)
+{
+	unsigned int pages = size >> PVR_DEVICE_PAGE_SHIFT;
+	unsigned int page;
+
+	struct pvr_page_table_ptr ptr_copy;
+
+	int err;
+
+	/*
+	 * Before progressing, save a copy of the start pointer so we can use
+	 * it again if we enter an error state and have to destroy pages.
+	 */
+	pvr_page_table_ptr_copy(&ptr_copy, ptr);
+
+	/*
+	 * Create first page outside loop, as it doesn't require a pointer
+	 * increment beforehand.
+	 */
+	err = pvr_page_create(ptr, dma_addr, flags);
+	if (err)
+		goto err_fini_ptr_copy;
+
+	for (page = 1; page < pages; ++page) {
+		err = pvr_page_table_ptr_next_page(ptr, true);
+		if (err)
+			goto err_destroy_pages;
+
+		dma_addr += PVR_DEVICE_PAGE_SIZE;
+
+		err = pvr_page_create(ptr, dma_addr, flags);
+		if (err)
+			goto err_destroy_pages;
+	}
+
+	err = 0;
+	goto err_fini_ptr_copy;
+
+err_destroy_pages:
+	/*
+	 * Preserve the original mapping error; a failure while unmapping
+	 * during cleanup is not propagated.
+	 */
+	pvr_vm_context_unmap_from_ptr(&ptr_copy, page);
+
+err_fini_ptr_copy:
+	pvr_page_table_ptr_fini(&ptr_copy);
+
+	return err;
+}
+
+/**
+ * pvr_vm_context_map_sgl() - Map part of a scatter-gather table entry to device-virtual memory.
+ *
+ * @vm_ctx: Target VM context.
+ * @sgl: Target scatter-gather table entry.
+ * @offset: Offset into @sgl to map from. Must result in a starting address
+ *          from @sgl which is CPU page-aligned.
+ * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
+ *        of the device page size.
+ * @ptr: Page table pointer which points to the first page that should be
+ *       mapped to. This will point to the last page mapped to on return.
+ * @page_flags: Page options to be applied to every device-virtual memory page
+ *              in the created mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if the range specified by @offset and @size is not completely
+ *    within @sgl, or
+ *  * Any error returned by pvr_vm_context_map_direct().
+ */
+static int
+pvr_vm_context_map_sgl(struct pvr_vm_context *vm_ctx,
+		       struct scatterlist *sgl, u64 offset, u64 size,
+		       struct pvr_page_table_ptr *ptr,
+		       struct pvr_page_flags_raw page_flags)
+{
+	dma_addr_t dma_addr = sg_dma_address(sgl);
+	unsigned int dma_len = sg_dma_len(sgl);
+
+	if (size > dma_len || offset > dma_len - size)
+		return -EINVAL;
+
+	return pvr_vm_context_map_direct(vm_ctx, dma_addr + offset, size, ptr,
+					 page_flags);
+}
+
+/**
+ * pvr_vm_context_map_sgt() - Map part of a scatter-gather table into device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @sgt: Target scatter-gather table.
+ * @sgt_offset: Offset into @sgt to map from. Must result in a starting
+ * address from @sgt which is CPU page-aligned.
+ * @device_addr: Virtual device address to map to. Must be device page-aligned.
+ * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @page_flags: Page options to be applied to every device-virtual memory page
+ * in the created mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @sgt_offset or @size is not device page-aligned,
+ *  * Any error returned by pvr_page_table_ptr_init(), or
+ *  * Any error returned by pvr_vm_context_map_sgl() or
+ *    pvr_page_table_ptr_next_page().
+ */
+static int
+pvr_vm_context_map_sgt(struct pvr_vm_context *vm_ctx,
+		       struct sg_table *sgt, u64 sgt_offset,
+		       u64 device_addr, u64 size,
+		       struct pvr_page_flags_raw page_flags)
+{
+	struct pvr_page_table_ptr ptr, ptr_copy;
+	struct scatterlist *sgl;
+	u64 mapped_size = 0;
+	unsigned int count;
+	int err;
+
+	if (!size)
+		return 0;
+
+	if ((sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
+		return -EINVAL;
+
+	err = pvr_page_table_ptr_init(&ptr, vm_ctx->pvr_dev,
+				      &vm_ctx->root_table, device_addr, true);
+	if (err)
+		return err;
+
+	pvr_page_table_ptr_copy(&ptr_copy, &ptr);
+
+	for_each_sgtable_dma_sg(sgt, sgl, count) {
+		size_t sgl_len = sg_dma_len(sgl);
+		u64 sgl_offset, map_sgl_len;
+
+		if (sgl_len <= sgt_offset) {
+			sgt_offset -= sgl_len;
+			continue;
+		}
+
+		sgl_offset = sgt_offset;
+		map_sgl_len = min_t(u64, sgl_len - sgl_offset, size);
+
+		err = pvr_vm_context_map_sgl(vm_ctx, sgl,
+					     sgl_offset,
+					     map_sgl_len, &ptr,
+					     page_flags);
+		if (err)
+			break;
+
+		/*
+		 * Flag the L0 page table as requiring a flush when the page
+		 * table pointer is destroyed.
+		 */
+		pvr_page_table_ptr_require_sync(&ptr, 0);
+
+		sgt_offset = 0;
+		mapped_size += map_sgl_len;
+
+		if (mapped_size >= size)
+			break;
+
+		err = pvr_page_table_ptr_next_page(&ptr, true);
+		if (err)
+			break;
+	}
+
+	if (err && mapped_size) {
+		pvr_vm_context_unmap_from_ptr(&ptr_copy,
+					      mapped_size >> PVR_DEVICE_PAGE_SHIFT);
+	}
+
+	pvr_page_table_ptr_fini(&ptr_copy);
+	pvr_page_table_ptr_fini(&ptr);
+	return err;
+}
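+
+/*
+ * Worked example of the @sgt_offset handling above (illustrative values):
+ * with three 8 KiB scatterlist entries and an @sgt_offset of 12 KiB, the
+ * first entry is skipped entirely (@sgt_offset drops to 4 KiB) and mapping
+ * begins 4 KiB into the second entry.
+ */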
+
+/**
+ * DOC: Memory mappings
+ */
+
+/**
+ * struct pvr_vm_mapping - Represents a mapping between a DMA address and a
+ *                         device-virtual address with a given size.
+ * @node: Base VM mapping tree node.
+ * @pvr_obj: Target PowerVR GEM object.
+ * @pvr_obj_offset: Offset into @pvr_obj from which this mapping begins.
+ * @slc_bypass: Whether to bypass the SLC on the device-side of this mapping.
+ * @pm_fw_protect: Whether this mapping should be restricted to the PM and FW.
+ *
+ * This structure implicitly contains the device-virtual address and size of
+ * the mapping through @node members &pvr_vm_mapping_tree_node.start and
+ * &pvr_vm_mapping_tree_node.size respectively.
+ *
+ * Instantiating this struct does not implicitly apply the described mapping;
+ * that must be done with pvr_vm_mapping_map() and reversed with
+ * pvr_vm_mapping_unmap() before deinitialization.
+ *
+ * See &struct pvr_page_flags_raw for details of the flag values contained in
+ * this struct.
+ */
+struct pvr_vm_mapping {
+	struct pvr_vm_mapping_tree_node node;
+	struct pvr_gem_object *pvr_obj;
+	unsigned int pvr_obj_offset;
+
+	bool slc_bypass;
+	bool pm_fw_protect;
+};
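+
+/*
+ * Lifecycle sketch (illustrative only; locking and error handling elided):
+ *
+ *	struct pvr_vm_mapping *mapping =
+ *		kzalloc(sizeof(*mapping), GFP_KERNEL);
+ *
+ *	pvr_vm_mapping_init(mapping, device_addr, size, pvr_obj, 0);
+ *	pvr_vm_mapping_map(vm_ctx, mapping);
+ *	...
+ *	pvr_vm_mapping_unmap(vm_ctx, mapping);
+ *	pvr_vm_mapping_fini(mapping);
+ *	kfree(mapping);
+ */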
+
+static __always_inline struct pvr_vm_mapping *
+pvr_vm_mapping_from_node(struct pvr_vm_mapping_tree_node *node)
+{
+	return container_of(node, struct pvr_vm_mapping, node);
+}
+
+static __always_inline u64
+pvr_vm_mapping_start(struct pvr_vm_mapping *mapping)
+{
+	return pvr_vm_mapping_tree_node_start(&mapping->node);
+}
+
+static __always_inline u64
+pvr_vm_mapping_size(struct pvr_vm_mapping *mapping)
+{
+	return pvr_vm_mapping_tree_node_size(&mapping->node);
+}
+
+static __always_inline u64
+pvr_vm_mapping_last(struct pvr_vm_mapping *mapping)
+{
+	return pvr_vm_mapping_tree_node_last(&mapping->node);
+}
+
+/**
+ * pvr_vm_mapping_tree_insert() - Insert a mapping into a VM mapping tree
+ * @tree: Target VM mapping tree.
+ * @mapping: Mapping to be inserted.
+ *
+ * This function forms a wrapper around __pvr_vm_mapping_tree_insert(), which
+ * is generated by INTERVAL_TREE_DEFINE().
+ */
+static __always_inline void
+pvr_vm_mapping_tree_insert(struct pvr_vm_mapping_tree *tree,
+			   struct pvr_vm_mapping *mapping)
+{
+	struct pvr_vm_mapping_tree_node *node = &mapping->node;
+
+	WARN(pvr_vm_mapping_tree_node_is_inserted(node),
+	     "%s(%p,%p) called on a node which is already in a tree", __func__,
+	     tree, node);
+
+	/* This function is generated by INTERVAL_TREE_DEFINE(). */
+	__pvr_vm_mapping_tree_insert(node, &tree->root);
+}
+
+/**
+ * pvr_vm_mapping_tree_remove() - Remove a node from a VM mapping tree
+ * @tree: Target VM mapping tree.
+ * @mapping: Mapping to be removed.
+ *
+ * This function forms a wrapper around __pvr_vm_mapping_tree_remove(), which
+ * is generated by INTERVAL_TREE_DEFINE().
+ */
+static __always_inline void
+pvr_vm_mapping_tree_remove(struct pvr_vm_mapping_tree *tree,
+			   struct pvr_vm_mapping *mapping)
+{
+	struct pvr_vm_mapping_tree_node *node = &mapping->node;
+
+	WARN(!pvr_vm_mapping_tree_node_is_inserted(node),
+	     "%s(%p,%p) called on a node which is not in a tree", __func__,
+	     tree, node);
+
+	/* This function is generated by INTERVAL_TREE_DEFINE(). */
+	__pvr_vm_mapping_tree_remove(node, &tree->root);
+
+	pvr_vm_mapping_tree_node_mark_removed(node);
+}
+
+/**
+ * pvr_vm_mapping_page_flags_raw() - Generate raw page flags required for
+ *                                   applying a mapping.
+ * @mapping: Target memory mapping.
+ *
+ * Return:
+ * A raw page flags instance for use with pvr_vm_context_map_sgt().
+ */
+static struct pvr_page_flags_raw
+pvr_vm_mapping_page_flags_raw(struct pvr_vm_mapping *mapping)
+{
+	/*
+	 * FIXME: There is currently no way to mark a mapping as read-only or
+	 * cache-coherent. Should there be?
+	 */
+	return pvr_page_flags_raw_create(false, false, mapping->slc_bypass,
+					 mapping->pm_fw_protect);
+}
+
+/**
+ * pvr_vm_mapping_init() - Setup a mapping with the specified parameters.
+ * @mapping: Target memory mapping.
+ * @device_addr: Device-virtual address at the start of the mapping.
+ * @size: Size of the desired mapping.
+ * @pvr_obj: Target PowerVR memory object.
+ * @pvr_obj_offset: Offset into @pvr_obj to begin mapping from.
+ *
+ * The memory behind @mapping should be zeroed before calling this function.
+ *
+ * Some parameters of this function are unchecked. It is therefore the
+ * caller's responsibility to ensure certain constraints are met. Specifically:
+ *
+ * * @pvr_obj_offset must be less than the size of @pvr_obj,
+ * * The sum of @pvr_obj_offset and @size must be less than or equal to the
+ *   size of @pvr_obj,
+ * * The range specified by @pvr_obj_offset and @size (the "CPU range") must be
+ *   CPU page-aligned both in start position and size, and
+ * * The range specified by @device_addr and @size (the "device range") must be
+ *   device page-aligned both in start position and size.
+ *
+ * Note that this function does not perform the mapping operation itself; it
+ * merely prepares an instance of &struct pvr_vm_mapping which can later be
+ * passed to pvr_vm_mapping_map() and used to track the status of the mapping.
+ * The prepared &struct pvr_vm_mapping is not bound to a VM context until
+ * pvr_vm_mapping_map() is called on it.
+ */
+static void
+pvr_vm_mapping_init(struct pvr_vm_mapping *mapping, u64 device_addr,
+		    u64 size, struct pvr_gem_object *pvr_obj,
+		    u64 pvr_obj_offset)
+{
+	u64 flags = pvr_obj->flags;
+
+	/*
+	 * Increment the refcount on the underlying physical memory resource
+	 * to prevent de-allocation while the mapping exists.
+	 */
+	pvr_gem_object_get(pvr_obj);
+
+	mapping->pvr_obj = pvr_obj;
+	mapping->pvr_obj_offset = pvr_obj_offset;
+
+	mapping->slc_bypass = flags & DRM_PVR_BO_DEVICE_BYPASS_CACHE;
+	mapping->pm_fw_protect = flags & DRM_PVR_BO_DEVICE_PM_FW_PROTECT;
+
+	pvr_vm_mapping_tree_node_init(&mapping->node, device_addr, size);
+}
+
+/**
+ * pvr_vm_mapping_fini() - Teardown a mapping.
+ * @mapping: Target memory mapping.
+ *
+ * This function may not be called on a mapping which is currently active. The
+ * caller must call pvr_vm_mapping_unmap() on @mapping (or otherwise ensure
+ * @mapping is not currently mapped) before calling this function.
+ */
+static void
+pvr_vm_mapping_fini(struct pvr_vm_mapping *mapping)
+{
+	pvr_vm_mapping_tree_node_fini(&mapping->node);
+
+	pvr_gem_object_put(mapping->pvr_obj);
+}
+
+/**
+ * pvr_vm_mapping_map() - Insert a mapping into a memory context.
+ * @vm_ctx: Target VM context.
+ * @mapping: Target memory mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EEXIST if @mapping overlaps with an existing mapping in @vm_ctx,
+ *  * Any error encountered while attempting to obtain a reference to the
+ *    buffer bound to @mapping (see pvr_gem_object_get_pages()), or
+ *  * Any error returned by pvr_vm_context_map_sgt().
+ */
+static int
+pvr_vm_mapping_map(struct pvr_vm_context *vm_ctx,
+		   struct pvr_vm_mapping *mapping)
+{
+	int err;
+
+	if (!pvr_gem_object_is_imported(mapping->pvr_obj)) {
+		err = pvr_gem_object_get_pages(mapping->pvr_obj);
+		if (err)
+			return err;
+	}
+
+	err = pvr_vm_context_map_sgt(vm_ctx, mapping->pvr_obj->sgt,
+				     mapping->pvr_obj_offset,
+				     pvr_vm_mapping_start(mapping),
+				     pvr_vm_mapping_size(mapping),
+				     pvr_vm_mapping_page_flags_raw(mapping));
+	if (err)
+		goto err_put_pages;
+
+	pvr_vm_mapping_tree_insert(&vm_ctx->mappings, mapping);
+
+	return 0;
+
+err_put_pages:
+	if (!pvr_gem_object_is_imported(mapping->pvr_obj))
+		pvr_gem_object_put_pages(mapping->pvr_obj);
+
+	return err;
+}
+
+/**
+ * pvr_vm_mapping_unmap() - Remove a mapping from a memory context.
+ * @vm_ctx: Target VM context.
+ * @mapping: Target memory mapping.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_vm_context_unmap().
+ */
+static int
+pvr_vm_mapping_unmap(struct pvr_vm_context *vm_ctx,
+		     struct pvr_vm_mapping *mapping)
+{
+	int err;
+
+	pvr_vm_mapping_tree_remove(&vm_ctx->mappings, mapping);
+
+	err = pvr_vm_context_unmap(vm_ctx, pvr_vm_mapping_start(mapping),
+				   pvr_vm_mapping_size(mapping) >> PVR_DEVICE_PAGE_SHIFT);
+	if (err)
+		goto err_out;
+
+	if (!pvr_gem_object_is_imported(mapping->pvr_obj))
+		pvr_gem_object_put_pages(mapping->pvr_obj);
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/*
+ * Public API
+ *
+ * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
+ */
+
+/**
+ * pvr_device_addr_is_valid() - Tests whether a device-virtual address
+ *                              is valid.
+ * @device_addr: Virtual device address to test.
+ *
+ * Return:
+ *  * %true if @device_addr is within the valid range for a device page
+ *    table and is aligned to the device page size, or
+ *  * %false otherwise.
+ */
+bool
+pvr_device_addr_is_valid(u64 device_addr)
+{
+	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
+	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
+}
+
+/**
+ * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
+ * address and associated size are both valid.
+ * @device_addr: Virtual device address to test.
+ * @size: Size of the range starting at @device_addr to test.
+ *
+ * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
+ * on @device_addr + @size) to verify a device-virtual address range initially
+ * seems intuitive, but it produces a false-negative when the address range
+ * is right at the end of device-virtual address space.
+ *
+ * This function catches that corner case, as well as checking that
+ * @size is non-zero.
+ *
+ * Return:
+ *  * %true if @device_addr is device page aligned; @size is device page
+ *    aligned; the range specified by @device_addr and @size is within the
+ *    bounds of the device-virtual address space, and @size is non-zero, or
+ *  * %false otherwise.
+ */
+bool
+pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size)
+{
+	return pvr_device_addr_is_valid(device_addr) &&
+	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
+	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
+}
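+
+/*
+ * Corner-case example (illustrative): with a 1 GiB device-virtual address
+ * space and 4 KiB device pages, the range starting at 0x3ffff000 with size
+ * 0x1000 is valid even though its end coincides with the end of the address
+ * space; naively validating @device_addr + @size with
+ * pvr_device_addr_is_valid() would wrongly reject it.
+ */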
+
+/**
+ * pvr_vm_create_context() - Create a new VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ *                        create a firmware memory context for the VM context
+ *                        and disable warnings when tearing down mappings.
+ *
+ * Return:
+ *  * A handle to the newly-minted VM context on success,
+ *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
+ *    missing or has an unsupported value,
+ *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
+ *    or
+ *  * Any error encountered while setting up internal structures.
+ */
+struct pvr_vm_context *
+pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+	struct pvr_vm_context *vm_ctx;
+	u16 device_addr_bits;
+
+	int err;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
+				&device_addr_bits);
+	if (err) {
+		drm_err(drm_dev,
+			"Failed to get device virtual address space bits\n");
+		goto err_out;
+	}
+
+	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
+		drm_err(drm_dev,
+			"Device has unsupported virtual address space size\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
+	if (!vm_ctx) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	err = pvr_vm_context_init(vm_ctx, pvr_dev, is_userspace_context);
+	if (err)
+		goto err_free_vm_ctx;
+
+	return vm_ctx;
+
+err_free_vm_ctx:
+	kfree(vm_ctx);
+
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_vm_context_fini() - Teardown a VM context.
+ * @ref_count: Pointer to the embedded reference counter of the target VM
+ *             context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+static void
+pvr_vm_context_fini(struct kref *ref_count)
+{
+	struct pvr_vm_context *vm_ctx =
+		container_of(ref_count, struct pvr_vm_context, ref_count);
+
+	if (vm_ctx->fw_mem_ctx_obj)
+		pvr_fw_mem_context_destroy(vm_ctx->fw_mem_ctx_obj);
+
+	pvr_vm_context_teardown_mappings(vm_ctx);
+	mutex_destroy(&vm_ctx->lock);
+	pvr_vm_mapping_tree_fini(&vm_ctx->mappings);
+	pvr_page_table_l2_fini(&vm_ctx->root_table);
+
+	kfree(vm_ctx);
+}
+
+/**
+ * pvr_vm_context_lookup() - Look up VM context from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on VM context object. Call pvr_vm_context_put() to release.
+ *
+ * Returns:
+ *  * The requested object on success, or
+ *  * %NULL on failure (object does not exist in list, or is not a VM context)
+ */
+struct pvr_vm_context *
+pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_vm_context *vm_ctx;
+
+	xa_lock(&pvr_file->vm_ctx_handles);
+	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
+	if (vm_ctx)
+		kref_get(&vm_ctx->ref_count);
+
+	xa_unlock(&pvr_file->vm_ctx_handles);
+
+	return vm_ctx;
+}
+
+/**
+ * pvr_vm_context_put() - Release a reference on a VM context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ *  * %true if the VM context was destroyed, or
+ *  * %false if there are any references still remaining.
+ */
+bool
+pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
+{
+	WARN_ON(!vm_ctx);
+
+	if (vm_ctx)
+		return kref_put(&vm_ctx->ref_count, pvr_vm_context_fini);
+
+	return true;
+}
+
+/**
+ * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
+ *                                      with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all vm_contexts associated with @pvr_file from the device VM context
+ * list and drops initial references. vm_contexts will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
+{
+	struct pvr_vm_context *vm_ctx;
+	unsigned long handle;
+
+	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
+		/* vm_ctx is not used here because that would create a race with xa_erase */
+		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
+	}
+}
+
+static int
+pvr_vm_map_mapping_locked(struct pvr_vm_context *vm_ctx,
+			  struct pvr_vm_mapping *mapping)
+{
+	u64 device_addr = pvr_vm_mapping_start(mapping);
+	u64 size = pvr_vm_mapping_size(mapping);
+
+	int err;
+
+	lockdep_assert_held(&vm_ctx->lock);
+
+	/*
+	 * Check that the requested mapping range does not overlap with an
+	 * existing mapping.
+	 */
+	if (pvr_vm_mapping_tree_contains(&vm_ctx->mappings, device_addr,
+					 size)) {
+		err = -EEXIST;
+		goto err_out;
+	}
+
+	err = pvr_vm_mapping_map(vm_ctx, mapping);
+	if (err)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_map() - Map a section of physical memory into a section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @pvr_obj_offset: Offset into @pvr_obj to map from.
+ * @device_addr: Virtual device address at the start of the requested mapping.
+ * @size: Size of the requested mapping.
+ *
+ * No handle is returned to represent the mapping. Instead, callers should
+ * remember @device_addr and use that as a handle.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ *    address; the region specified by @pvr_obj_offset and @size does not fall
+ *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
+ *    is not device-virtual page-aligned,
+ *  * -%EEXIST if the requested mapping overlaps with an existing mapping,
+ *  * -%ENOMEM if allocation of internally required CPU memory fails, or
+ *  * Any error encountered while performing internal operations required to
+ *    create the mapping.
+ */
+int
+pvr_vm_map(struct pvr_vm_context *vm_ctx,
+	   struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+	   u64 device_addr, u64 size)
+{
+	size_t pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+	struct pvr_vm_mapping *mapping;
+	int err;
+
+	if (!pvr_device_addr_and_size_are_valid(device_addr, size) ||
+	    pvr_obj_offset & ~PAGE_MASK || size & ~PAGE_MASK ||
+	    pvr_obj_offset + size > pvr_obj_size ||
+	    pvr_obj_offset > pvr_obj_size) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mutex_lock(&vm_ctx->lock);
+
+	pvr_vm_mapping_init(mapping, device_addr, size, pvr_obj, pvr_obj_offset);
+
+	err = pvr_vm_map_mapping_locked(vm_ctx, mapping);
+	if (err)
+		goto err_fini_mapping;
+
+	err = 0;
+	goto err_unlock;
+
+err_fini_mapping:
+	pvr_vm_mapping_fini(mapping);
+	kfree(mapping);
+
+err_unlock:
+	mutex_unlock(&vm_ctx->lock);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ *    address,
+ *  * -%ENOENT if @device_addr is not a handle to an existing mapping, or
+ *  * Any error encountered while performing internal operations required to
+ *    destroy the mapping.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr)
+{
+	struct pvr_vm_mapping_tree_node *node;
+	struct pvr_vm_mapping *mapping;
+	int err;
+
+	if (!pvr_device_addr_is_valid(device_addr)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	mutex_lock(&vm_ctx->lock);
+
+	node = pvr_vm_mapping_tree_iter_first(&vm_ctx->mappings, device_addr, 0);
+	if (!node) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	mapping = pvr_vm_mapping_from_node(node);
+	if (pvr_vm_mapping_start(mapping) != device_addr) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	err = pvr_vm_mapping_unmap(vm_ctx, mapping);
+	if (err)
+		goto err_unlock;
+
+	pvr_vm_mapping_fini(mapping);
+	kfree(mapping);
+
+err_unlock:
+	mutex_unlock(&vm_ctx->lock);
+
+err_out:
+	return err;
+}
+
+/*
+ * Static data areas are determined by firmware.
+ *
+ * When adding a new static data area you will also need to update the reserved_size field for the
+ * heap in pvr_heaps[].
+ */
+static const struct drm_pvr_static_data_area static_data_areas[] = {
+	{
+		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
+		.location_heap_id = DRM_PVR_HEAP_GENERAL,
+		.offset = 0,
+		.size = 128,
+	},
+	{
+		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+		.location_heap_id = DRM_PVR_HEAP_GENERAL,
+		.offset = 128,
+		.size = 1024,
+	},
+	{
+		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+		.offset = 0,
+		.size = 128,
+	},
+	{
+		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
+		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+		.offset = 128,
+		.size = 128,
+	},
+	{
+		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
+		.offset = 0,
+		.size = 128,
+	},
+};
+
+#define GET_RESERVED_SIZE(last_offset, last_size) \
+	round_up((last_offset) + (last_size), PAGE_SIZE)
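+
+/*
+ * Worked example (assuming 4 KiB CPU pages): the last static data area in the
+ * general heap above is the YUV CSC area at offset 128 with size 1024, so
+ * GET_RESERVED_SIZE(128, 1024) rounds 1152 up to a 4096 byte reservation.
+ */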
+
+/*
+ * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
+ * static data area for each heap.
+ */
+static const struct drm_pvr_heap pvr_heaps[] = {
+	[DRM_PVR_HEAP_GENERAL] = {
+		.base = ROGUE_GENERAL_HEAP_BASE,
+		.size = ROGUE_GENERAL_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
+		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
+		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+	[DRM_PVR_HEAP_USC_CODE] = {
+		.base = ROGUE_USCCODE_HEAP_BASE,
+		.size = ROGUE_USCCODE_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+	[DRM_PVR_HEAP_RGNHDR] = {
+		.base = ROGUE_RGNHDR_HEAP_BASE,
+		.size = ROGUE_RGNHDR_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+	[DRM_PVR_HEAP_VIS_TEST] = {
+		.base = ROGUE_VISTEST_HEAP_BASE,
+		.size = ROGUE_VISTEST_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
+		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
+		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
+		.flags = 0,
+		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+	},
+};
+
+int
+pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+			  struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_static_data_areas query = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+	if (err < 0)
+		return err;
+
+	if (!query.static_data_areas.array) {
+		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
+		goto copy_out;
+	}
+
+	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
+		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+
+	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
+	if (err < 0)
+		return err;
+
+copy_out:
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
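+
+/*
+ * Illustrative userspace flow implied by the checks above: issue the query
+ * once with args->pointer left at zero to learn the expected struct size,
+ * again with query.static_data_areas.array set to zero to learn the element
+ * count and stride, then a final time with a buffer of count * stride bytes
+ * to receive the array itself.
+ */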
+
+int
+pvr_heap_info_get(const struct pvr_device *pvr_dev,
+		  struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_heap_info query = {0};
+	u64 dest;
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+	if (err < 0)
+		return err;
+
+	if (!query.heaps.array) {
+		query.heaps.count = ARRAY_SIZE(pvr_heaps);
+		query.heaps.stride = sizeof(struct drm_pvr_heap);
+		goto copy_out;
+	}
+
+	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
+		query.heaps.count = ARRAY_SIZE(pvr_heaps);
+
+	/* Region header heap is only present if BRN63142 is present. */
+	dest = query.heaps.array;
+	for (size_t i = 0; i < query.heaps.count; i++) {
+		struct drm_pvr_heap heap = pvr_heaps[i];
+
+		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
+			heap.size = 0;
+
+		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
+		if (err < 0)
+			return err;
+
+		dest += query.heaps.stride;
+	}
+
+copy_out:
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
+
+/**
+ * pvr_heap_contains_range() - Determine if a given heap contains the specified
+ *                             device-virtual address range.
+ * @pvr_heap: Target heap.
+ * @start: Inclusive start of the target range.
+ * @end: Inclusive end of the target range.
+ *
+ * It is an error to call this function with values of @start and @end that do
+ * not satisfy the condition @start <= @end.
+ *
+ * Return:
+ *  * %true if the range specified by @start and @end falls entirely within
+ *    @pvr_heap, or
+ *  * %false otherwise.
+ */
+static __always_inline bool
+pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
+{
+	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
+}
+
+/**
+ * pvr_find_heap_containing() - Find a heap which contains the specified
+ *                              device-virtual address range.
+ * @pvr_dev: Target PowerVR device.
+ * @start: Start of the target range.
+ * @size: Size of the target range.
+ *
+ * Return:
+ *  * A pointer to a constant instance of struct drm_pvr_heap representing the
+ *    heap containing the entire range specified by @start and @size on
+ *    success, or
+ *  * %NULL if no such heap exists.
+ */
+const struct drm_pvr_heap *
+pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
+{
+	u64 end;
+
+	if (check_add_overflow(start, size - 1, &end))
+		return NULL;
+
+	/*
+	 * There are no guarantees about the order of address ranges in
+	 * &pvr_heaps, so iterate over the entire array for a heap whose
+	 * range completely encompasses the given range.
+	 */
+	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
+		/* Filter heaps that present only with an associated quirk */
+		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
+		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
+			continue;
+		}
+
+		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
+			return &pvr_heaps[heap_id];
+	}
+
+	return NULL;
+}
+
+/**
+ * pvr_vm_find_gem_object() - Look up a buffer object from a given
+ *                            device-virtual address.
+ * @vm_ctx: [IN] Target VM context.
+ * @device_addr: [IN] Virtual device address at the start of the required
+ *               object.
+ * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
+ *                     of the mapped region within the buffer object. May be
+ *                     %NULL if this information is not required.
+ * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
+ *                   region. May be %NULL if this information is not required.
+ *
+ * If successful, a reference will be taken on the buffer object. The caller
+ * must drop the reference with pvr_gem_object_put().
+ *
+ * Return:
+ *  * The PowerVR buffer object mapped at @device_addr if one exists, or
+ *  * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
+		       u64 *mapped_offset_out, u64 *mapped_size_out)
+{
+	struct pvr_vm_mapping_tree_node *node;
+	struct pvr_vm_mapping *mapping;
+	struct pvr_gem_object *pvr_obj;
+
+	mutex_lock(&vm_ctx->lock);
+
+	node = pvr_vm_mapping_tree_iter_first(&vm_ctx->mappings, device_addr, 0);
+	if (!node)
+		goto err_unlock;
+
+	mapping = pvr_vm_mapping_from_node(node);
+	if (pvr_vm_mapping_start(mapping) != device_addr)
+		goto err_unlock;
+
+	pvr_obj = mapping->pvr_obj;
+	if (WARN_ON(!pvr_obj))
+		goto err_unlock;
+
+	pvr_gem_object_get(pvr_obj);
+
+	if (mapped_offset_out)
+		*mapped_offset_out = mapping->pvr_obj_offset;
+	if (mapped_size_out)
+		*mapped_size_out = pvr_vm_mapping_size(mapping);
+
+	mutex_unlock(&vm_ctx->lock);
+
+	return pvr_obj;
+
+err_unlock:
+	mutex_unlock(&vm_ctx->lock);
+
+	return NULL;
+}
+
+/**
+ * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ *  * FW object representing firmware memory context, or
+ *  * %NULL if this VM context does not have a firmware memory context.
+ */
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
+{
+	return vm_ctx->fw_mem_ctx_obj;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
new file mode 100644
index 000000000000..8ead8104ed8e
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -0,0 +1,99 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_VM_H__
+#define __PVR_VM_H__
+
+#include "pvr_rogue_mmu_defs.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h" */
+struct pvr_gem_object;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <uapi/drm/pvr_drm.h> */
+struct drm_pvr_ioctl_get_heap_info_args;
+
+/**
+ * DOC: Public API (constants)
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SIZE
+ *
+ *    Fixed page size referenced by leaf nodes in the page table tree
+ *    structure. In the current implementation, this value is pegged to the
+ *    CPU page size (%PAGE_SIZE). It is therefore an error to specify a CPU
+ *    page size which is not also a supported device page size. The supported
+ *    device page sizes are: 4KiB, 16KiB, 64KiB, 256KiB, 1MiB and 2MiB.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SHIFT
+ *
+ *    Shift value used to efficiently multiply or divide by
+ *    %PVR_DEVICE_PAGE_SIZE.
+ *
+ *    This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_MASK
+ *
+ *    Mask used to round a value down to the nearest multiple of
+ *    %PVR_DEVICE_PAGE_SIZE. When bitwise negated, it will indicate whether a
+ *    value is already a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ *    This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ */
+
+#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
+#define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1)))
+
+/* PVR_DEVICE_PAGE_SIZE determines the page size */
+#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
+#define PVR_DEVICE_PAGE_SHIFT (PAGE_SHIFT)
+#define PVR_DEVICE_PAGE_MASK (PAGE_MASK)
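+
+/*
+ * Example (illustrative): for a hypothetical 16 KiB device page,
+ * PVR_SHIFT_FROM_SIZE(SZ_16K) evaluates to 14 and PVR_MASK_FROM_SIZE(SZ_16K)
+ * to ~0x3fffull, so (addr & ~PVR_MASK_FROM_SIZE(SZ_16K)) == 0 tests 16 KiB
+ * alignment.
+ */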
+
+/* Functions defined in pvr_vm.c */
+
+bool pvr_device_addr_is_valid(u64 device_addr);
+bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size);
+
+struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+					     bool is_userspace_context);
+
+int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+	       struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+	       u64 device_addr, u64 size);
+int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr);
+
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
+
+int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+			      struct drm_pvr_ioctl_dev_query_args *args);
+int pvr_heap_info_get(const struct pvr_device *pvr_dev,
+		      struct drm_pvr_ioctl_dev_query_args *args);
+const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
+						    u64 addr, u64 size);
+
+struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
+					      u64 device_addr,
+					      u64 *mapped_offset_out,
+					      u64 *mapped_size_out);
+
+int
+pvr_vm_mmu_flush(struct pvr_device *pvr_dev);
+
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
+
+struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
+bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
+
+#endif /* __PVR_VM_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
new file mode 100644
index 000000000000..f5d24df569e8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -0,0 +1,223 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm_mips.h"
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/**
+ * pvr_vm_mips_init() - Initialise MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EINVAL if the page table size exceeds the maximum, or if the physical
+ *    bus width feature is unavailable,
+ *  * -%ENOMEM on out of memory,
+ *  * Any error returned by pvr_gem_object_create(), or
+ *  * Any error returned by pvr_gem_object_vmap().
+ */
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev)
+{
+	u32 pt_size = 1 << ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev);
+	struct pvr_fw_mips_data *mips_data;
+	u32 phys_bus_width;
+	int err;
+
+	/* Page table size must be at most ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * 4k pages. */
+	if (pt_size > ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * SZ_4K) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	mips_data = kzalloc(sizeof(*mips_data), GFP_KERNEL);
+	if (!mips_data) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	mips_data->pt_obj = pvr_gem_object_create(pvr_dev, pt_size,
+						  DRM_PVR_BO_DEVICE_PM_FW_PROTECT |
+						  DRM_PVR_BO_CREATE_ZEROED);
+	if (IS_ERR(mips_data->pt_obj)) {
+		err = PTR_ERR(mips_data->pt_obj);
+		goto err_kfree;
+	}
+
+	mips_data->pt = pvr_gem_object_vmap(mips_data->pt_obj, false);
+	if (IS_ERR(mips_data->pt)) {
+		err = PTR_ERR(mips_data->pt);
+		goto err_put_obj;
+	}
+
+	mips_data->pfn_mask = (phys_bus_width > 32) ? ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT :
+						      ROGUE_MIPSFW_ENTRYLO_PFN_MASK;
+
+	mips_data->cache_policy = (phys_bus_width > 32) ? ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT :
+							  ROGUE_MIPSFW_CACHED_POLICY;
+
+	pvr_dev->fw_dev.processor_data.mips_data = mips_data;
+
+	return 0;
+
+err_put_obj:
+	pvr_gem_object_put(mips_data->pt_obj);
+
+err_kfree:
+	kfree(mips_data);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_mips_fini() - Release MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+
+	pvr_gem_object_vunmap(mips_data->pt_obj, false);
+	pvr_gem_object_put(mips_data->pt_obj);
+	kfree(mips_data);
+	fw_dev->processor_data.mips_data = NULL;
+}
+
+static u32
+get_mips_pte_flags(bool read, bool write, int cache_policy)
+{
+	u32 flags = 0;
+
+	if (read && write) /* Read/write. */
+		flags |= ROGUE_MIPSFW_ENTRYLO_DIRTY_EN;
+	else if (write)    /* Write only. */
+		flags |= ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN;
+	else
+		WARN_ON(!read);
+
+	flags |= cache_policy << ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+
+	flags |= ROGUE_MIPSFW_ENTRYLO_VALID_EN | ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN;
+
+	return flags;
+}
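+
+/*
+ * For example, get_mips_pte_flags(true, true, policy) yields a read/write
+ * entry: ROGUE_MIPSFW_ENTRYLO_DIRTY_EN | ROGUE_MIPSFW_ENTRYLO_VALID_EN |
+ * ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN, with @policy placed in the cache policy
+ * field.
+ */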
+
+/**
+ * pvr_vm_mips_map() - Map a FW object into MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EINVAL if object does not reside within FW address space, or
+ *  * Any error returned by pvr_fw_get_dma_addr().
+ */
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+	struct pvr_gem_object *pvr_obj = &fw_obj->base;
+	u64 start = fw_obj->fw_mm_node.start;
+	u64 size = fw_obj->fw_mm_node.size;
+	u64 end;
+	u32 cache_policy;
+	u32 pte_flags;
+	u32 start_pfn;
+	u32 end_pfn;
+	u32 pfn;
+	int err;
+
+	if (check_add_overflow(start, size - 1, &end)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	if (start < ROGUE_FW_HEAP_BASE ||
+	    start >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+	    end < ROGUE_FW_HEAP_BASE ||
+	    end >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+	    (start & ROGUE_MIPSFW_PAGE_MASK_4K) ||
+	    ((end + 1) & ROGUE_MIPSFW_PAGE_MASK_4K)) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+	end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+	if (pvr_obj->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+		cache_policy = ROGUE_MIPSFW_UNCACHED_CACHE_POLICY;
+	else
+		cache_policy = mips_data->cache_policy;
+
+	pte_flags = get_mips_pte_flags(true, true, cache_policy);
+
+	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+		dma_addr_t dma_addr;
+		u32 pte;
+
+		err = pvr_fw_get_dma_addr(fw_obj,
+					  (pfn - start_pfn) << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K,
+					  &dma_addr);
+		if (err)
+			goto err_unmap_pages;
+
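+		/*
+		 * Pack the 4K page frame number of the DMA address into the
+		 * EntryLo PFN field, masked to the physical bus width, then
+		 * OR in the access and cache flags.
+		 */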
+		pte = ((dma_addr >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+		       << ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT) & mips_data->pfn_mask;
+		pte |= pte_flags;
+
+		WRITE_ONCE(mips_data->pt[pfn], pte);
+	}
+
+	pvr_vm_mmu_flush(pvr_dev);
+
+	return 0;
+
+err_unmap_pages:
+	/*
+	 * Clear only the entries written so far; this also avoids the u32
+	 * underflow that a downward inclusive loop would hit when start_pfn
+	 * is zero.
+	 */
+	while (pfn > start_pfn)
+		WRITE_ONCE(mips_data->pt[--pfn], 0);
+
+	pvr_vm_mmu_flush(pvr_dev);
+
+err_out:
+	return err;
+}
+
+/**
+ * pvr_vm_mips_unmap() - Unmap a FW object from MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to unmap.
+ */
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+	u64 start = fw_obj->fw_mm_node.start;
+	u64 size = fw_obj->fw_mm_node.size;
+	u64 end = start + size;
+
+	u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>
+			ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+	u32 end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+	u32 pfn;
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn++)
+		WRITE_ONCE(mips_data->pt[pfn], 0);
+
+	pvr_vm_mmu_flush(pvr_dev);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.h b/drivers/gpu/drm/imagination/pvr_vm_mips.h
new file mode 100644
index 000000000000..8d0926052520
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.h
@@ -0,0 +1,22 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_VM_MIPS_H__
+#define __PVR_VM_MIPS_H__
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev);
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev);
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+void
+pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+#endif /* __PVR_VM_MIPS_H__ */
diff --git a/drivers/gpu/drm/imagination/vendor/pvr_mt8173.c b/drivers/gpu/drm/imagination/vendor/pvr_mt8173.c
new file mode 100644
index 000000000000..864814e38059
--- /dev/null
+++ b/drivers/gpu/drm/imagination/vendor/pvr_mt8173.c
@@ -0,0 +1,121 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+/* Parts copyright (c) 2014 MediaTek Inc. */
+#include "pvr_device.h"
+#include "pvr_vendor.h"
+
+#include <drm/drm_print.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Taken from img-rogue/mt8173/mt8173_mfgsys.c in ChromeOS kernel */
+#define REG_MFG_AXI BIT(0)
+#define REG_MFG_MEM BIT(1)
+#define REG_MFG_G3D BIT(2)
+#define REG_MFG_26M BIT(3)
+#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
+
+struct pvr_mt8173_data {
+	void __iomem *regs;
+};
+
+static int
+mt8173_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+	struct pvr_mt8173_data *mt8173_data;
+	void __iomem *regs;
+	int err;
+
+	mt8173_data = kzalloc(sizeof(*mt8173_data), GFP_KERNEL);
+	if (!mt8173_data) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	regs = devm_platform_ioremap_resource(plat_dev, 1);
+	if (IS_ERR(regs)) {
+		err = PTR_ERR(regs);
+		drm_err(drm_dev,
+			"failed to ioremap mt8173 gpu registers (err=%d)\n",
+			err);
+		goto err_free;
+	}
+
+	mt8173_data->regs = regs;
+
+	pvr_dev->vendor.data = mt8173_data;
+
+	return 0;
+
+err_free:
+	kfree(mt8173_data);
+
+err_out:
+	return err;
+}
+
+static void
+mt8173_fini(struct pvr_device *pvr_dev)
+{
+	struct pvr_mt8173_data *mt8173_data = pvr_dev->vendor.data;
+
+	kfree(mt8173_data);
+	pvr_dev->vendor.data = NULL;
+}
+
+static void
+mtk_mfg_enable_hw_apm(struct pvr_mt8173_data *mt8173_data)
+{
+	/* Taken from img-rogue/mt8173/mt8173_mfgsys.c in ChromeOS kernel */
+	writel(0x003c3d4d, mt8173_data->regs + 0x24);
+	writel(0x4d45440b, mt8173_data->regs + 0x28);
+	writel(0x7a710184, mt8173_data->regs + 0xe0);
+	writel(0x835f6856, mt8173_data->regs + 0xe4);
+	writel(0x002b0234, mt8173_data->regs + 0xe8);
+	writel(0x80000000, mt8173_data->regs + 0xec);
+	writel(0x08000000, mt8173_data->regs + 0xa0);
+}
+
+static void
+mtk_mfg_disable_hw_apm(struct pvr_mt8173_data *mt8173_data)
+{
+	/* Taken from img-rogue/mt8173/mt8173_mfgsys.c in ChromeOS kernel */
+	writel(0x00, mt8173_data->regs + 0xec);
+}
+
+static int
+mt8173_power_enable(struct pvr_device *pvr_dev)
+{
+	struct pvr_mt8173_data *mt8173_data = pvr_dev->vendor.data;
+
+	/* Taken from img-rogue/mt8173/mt8173_mfgsys.c in ChromeOS kernel */
+	writel(REG_MFG_ALL, mt8173_data->regs + REG_MFG_CG_CLR);
+	mtk_mfg_enable_hw_apm(mt8173_data);
+
+	return 0;
+}
+
+static int
+mt8173_power_disable(struct pvr_device *pvr_dev)
+{
+	struct pvr_mt8173_data *mt8173_data = pvr_dev->vendor.data;
+
+	mtk_mfg_disable_hw_apm(mt8173_data);
+
+	return 0;
+}
+
+const struct pvr_vendor_callbacks pvr_mt8173_callbacks = {
+	.init = mt8173_init,
+	.fini = mt8173_fini,
+	.power_enable = mt8173_power_enable,
+	.power_disable = mt8173_power_disable,
+};
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
new file mode 100644
index 000000000000..7deb444a4a57
--- /dev/null
+++ b/include/uapi/drm/pvr_drm.h
@@ -0,0 +1,1502 @@ 
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note OR MIT */
+/* Copyright (c) 2022 Imagination Technologies Ltd. */
+
+#ifndef __PVR_DRM_H__
+#define __PVR_DRM_H__
+
+#include "drm.h"
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: IOCTLS
+ *
+ * The PowerVR IOCTL argument structs have a few limitations in place, in
+ * addition to the standard kernel restrictions:
+ *
+ *  - All members must be type-aligned.
+ *  - The overall struct must be padded to 64-bit alignment.
+ *  - Explicit padding is almost always required. This takes the form of
+ *    &_padding_x members of sufficient size to pad to the next power-of-two
+ *    alignment, where x is the offset into the struct in hexadecimal. Arrays
+ *    are never used for alignment. Padding fields must be zeroed; this is
+ *    always checked.
+ *  - Unions may only appear as the last member of a struct.
+ *  - Individual union members may grow in the future. The space between the
+ *    end of a union member and the end of its containing union is considered
+ *    "implicit padding" and must be zeroed. This is always checked.
+ */
+
+/* clang-format off */
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of DRM_IO{R,W,WR}.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a &drm_pvr_ioctl_ prefix and an
+ * &_args suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return:
+ * An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+	_mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the
+ * DRM ioctl infrastructure supports that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This
+ * drm_pvr_obj_array applies the same concept to indirect objects passed through
+ * arrays referenced from the main ioctl arguments structure: the stride defines
+ * the size of the object passed by userspace, allowing the kernel driver to pad
+ * with zeros when it is smaller than the size of the object it expects.
+ *
+ * Use DRM_PVR_OBJ_ARRAY() to fill object array fields, unless you have a very
+ * good reason not to.
+ */
+struct drm_pvr_obj_array {
+	/** @stride: Stride of object struct. Used for versioning. */
+	__u32 stride;
+
+	/** @count: Number of objects in the array. */
+	__u32 count;
+
+	/** @array: User pointer to an array of objects. */
+	__u64 array;
+};
+
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
+/* clang-format on */
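+
+/*
+ * Illustrative use of DRM_PVR_OBJ_ARRAY() (a sketch only; it assumes the
+ * later-defined &struct drm_pvr_job and &struct drm_pvr_sync_op). Since the
+ * macro expands to a brace initialiser, object array fields are typically
+ * filled at initialisation time:
+ *
+ *    struct drm_pvr_sync_op sync_ops[2] = { 0 };
+ *    struct drm_pvr_job job = {
+ *            .type = DRM_PVR_JOB_TYPE_GEOMETRY,
+ *            .sync_ops = DRM_PVR_OBJ_ARRAY(2, sync_ops),
+ *    };
+ */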
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch GPU information,
+ * returned by %DRM_PVR_DEV_QUERY_GPU_INFO_GET
+ */
+struct drm_pvr_dev_query_gpu_info {
+	/**
+	 * @gpu_id: GPU identifier.
+	 *
+	 * For all currently supported GPUs this is the BVNC encoded as a 64-bit
+	 * value as follows:
+	 *
+	 *    +--------+--------+--------+-------+
+	 *    | 63..48 | 47..32 | 31..16 | 15..0 |
+	 *    +========+========+========+=======+
+	 *    | B      | V      | N      | C     |
+	 *    +--------+--------+--------+-------+
+	 */
+	__u64 gpu_id;
+
+	/**
+	 * @num_phantoms: Number of Phantoms present.
+	 */
+	__u32 num_phantoms;
+};
+
+/**
+ * struct drm_pvr_dev_query_runtime_info - Container used to fetch runtime
+ * information, returned by %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET
+ */
+struct drm_pvr_dev_query_runtime_info {
+	/**
+	 * @free_list_min_pages: Minimum allowed free list size,
+	 * in PM physical pages.
+	 */
+	__u64 free_list_min_pages;
+
+	/**
+	 * @free_list_max_pages: Maximum allowed free list size,
+	 * in PM physical pages.
+	 */
+	__u64 free_list_max_pages;
+
+	/**
+	 * @common_store_alloc_region_size: Size of the Allocation
+	 * Region within the Common Store used for coefficient and shared
+	 * registers, in dwords.
+	 */
+	__u32 common_store_alloc_region_size;
+
+	/**
+	 * @common_store_partition_space_size: Size of the
+	 * Partition Space within the Common Store for output buffers, in
+	 * dwords.
+	 */
+	__u32 common_store_partition_space_size;
+
+	/**
+	 * @max_coeffs: Maximum coefficients, in dwords.
+	 */
+	__u32 max_coeffs;
+
+	/**
+	 * @cdm_max_local_mem_size_regs: Maximum amount of local
+	 * memory available to a compute kernel, in dwords.
+	 */
+	__u32 cdm_max_local_mem_size_regs;
+};
+
+/**
+ * struct drm_pvr_dev_query_hwrt_info - Container used to fetch HWRT
+ * information, returned by %DRM_PVR_DEV_QUERY_HWRT_INFO_GET
+ */
+struct drm_pvr_dev_query_hwrt_info {
+	/**
+	 * @num_geomdatas: Number of geom data arguments
+	 * required when creating a HWRT dataset.
+	 */
+	__u8 num_geomdatas;
+
+	/**
+	 * @num_rtdatas: Number of RT data arguments
+	 * required when creating a HWRT dataset.
+	 */
+	__u8 num_rtdatas;
+
+	/**
+	 * @num_freelists: Number of free list data
+	 * arguments required when creating a HWRT dataset.
+	 */
+	__u8 num_freelists;
+
+	/** @_padding_3: Reserved - will be zeroed */
+	__u8 _padding_3;
+
+	/** @_padding_4: Reserved - will be zeroed */
+	__u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_dev_query_quirks - Container used to fetch the hardware
+ * quirk information, returned by %DRM_PVR_DEV_QUERY_QUIRKS_GET
+ */
+struct drm_pvr_dev_query_quirks {
+	/**
+	 * @quirks: A userspace address for the hardware quirks __u32 array.
+	 *
+	 * The first @musthave_count items in the list are quirks that the
+	 * client must support for this device. If userspace does not support
+	 * all these quirks then functionality is not guaranteed and client
+	 * initialisation must fail.
+	 * The remaining quirks in the list affect userspace and the kernel or
+	 * firmware. They are disabled by default and require userspace to
+	 * opt-in. The opt-in mechanism depends on the quirk.
+	 */
+	__u64 quirks;
+
+	/** @count: Length of @quirks (number of __u32). */
+	__u16 count;
+
+	/**
+	 * @musthave_count: The number of entries in @quirks that are
+	 * mandatory, starting at index 0.
+	 */
+	__u16 musthave_count;
+
+	/** @_padding_c: Reserved. This field must be zeroed. */
+	__u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_enhancements - Container used to fetch the hardware
+ * enhancement information, returned by %DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET
+ */
+struct drm_pvr_dev_query_enhancements {
+	/**
+	 * @enhancements: A userspace address for the hardware enhancements
+	 * __u32 array.
+	 *
+	 * These enhancements affect userspace and the kernel or firmware. They
+	 * are disabled by default and require userspace to opt-in. The opt-in
+	 * mechanism depends on the enhancement.
+	 */
+	__u64 enhancements;
+
+	/** @count: Length of @enhancements (number of __u32). */
+	__u16 count;
+
+	/** @_padding_a: Reserved. This field must be zeroed. */
+	__u16 _padding_a;
+
+	/** @_padding_c: Reserved. This field must be zeroed. */
+	__u32 _padding_c;
+};
+
+/**
+ * enum drm_pvr_heap_id - Array index for heap info data returned by
+ * DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+enum drm_pvr_heap_id {
+	/** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
+	DRM_PVR_HEAP_GENERAL = 0,
+	/** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code & data heap. */
+	DRM_PVR_HEAP_PDS_CODE_DATA,
+	/** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
+	DRM_PVR_HEAP_USC_CODE,
+	/** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
+	DRM_PVR_HEAP_RGNHDR,
+	/** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
+	DRM_PVR_HEAP_VIS_TEST,
+	/** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
+	DRM_PVR_HEAP_TRANSFER_FRAG,
+
+	/**
+	 * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
+	 * DRM_PVR_DEV_QUERY_HEAP_INFO_GET. More heaps may be added, so this
+	 * also serves as the copy limit when sent by the caller.
+	 */
+	DRM_PVR_HEAP_COUNT
+	/* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
+};
+
+/*
+ * DOC: Flags for DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * .. c:macro:: DRM_PVR_HEAP_FLAG_STATIC_CARVEOUT_AT_END
+ *
+ *    The static data area is at the end of the heap memory area, rather than
+ *    at the beginning.
+ *    The base address will be:
+ *        drm_pvr_heap::base +
+ *            (drm_pvr_heap::size - drm_pvr_heap::static_data_carveout_size)
+ */
+#define DRM_PVR_HEAP_FLAG_STATIC_CARVEOUT_AT_END _BITUL(0)
+
+/**
+ * struct drm_pvr_heap - Description of a single heap, an array of which is
+ * returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET
+ */
+struct drm_pvr_heap {
+	/** @base: Base address of heap. */
+	__u64 base;
+
+	/**
+	 * @size: Size of heap, in bytes. Will be 0 if the heap is not present.
+	 */
+	__u64 size;
+
+	/** @flags: Flags for this heap. Combination of DRM_PVR_HEAP_FLAG_* values. */
+	__u32 flags;
+
+	/** @page_size_log2: Log2 of page size. */
+	__u32 page_size_log2;
+};
+
+/**
+ * struct drm_pvr_dev_query_heap_info - Arguments for
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET
+ */
+struct drm_pvr_dev_query_heap_info {
+	/**
+	 * @heaps: Array of struct drm_pvr_heap. If pointer is NULL, the count
+	 * and stride will be updated with those known to the driver version, to
+	 * facilitate allocation by the caller.
+	 */
+	struct drm_pvr_obj_array heaps;
+};
+
+/**
+ * enum drm_pvr_static_data_area_usage - Usage of a static data area
+ */
+enum drm_pvr_static_data_area_usage {
+	/**
+	 * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile USC program.
+	 *
+	 * The End of Tile task runs at completion of a tile, and is responsible for emitting the
+	 * tile to the Pixel Back End.
+	 */
+	DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+
+	/**
+	 * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
+	 * invalidation.
+	 *
+	 * This must point to valid physical memory but the contents otherwise are not used.
+	 */
+	DRM_PVR_STATIC_DATA_AREA_FENCE,
+
+	/**
+	 * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
+	 *
+	 * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
+	 */
+	DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+
+	/**
+	 * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
+	 *
+	 * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
+	 * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
+	 *
+	 * The slots are:
+	 * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
+	 * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
+	 * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
+	 * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
+	 * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
+	 * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
+	 * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
+	 * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
+	 * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
+	 * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
+	 * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
+	 * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
+	 * 14 = Identity (biased)
+	 * 15 = Identity
+	 */
+	DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+
+/**
+ * struct drm_pvr_static_data_area - Description of a single static data area,
+ * an array of which is returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET
+ */
+struct drm_pvr_static_data_area {
+	/**
+	 * @area_usage: Usage of static data area.
+	 * See &enum drm_pvr_static_data_area_usage.
+	 */
+	__u16 area_usage;
+
+	/**
+	 * @location_heap_id: Array index of the heap where this static data
+	 * area is located. This array is fetched using
+	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+	 */
+	__u16 location_heap_id;
+
+	/** @size: Size of static data area. */
+	__u32 size;
+
+	/**
+	 * @offset: Offset of static data area from start of static data
+	 * carveout.
+	 */
+	__u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Arguments for
+ * %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET
+ */
+struct drm_pvr_dev_query_static_data_areas {
+	/**
+	 * @static_data_areas: Array of struct drm_pvr_static_data_area. If
+	 * pointer is NULL, the count and stride will be updated with those
+	 * known to the driver version, to facilitate allocation by the caller.
+	 */
+	struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - Arguments for &drm_pvr_ioctl_dev_query_args.type
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+	/** @DRM_PVR_DEV_QUERY_GPU_INFO_GET: Output is &struct drm_pvr_dev_query_gpu_info. */
+	DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+	/** @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: Output is &struct drm_pvr_dev_query_runtime_info. */
+	DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+	/** @DRM_PVR_DEV_QUERY_HWRT_INFO_GET: Output is &struct drm_pvr_dev_query_hwrt_info. */
+	DRM_PVR_DEV_QUERY_HWRT_INFO_GET,
+
+	/** @DRM_PVR_DEV_QUERY_QUIRKS_GET: Output is &struct drm_pvr_dev_query_quirks. */
+	DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+	/** @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: Output is &struct drm_pvr_dev_query_enhancements. */
+	DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+	/** @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: Output is &struct drm_pvr_dev_query_heap_info. */
+	DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+	/** @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: Output is &struct drm_pvr_dev_query_static_data_areas. */
+	DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY
+ */
+struct drm_pvr_ioctl_dev_query_args {
+	/**
+	 * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+	 */
+	__u32 type;
+
+	/**
+	 * @size: Size of the receiving struct, see @type.
+	 *
+	 * After a successful call this will be updated to the written byte
+	 * length.
+	 * Can also be used to get the minimum byte length (see @pointer).
+	 * This allows additional fields to be appended to the structs in
+	 * future.
+	 */
+	__u32 size;
+
+	/**
+	 * @pointer: [IN] Pointer to receiving struct @type.
+	 *
+	 * Must be large enough to contain @size bytes.
+	 * If pointer is NULL, the expected size will be returned in the @size
+	 * field, but no other data will be written.
+	 */
+	__u64 pointer;
+};
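+
+/*
+ * Illustrative two-call pattern for %DRM_IOCTL_PVR_DEV_QUERY (a sketch only;
+ * it assumes `fd` is an open DRM device file descriptor and the usual
+ * userspace headers, e.g. <sys/ioctl.h> and <stdint.h>):
+ *
+ *    struct drm_pvr_ioctl_dev_query_args args = {
+ *            .type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
+ *            .size = 0,
+ *            .pointer = 0,  // NULL: only fetch the expected size.
+ *    };
+ *    struct drm_pvr_dev_query_gpu_info gpu_info = { 0 };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
+ *            return -1;
+ *
+ *    // args.size now holds the struct size known to this kernel.
+ *    args.size = sizeof(gpu_info);
+ *    args.pointer = (__u64)(uintptr_t)&gpu_info;
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
+ *            return -1;
+ */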
+
+/**
+ * DOC: Flags for CREATE_BO
+ *
+ * The &drm_pvr_ioctl_create_bo_args.flags field is 64 bits wide and consists
+ * of three groups of flags: creation, device mapping and CPU mapping.
+ *
+ * We use "device" to refer to the GPU here because of the ambiguity between
+ * CPU and GPU in some fonts.
+ *
+ * Creation options
+ *    These use the prefix ``DRM_PVR_BO_CREATE_``.
+ *
+ *    :ZEROED: Require the allocated buffer to be zeroed before returning. Note
+ *      that this is an active operation, and is never zero cost. Unless it is
+ *      explicitly required, this option should not be set.
+ *
+ * Device mapping options
+ *    These use the prefix ``DRM_PVR_BO_DEVICE_``.
+ *
+ *    :BYPASS_CACHE: There are very few situations where this flag is useful.
+ *       By default, the device flushes its memory caches after every job.
+ *    :PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or
+ *       firmware processor should be allowed to access this memory when mapped
+ *       to the device. It is not valid to specify this flag with
+ *       CPU_ALLOW_USERSPACE_ACCESS.
+ *
+ * CPU mapping options
+ *    These use the prefix ``DRM_PVR_BO_CPU_``.
+ *
+ *    :ALLOW_USERSPACE_ACCESS: Allow userspace to map and access the contents
+ *       of this memory. It is not valid to specify this flag with
+ *       DEVICE_PM_FW_PROTECT.
+ */
+#define DRM_PVR_BO_DEVICE_BYPASS_CACHE _BITULL(0)
+#define DRM_PVR_BO_DEVICE_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS _BITULL(2)
+#define DRM_PVR_BO_CREATE_ZEROED _BITULL(3)
+/* Bits 4..63 are reserved. */
+
+/**
+ * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
+ */
+struct drm_pvr_ioctl_create_bo_args {
+	/**
+	 * @size: [IN/OUT] Unaligned size of buffer object to create. On
+	 * return, this will be populated with the actual aligned size of the
+	 * new buffer.
+	 */
+	__u64 size;
+
+	/**
+	 * @handle: [OUT] GEM handle of the new buffer object for use in
+	 * userspace.
+	 */
+	__u32 handle;
+
+	/** @_padding_c: Reserved. This field must be zeroed. */
+	__u32 _padding_c;
+
+	/**
+	 * @flags: [IN] Options which will affect the behaviour of this
+	 * creation operation and future mapping operations on the created
+	 * object. This field must be a valid combination of DRM_PVR_BO_*
+	 * values, with all bits marked as reserved set to zero.
+	 */
+	__u64 flags;
+};
+
+/**
+ * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
+ * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
+ *
+ * Like other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
+ * Instead, it allocates a fake offset which refers to the specified buffer
+ * object. This offset can be used with a real mmap call on the DRM device
+ * itself.
+ */
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+	/** @handle: [IN] GEM handle of the buffer object to be mapped. */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+
+	/** @offset: [OUT] Fake offset to use in the real mmap call. */
+	__u64 offset;
+};
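+
+/*
+ * Illustrative sketch of creating a CPU-visible buffer and mapping it into
+ * the process (hypothetical usage; `fd` is an open DRM device file descriptor
+ * and <sys/mman.h> is assumed):
+ *
+ *    struct drm_pvr_ioctl_create_bo_args create = {
+ *            .size = 4096,
+ *            .flags = DRM_PVR_BO_CREATE_ZEROED |
+ *                     DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS,
+ *    };
+ *    struct drm_pvr_ioctl_get_bo_mmap_offset_args offset_args = { 0 };
+ *    void *cpu_ptr;
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &create))
+ *            return -1;
+ *
+ *    offset_args.handle = create.handle;
+ *    if (ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &offset_args))
+ *            return -1;
+ *
+ *    // The fake offset is only meaningful for mmap() on the DRM fd.
+ *    cpu_ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                   fd, offset_args.offset);
+ *    if (cpu_ptr == MAP_FAILED)
+ *            return -1;
+ */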
+
+/**
+ * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_create_vm_context_args {
+	/** @handle: [OUT] Handle for new VM context. */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_vm_context_args {
+	/**
+	 * @handle: [IN] Handle for VM context to be destroyed.
+	 */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+};
+
+/**
+ * DOC: VM UAPI
+ *
+ * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
+ *
+ * The client is responsible for managing GPU address space. It should allocate mappings within
+ * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
+ * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
+ * partially map a buffer.
+ *
+ * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
+ * space. For this reason only the start address is provided by the client.
+ */
+
+/**
+ * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
+ */
+struct drm_pvr_ioctl_vm_map_args {
+	/**
+	 * @vm_context_handle: [IN] Handle for VM context for this mapping to
+	 *                          exist in.
+	 */
+	__u32 vm_context_handle;
+
+	/** @flags: [IN] Flags which affect this mapping. Currently always 0. */
+	__u32 flags;
+
+	/**
+	 * @device_addr: [IN] Requested device-virtual address for the mapping.
+	 * This must be non-zero and aligned to the device page size for the
+	 * heap containing the requested address. It is an error to specify an
+	 * address which is not contained within one of the heaps returned by
+	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+	 */
+	__u64 device_addr;
+
+	/**
+	 * @handle: [IN] Handle of the target buffer object. This must be a
+	 * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
+	 */
+	__u32 handle;
+
+	/** @_padding_14: Reserved. This field must be zeroed. */
+	__u32 _padding_14;
+
+	/**
+	 * @offset: [IN] Offset into the target bo from which to begin the
+	 * mapping.
+	 */
+	__u64 offset;
+
+	/**
+	 * @size: [IN] Size of the requested mapping. Must be aligned to
+	 * the device page size for the heap containing the requested address,
+	 * as well as the host page size. When added to @device_addr, the
+	 * result must not overflow the heap which contains @device_addr (i.e.
+	 * the range specified by @device_addr and @size must be completely
+	 * contained within a single heap specified by
+	 * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
+	 */
+	__u64 size;
+};
+
+/**
+ * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
+ */
+struct drm_pvr_ioctl_vm_unmap_args {
+	/**
+	 * @vm_context_handle: [IN] Handle for VM context that this mapping
+	 *                          exists in.
+	 */
+	__u32 vm_context_handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+
+	/**
+	 * @device_addr: [IN] Device-virtual address at the start of the target
+	 * mapping. This must be non-zero.
+	 */
+	__u64 device_addr;
+};
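+
+/*
+ * Illustrative map/unmap sketch (hypothetical variables: `vm_ctx` from
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT, `bo_handle` and `bo_size` from
+ * %DRM_IOCTL_PVR_CREATE_BO, `dev_addr` a suitably aligned address inside a
+ * heap returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET):
+ *
+ *    struct drm_pvr_ioctl_vm_map_args map = {
+ *            .vm_context_handle = vm_ctx,
+ *            .device_addr = dev_addr,
+ *            .handle = bo_handle,
+ *            .offset = 0,
+ *            .size = bo_size,
+ *    };
+ *    struct drm_pvr_ioctl_vm_unmap_args unmap = {
+ *            .vm_context_handle = vm_ctx,
+ *            .device_addr = dev_addr,
+ *    };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map))
+ *            return -1;
+ *    // ... use the mapping ...
+ *    if (ioctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap))
+ *            return -1;
+ */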
+
+/**
+ * enum drm_pvr_ctx_priority - Arguments for
+ * &drm_pvr_ioctl_create_context_args.priority
+ */
+enum drm_pvr_ctx_priority {
+	/** @DRM_PVR_CTX_PRIORITY_LOW: Below normal priority. */
+	DRM_PVR_CTX_PRIORITY_LOW = -512,
+
+	/** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
+	DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+
+	/**
+	 * @DRM_PVR_CTX_PRIORITY_HIGH: Above normal priority.
+	 *
+	 * A priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER.
+	 */
+	DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+
+/* clang-format off */
+
+/**
+ * enum drm_pvr_ctx_type - Arguments for
+ * &drm_pvr_ioctl_create_context_args.type
+ */
+enum drm_pvr_ctx_type {
+	/**
+	 * @DRM_PVR_CTX_TYPE_RENDER: Render context. The static context state
+	 * passed at creation should be of type
+	 * &struct rogue_fwif_static_rendercontext_state.
+	 */
+	DRM_PVR_CTX_TYPE_RENDER = 0,
+
+	/**
+	 * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. The static context state
+	 * passed at creation should be of type
+	 * &struct rogue_fwif_static_computecontext_state.
+	 */
+	DRM_PVR_CTX_TYPE_COMPUTE,
+
+	/**
+	 * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+	 * masters. Created with &struct drm_pvr_ioctl_create_context_args like
+	 * the other context types.
+	 */
+	DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+
+/* clang-format on */
+
+/**
+ * struct drm_pvr_ioctl_create_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_CONTEXT
+ */
+struct drm_pvr_ioctl_create_context_args {
+	/**
+	 * @type: [IN] Type of context to create.
+	 *
+	 * This must be one of the values defined by &enum drm_pvr_ctx_type.
+	 */
+	__u32 type;
+
+	/** @flags: [IN] Flags for context. */
+	__u32 flags;
+
+	/**
+	 * @priority: [IN] Priority of new context.
+	 *
+	 * This must be one of the values defined by &enum drm_pvr_ctx_priority.
+	 */
+	__s32 priority;
+
+	/** @handle: [OUT] Handle for new context. */
+	__u32 handle;
+
+	/**
+	 * @static_context_state: [IN] Pointer to static context state to copy to
+	 *                             new context.
+	 *
+	 * The state differs based on the value of @type:
+	 * * For %DRM_PVR_CTX_TYPE_RENDER, state should be of type
+	 *   &struct rogue_fwif_static_rendercontext_state.
+	 * * For %DRM_PVR_CTX_TYPE_COMPUTE, state should be of type
+	 *   &struct rogue_fwif_static_computecontext_state.
+	 */
+	__u64 static_context_state;
+
+	/**
+	 * @static_context_state_len: [IN] Length of static context state, in bytes.
+	 */
+	__u32 static_context_state_len;
+
+	/**
+	 * @vm_context_handle: [IN] Handle for VM context that this context is
+	 *                          associated with.
+	 */
+	__u32 vm_context_handle;
+
+	/**
+	 * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
+	 *                       if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise
+	 *                       must be 0.
+	 */
+	__u64 callstack_addr;
+};
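+
+/*
+ * Illustrative render context creation (hypothetical variables: `vm_ctx`,
+ * `callstack_addr`, and a userspace-filled `static_state` blob of type
+ * struct rogue_fwif_static_rendercontext_state):
+ *
+ *    struct drm_pvr_ioctl_create_context_args args = {
+ *            .type = DRM_PVR_CTX_TYPE_RENDER,
+ *            .priority = DRM_PVR_CTX_PRIORITY_NORMAL,
+ *            .static_context_state = (__u64)(uintptr_t)&static_state,
+ *            .static_context_state_len = sizeof(static_state),
+ *            .vm_context_handle = vm_ctx,
+ *            .callstack_addr = callstack_addr,  // render contexts only
+ *    };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_CREATE_CONTEXT, &args))
+ *            return -1;
+ */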
+
+/**
+ * struct drm_pvr_ioctl_destroy_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_context_args {
+	/**
+	 * @handle: [IN] Handle for context to be destroyed.
+	 */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_create_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_FREE_LIST
+ *
+ * Free list arguments have the following constraints:
+ *
+ * - @max_num_pages must be greater than zero.
+ * - @grow_threshold must be between 0 and 100.
+ * - @grow_num_pages must be less than or equal to @max_num_pages.
+ * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
+ *   of 4.
+ *
+ * When @grow_num_pages is 0:
+ * - @initial_num_pages must be equal to @max_num_pages.
+ *
+ * When @grow_num_pages is non-zero:
+ * - @initial_num_pages must be less than @max_num_pages.
+ */
+struct drm_pvr_ioctl_create_free_list_args {
+	/**
+	 * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
+	 *                           containing memory to be used by free list.
+	 *
+	 * The mapped region of the buffer object must be at least
+	 * @max_num_pages * sizeof(__u32).
+	 *
+	 * The buffer object must have been created with
+	 * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT set and
+	 * %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS not set.
+	 */
+	__u64 free_list_gpu_addr;
+
+	/** @initial_num_pages: [IN] Pages initially allocated to free list. */
+	__u32 initial_num_pages;
+
+	/** @max_num_pages: [IN] Maximum number of pages in free list. */
+	__u32 max_num_pages;
+
+	/** @grow_num_pages: [IN] Pages to grow free list by per request. */
+	__u32 grow_num_pages;
+
+	/**
+	 * @grow_threshold: [IN] Percentage of FL memory used that should
+	 *                       trigger a new grow request.
+	 */
+	__u32 grow_threshold;
+
+	/**
+	 * @vm_context_handle: [IN] Handle for VM context that the free list buffer
+	 *                          object is mapped in.
+	 */
+	__u32 vm_context_handle;
+
+	/**
+	 * @handle: [OUT] Handle for created free list.
+	 */
+	__u32 handle;
+};
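+
+/*
+ * Illustrative argument set satisfying the constraints above (hypothetical
+ * variables `fl_dev_addr` and `vm_ctx`; the mapped region backing
+ * `fl_dev_addr` must be at least max_num_pages * sizeof(__u32) bytes):
+ *
+ *    struct drm_pvr_ioctl_create_free_list_args args = {
+ *            .free_list_gpu_addr = fl_dev_addr,
+ *            .initial_num_pages = 1024,  // multiple of 4, < max_num_pages
+ *            .max_num_pages = 4096,      // multiple of 4, > 0
+ *            .grow_num_pages = 512,      // multiple of 4, <= max_num_pages
+ *            .grow_threshold = 75,       // percentage, 0..100
+ *            .vm_context_handle = vm_ctx,
+ *    };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_CREATE_FREE_LIST, &args))
+ *            return -1;
+ */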
+
+/**
+ * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
+ */
+struct drm_pvr_ioctl_destroy_free_list_args {
+	/**
+	 * @handle: [IN] Handle for free list to be destroyed.
+	 */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_create_hwrt_geom_data_args {
+	/** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
+	__u64 tpc_dev_addr;
+
+	/** @tpc_size: [IN] Size of TPC, in bytes. */
+	__u32 tpc_size;
+
+	/** @tpc_stride: [IN] Stride between layers in TPC, in pages. */
+	__u32 tpc_stride;
+
+	/** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
+	__u64 vheap_table_dev_addr;
+
+	/** @rtc_dev_addr: [IN] Render Target Cache virtual address. */
+	__u64 rtc_dev_addr;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_create_hwrt_rt_data_args {
+	/** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
+	__u64 pm_mlist_dev_addr;
+
+	/** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
+	__u64 macrotile_array_dev_addr;
+
+	/** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
+	__u64 region_header_dev_addr;
+};
+
+/**
+ * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+	/** @geom_data_args: [IN] Geometry data arguments. */
+	struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+
+	/** @rt_data_args: [IN] Array of render target arguments. */
+	struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+
+	/**
+	 * @free_list_handles: [IN] Array of free list handles.
+	 *
+	 * free_list_handles[0] must have an initial size of at least that
+	 * reported in the free_list_min_pages field of
+	 * &struct drm_pvr_dev_query_runtime_info.
+	 */
+	__u32 free_list_handles[2];
+
+	/** @width: [IN] Width in pixels. */
+	__u32 width;
+
+	/** @height: [IN] Height in pixels. */
+	__u32 height;
+
+	/** @samples: [IN] Number of samples. */
+	__u32 samples;
+
+	/** @layers: [IN] Number of layers. */
+	__u32 layers;
+
+	/** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
+	__u32 isp_merge_lower_x;
+
+	/** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
+	__u32 isp_merge_lower_y;
+
+	/** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
+	__u32 isp_merge_scale_x;
+
+	/** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
+	__u32 isp_merge_scale_y;
+
+	/** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
+	__u32 isp_merge_upper_x;
+
+	/** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
+	__u32 isp_merge_upper_y;
+
+	/**
+	 * @region_header_size: [IN] Size of region header array. This common field is used by
+	 *                           both render targets in this data set.
+	 *
+	 * The units for this field differ depending on what version of the simple internal
+	 * parameter format the device uses. If format 2 is in use then this is interpreted as the
+	 * number of region headers. For other formats it is interpreted as the size in dwords.
+	 */
+	__u32 region_header_size;
+
+	/**
+	 * @handle: [OUT] Handle for created HWRT dataset.
+	 */
+	__u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+	/**
+	 * @handle: [IN] Handle for HWRT dataset to be destroyed.
+	 */
+	__u32 handle;
+
+	/** @_padding_4: Reserved. This field must be zeroed. */
+	__u32 _padding_4;
+};
+
+/**
+ * DOC: Flags for the drm_pvr_sync_op object.
+ *
+ * Operations
+ * ~~~~~~~~~~
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK
+ *
+ *    Handle type mask for the drm_pvr_sync_op::flags field.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
+ *
+ *    Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
+ *    This is the default type.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
+ *
+ *    Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
+ *
+ *    Signal operation requested. The out-fence bound to the job will be attached to
+ *    the syncobj whose handle is passed in drm_pvr_sync_op::handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
+ *
+ *    Wait operation requested. The job will wait for this particular syncobj or syncobj
+ *    point to be signaled before being started.
+ *    This is the default operation.
+ */
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
+				    DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+
+/**
+ * struct drm_pvr_sync_op - Object describing a sync operation
+ */
+struct drm_pvr_sync_op {
+	/** @handle: Handle of sync object. */
+	__u32 handle;
+
+	/** @flags: Combination of DRM_PVR_SYNC_OP_FLAG_ flags. */
+	__u32 flags;
+
+	/** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
+	__u64 value;
+};
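+
+/*
+ * Illustrative sync array (hypothetical syncobj handles `wait_handle` and
+ * `timeline_handle`): one wait on a binary syncobj, one signal of point 5 on
+ * a timeline syncobj. Note that WAIT and HANDLE_TYPE_SYNCOBJ are the
+ * (zero-valued) defaults, so ORing them in is purely documentary:
+ *
+ *    struct drm_pvr_sync_op sync_ops[2] = {
+ *            {
+ *                    .handle = wait_handle,
+ *                    .flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
+ *                             DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
+ *                    .value = 0,
+ *            },
+ *            {
+ *                    .handle = timeline_handle,
+ *                    .flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
+ *                             DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ,
+ *                    .value = 5,
+ *            },
+ *    };
+ */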
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl geometry command.
+ *
+ * Operations
+ * ~~~~~~~~~~
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
+ *
+ *    Indicates if this is the first command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
+ *
+ *    Indicates if this is the last command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
+ *
+ *    Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
+ *
+ *    Logical OR of all the geometry cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK                                 \
+	(DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST |                                   \
+	 DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST |                                    \
+	 DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl fragment command.
+ *
+ * Operations
+ * ~~~~~~~~~~
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
+ *
+ *    Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
+ *
+ *    Indicates whether a depth buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
+ *
+ *    Indicates whether a stencil buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
+ *
+ *    Disallow compute jobs from overlapping with this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
+ *
+ *    Indicates whether this render produces visibility results.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
+ *
+ *    Logical OR of all the fragment cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK                                 \
+	(DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE |                             \
+	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER |                             \
+	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER |                           \
+	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP |                     \
+	 DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl compute command.
+ *
+ * Operations
+ * ~~~~~~~~~~
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
+ *
+ *    Disallow other jobs from overlapping with this compute job.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
+ *
+ *    Forces the job to use a single core in a multi-core device.
+ */
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK         \
+	(DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
+	 DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl transfer command.
+ *
+ * Operations
+ * ~~~~~~~~~~
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+ *
+ *    Forces job to use a single core in a multi core device.
+ */
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
+	DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+
+/**
+ * enum drm_pvr_job_type - Arguments for &drm_pvr_job.job_type
+ */
+enum drm_pvr_job_type {
+	/** @DRM_PVR_JOB_TYPE_GEOMETRY: Geometry job. */
+	DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+
+	/** @DRM_PVR_JOB_TYPE_FRAGMENT: Fragment job. */
+	DRM_PVR_JOB_TYPE_FRAGMENT,
+
+	/** @DRM_PVR_JOB_TYPE_COMPUTE: Compute job. */
+	DRM_PVR_JOB_TYPE_COMPUTE,
+
+	/** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Transfer (fragment data master) job. */
+	DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+	/** @set_handle: HWRT data set handle. */
+	__u32 set_handle;
+
+	/** @data_index: Index of the HWRT data inside the data set. */
+	__u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+	/**
+	 * @type: [IN] Type of job being submitted
+	 *
+	 * This must be one of the values defined by &enum drm_pvr_job_type.
+	 */
+	__u32 type;
+
+	/**
+	 * @context_handle: [IN] Context handle.
+	 *
+	 * This must be a valid handle returned by
+	 * %DRM_IOCTL_PVR_CREATE_CONTEXT. The type of the context must be
+	 * compatible with the type of job being submitted (@type).
+	 */
+	__u32 context_handle;
+
+	/**
+	 * @flags: [IN] Flags for command.
+	 *
+	 * Those are job-dependent. See DRM_PVR_SUBMIT_JOB_xxx_
+	 */
+	__u32 flags;
+
+	/**
+	 * @cmd_stream_len: [IN] Length of command stream, in bytes.
+	 */
+	__u32 cmd_stream_len;
+
+	/**
+	 * @cmd_stream: [IN] Pointer to command stream for command.
+	 *
+	 * The command stream must be u64-aligned.
+	 */
+	__u64 cmd_stream;
+
+	/** @sync_ops: [IN] Sync operations for this job. */
+	struct drm_pvr_obj_array sync_ops;
+
+	/**
+	 * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+	 *
+	 * Must be zero for non-render jobs.
+	 */
+	struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+	/** @jobs: [IN] Array of jobs to submit. */
+	struct drm_pvr_obj_array jobs;
+};
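+
+/*
+ * Illustrative geometry + fragment submission (hypothetical variables:
+ * `render_ctx`, `hwrt_set`, `geom_stream`/`geom_len` and
+ * `frag_stream`/`frag_len`; command streams must be u64-aligned):
+ *
+ *    struct drm_pvr_job jobs[2] = {
+ *            {
+ *                    .type = DRM_PVR_JOB_TYPE_GEOMETRY,
+ *                    .context_handle = render_ctx,
+ *                    .flags = DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST |
+ *                             DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST,
+ *                    .cmd_stream_len = geom_len,
+ *                    .cmd_stream = (__u64)(uintptr_t)geom_stream,
+ *                    .hwrt = { .set_handle = hwrt_set, .data_index = 0 },
+ *            },
+ *            {
+ *                    .type = DRM_PVR_JOB_TYPE_FRAGMENT,
+ *                    .context_handle = render_ctx,
+ *                    .cmd_stream_len = frag_len,
+ *                    .cmd_stream = (__u64)(uintptr_t)frag_stream,
+ *                    .hwrt = { .set_handle = hwrt_set, .data_index = 0 },
+ *            },
+ *    };
+ *    struct drm_pvr_ioctl_submit_jobs_args submit = {
+ *            .jobs = DRM_PVR_OBJ_ARRAY(2, jobs),
+ *    };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &submit))
+ *            return -1;
+ */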
+
+/* Definitions for coredump decoding in userspace. */
+
+#define PVR_COREDUMP_HEADER_MAGIC 0x21525650 /* PVR! */
+#define PVR_COREDUMP_HEADER_VERSION_MAJ 1
+#define PVR_COREDUMP_HEADER_VERSION_MIN 0
+
+/**
+ * struct pvr_coredump_header - Header of PowerVR coredump
+ */
+struct pvr_coredump_header {
+	/** @magic: Will be %PVR_COREDUMP_HEADER_MAGIC. */
+	__u32 magic;
+	/** @major_version: Will be %PVR_COREDUMP_HEADER_VERSION_MAJ. */
+	__u32 major_version;
+	/** @minor_version: Will be %PVR_COREDUMP_HEADER_VERSION_MIN. */
+	__u32 minor_version;
+	/** @flags: Flags for this coredump. Currently no flags are defined, this should be zero. */
+	__u32 flags;
+	/** @size: Size of coredump (including this header) in bytes. */
+	__u32 size;
+	/** @padding: Reserved. This field must be zero. */
+	__u32 padding;
+};
+
+/**
+ * enum pvr_coredump_block_type - Valid coredump block types
+ */
+enum pvr_coredump_block_type {
+	/**
+	 * @PVR_COREDUMP_BLOCK_TYPE_DEVINFO: Device information block.
+	 *
+	 * Block data is &struct pvr_coredump_block_devinfo.
+	 */
+	PVR_COREDUMP_BLOCK_TYPE_DEVINFO = 0,
+
+	/**
+	 * @PVR_COREDUMP_BLOCK_TYPE_REGISTERS: Register block.
+	 *
+	 * Block data is an array of &struct pvr_coredump_block_register. Number of registers is
+	 * determined by block size.
+	 */
+	PVR_COREDUMP_BLOCK_TYPE_REGISTERS,
+
+	/**
+	 * @PVR_COREDUMP_BLOCK_TYPE_CONTEXT_RESET_DATA: Context reset data block.
+	 *
+	 * Block data is &struct pvr_coredump_block_reset_data.
+	 */
+	PVR_COREDUMP_BLOCK_TYPE_CONTEXT_RESET_DATA,
+
+	/**
+	 * @PVR_COREDUMP_BLOCK_TYPE_HWRINFO: Hardware Reset information block.
+	 *
+	 * Block data is &struct pvr_coredump_block_hwrinfo.
+	 */
+	PVR_COREDUMP_BLOCK_TYPE_HWRINFO,
+};
+
+/**
+ * struct pvr_coredump_block_header - Header of PowerVR coredump block
+ *
+ * Block data immediately follows this header. The format is determined by @type.
+ */
+struct pvr_coredump_block_header {
+	/** @type: Block type. One of %PVR_COREDUMP_BLOCK_TYPE_*. */
+	__u32 type;
+	/** @size: Size of block data following this header, in bytes. */
+	__u32 size;
+	/** @flags: Type dependent flags. */
+	__u32 flags;
+	/** @padding: Reserved. This field must be zero. */
+	__u32 padding;
+};
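+
+/*
+ * Illustrative decoder loop (a sketch only; `data` is assumed to point at a
+ * complete coredump of at least hdr->size bytes):
+ *
+ *    const struct pvr_coredump_header *hdr = data;
+ *    const unsigned char *pos = (const unsigned char *)(hdr + 1);
+ *    const unsigned char *end = (const unsigned char *)data + hdr->size;
+ *
+ *    if (hdr->magic != PVR_COREDUMP_HEADER_MAGIC ||
+ *        hdr->major_version != PVR_COREDUMP_HEADER_VERSION_MAJ)
+ *            return -1;
+ *
+ *    while (pos + sizeof(struct pvr_coredump_block_header) <= end) {
+ *            const struct pvr_coredump_block_header *blk = (const void *)pos;
+ *
+ *            switch (blk->type) {
+ *            case PVR_COREDUMP_BLOCK_TYPE_DEVINFO:
+ *                    // Block data is struct pvr_coredump_block_devinfo.
+ *                    break;
+ *            default:
+ *                    break;  // Skip unknown block types.
+ *            }
+ *
+ *            pos += sizeof(*blk) + blk->size;
+ *    }
+ */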
+
+#define PVR_COREDUMP_PROCESS_NAME_LEN 16
+#define PVR_COREDUMP_VERSION_LEN      65
+#define PVR_COREDUMP_DEVINFO_PADDING (8 - ((PVR_COREDUMP_PROCESS_NAME_LEN + \
+					    PVR_COREDUMP_VERSION_LEN) & 7))
+
+/**
+ * struct pvr_coredump_block_devinfo - Device information block
+ */
+struct pvr_coredump_block_devinfo {
+	/** @gpu_id: GPU ID. */
+	__u64 gpu_id;
+	/** @fw_version: Version of PowerVR firmware on system that created the coredump. */
+	struct {
+		/** @major: Major version number. */
+		__u32 major;
+		/** @minor: Minor version number. */
+		__u32 minor;
+	} fw_version;
+	/** @process_name: Name of process that submitted the failed job. */
+	char process_name[PVR_COREDUMP_PROCESS_NAME_LEN];
+	/** @kernel_version: String of kernel version on system that created the coredump. */
+	char kernel_version[PVR_COREDUMP_VERSION_LEN];
+	/** @padding: Reserved. This field must be zero. */
+	__u8 padding[PVR_COREDUMP_DEVINFO_PADDING];
+};
+
+/** %PVR_COREDUMP_REGISTER_FLAG_SIZE_MASK: Mask of register size field. */
+#define PVR_COREDUMP_REGISTER_FLAG_SIZE_MASK 7
+/** %PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT: Register is 32-bits wide. */
+#define PVR_COREDUMP_REGISTER_FLAG_SIZE_32BIT 2
+/** %PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT: Register is 64-bits wide. */
+#define PVR_COREDUMP_REGISTER_FLAG_SIZE_64BIT 3
+
+/**
+ * struct pvr_coredump_block_register - PowerVR register dump
+ */
+struct pvr_coredump_block_register {
+	/** @offset: Offset of register. */
+	__u32 offset;
+	/** @flags: Flags for this register. Combination of %PVR_COREDUMP_REGISTER_FLAG_*. */
+	__u32 flags;
+	/** @value: Value of register. */
+	__u64 value;
+};
+
+/** %PVR_COREDUMP_RESET_DATA_FLAG_PF: Set if a page fault happened. */
+#define PVR_COREDUMP_RESET_DATA_FLAG_PF _BITUL(0)
+/** %PVR_COREDUMP_RESET_DATA_FLAG_ALL_CTXS: Set if reset applicable to all contexts. */
+#define PVR_COREDUMP_RESET_DATA_FLAG_ALL_CTXS _BITUL(1)
+
+/** %PVR_COREDUMP_RESET_REASON_NONE: No reset reason recorded. */
+#define PVR_COREDUMP_RESET_REASON_NONE 0
+/** %PVR_COREDUMP_RESET_REASON_GUILTY_LOCKUP: Caused a reset due to locking up. */
+#define PVR_COREDUMP_RESET_REASON_GUILTY_LOCKUP 1
+/** %PVR_COREDUMP_RESET_REASON_INNOCENT_LOCKUP: Affected by another context locking up. */
+#define PVR_COREDUMP_RESET_REASON_INNOCENT_LOCKUP 2
+/** %PVR_COREDUMP_RESET_REASON_GUILTY_OVERRUNING: Overran the global deadline. */
+#define PVR_COREDUMP_RESET_REASON_GUILTY_OVERRUNING 3
+/** %PVR_COREDUMP_RESET_REASON_INNOCENT_OVERRUNING: Affected by another context overrunning. */
+#define PVR_COREDUMP_RESET_REASON_INNOCENT_OVERRUNING 4
+/** %PVR_COREDUMP_RESET_REASON_HARD_CONTEXT_SWITCH: Forced reset to meet scheduling requirements. */
+#define PVR_COREDUMP_RESET_REASON_HARD_CONTEXT_SWITCH 5
+/** %PVR_COREDUMP_RESET_REASON_FW_WATCHDOG: FW Safety watchdog triggered. */
+#define PVR_COREDUMP_RESET_REASON_FW_WATCHDOG 12
+/** %PVR_COREDUMP_RESET_REASON_FW_PAGEFAULT: FW page fault (no HWR). */
+#define PVR_COREDUMP_RESET_REASON_FW_PAGEFAULT 13
+/** %PVR_COREDUMP_RESET_REASON_FW_EXEC_ERR: FW execution error (GPU reset requested). */
+#define PVR_COREDUMP_RESET_REASON_FW_EXEC_ERR 14
+/** %PVR_COREDUMP_RESET_REASON_HOST_WDG_FW_ERR: Host watchdog detected FW error. */
+#define PVR_COREDUMP_RESET_REASON_HOST_WDG_FW_ERR 15
+/** %PVR_COREDUMP_RESET_REASON_GEOM_OOM_DISABLED: Geometry DM OOM event is not allowed. */
+#define PVR_COREDUMP_RESET_REASON_GEOM_OOM_DISABLED 16
+
+/** %PVR_COREDUMP_DM_GP: General purpose Data Master. */
+#define PVR_COREDUMP_DM_GP 0
+/** %PVR_COREDUMP_DM_2D: 2D Data Master. */
+#define PVR_COREDUMP_DM_2D 1
+/** %PVR_COREDUMP_DM_GEOM: Geometry Data Master. */
+#define PVR_COREDUMP_DM_GEOM 2
+/** %PVR_COREDUMP_DM_FRAG: Fragment Data Master. */
+#define PVR_COREDUMP_DM_FRAG 3
+/** %PVR_COREDUMP_DM_CDM: Compute Data Master. */
+#define PVR_COREDUMP_DM_CDM 4
+/** %PVR_COREDUMP_DM_RAY: Ray tracing Data Master. */
+#define PVR_COREDUMP_DM_RAY 5
+/** %PVR_COREDUMP_DM_GEOM2: Geometry 2 Data Master. */
+#define PVR_COREDUMP_DM_GEOM2 6
+/** %PVR_COREDUMP_DM_GEOM3: Geometry 3 Data Master. */
+#define PVR_COREDUMP_DM_GEOM3 7
+/** %PVR_COREDUMP_DM_GEOM4: Geometry 4 Data Master. */
+#define PVR_COREDUMP_DM_GEOM4 8
+
+/**
+ * struct pvr_coredump_block_reset_data - Firmware context reset data
+ */
+struct pvr_coredump_block_reset_data {
+	/** @context_id: FW ID of context affected by the reset. */
+	__u32 context_id;
+	/** @reset_reason: Reason for reset. One of %PVR_COREDUMP_RESET_REASON_*. */
+	__u32 reset_reason;
+	/** @dm: Data Master affected by the reset. One of %PVR_COREDUMP_DM_*. */
+	__u32 dm;
+	/** @reset_job_ref: Internal job ref running at the time of reset. */
+	__u32 reset_job_ref;
+	/** @flags: Reset data flags. Combination of %PVR_COREDUMP_RESET_DATA_FLAG_*. */
+	__u32 flags;
+	/** @padding: Reserved. This field must be zero. */
+	__u32 padding;
+	/**
+	 * @fault_address: Page fault address. Only valid when %PVR_COREDUMP_RESET_DATA_FLAG_PF is
+	 *                 set in @flags.
+	 */
+	__u64 fault_address;
+};
+
+/** %PVR_COREDUMP_HWRTYPE_UNKNOWNFAILURE: HWR triggered by unknown failure. */
+#define PVR_COREDUMP_HWRTYPE_UNKNOWNFAILURE 0
+/** %PVR_COREDUMP_HWRTYPE_OVERRUN: HWR triggered by overrun. */
+#define PVR_COREDUMP_HWRTYPE_OVERRUN 1
+/** %PVR_COREDUMP_HWRTYPE_POLLFAILURE: HWR triggered by poll timeout. */
+#define PVR_COREDUMP_HWRTYPE_POLLFAILURE 2
+/** %PVR_COREDUMP_HWRTYPE_BIF0FAULT: HWR triggered by fault from Bus Interface 0. */
+#define PVR_COREDUMP_HWRTYPE_BIF0FAULT 3
+/** %PVR_COREDUMP_HWRTYPE_BIF1FAULT: HWR triggered by fault from Bus Interface 1. */
+#define PVR_COREDUMP_HWRTYPE_BIF1FAULT 4
+/** %PVR_COREDUMP_HWRTYPE_TEXASBIF0FAULT: HWR triggered by fault from Texas Bus Interface 0. */
+#define PVR_COREDUMP_HWRTYPE_TEXASBIF0FAULT 5
+/** %PVR_COREDUMP_HWRTYPE_MMUFAULT: HWR triggered by MMU fault. */
+#define PVR_COREDUMP_HWRTYPE_MMUFAULT 6
+/** %PVR_COREDUMP_HWRTYPE_MMUMETAFAULT: HWR triggered by MMU fault caused by META FW processor. */
+#define PVR_COREDUMP_HWRTYPE_MMUMETAFAULT 7
+/** %PVR_COREDUMP_HWRTYPE_MIPSTLBFAULT: HWR triggered by TLB fault from MIPS FW processor. */
+#define PVR_COREDUMP_HWRTYPE_MIPSTLBFAULT 8
+/** %PVR_COREDUMP_HWRTYPE_ECCFAULT: HWR triggered by ECC fault. */
+#define PVR_COREDUMP_HWRTYPE_ECCFAULT 9
+/** %PVR_COREDUMP_HWRTYPE_MMURISCVFAULT: HWR triggered by MMU fault from RISC-V FW processor. */
+#define PVR_COREDUMP_HWRTYPE_MMURISCVFAULT 10
+
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_WORKING: DM is working if all flags are cleared. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_WORKING 0
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_READY_FOR_HWR: DM is idle and ready for HWR. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_READY_FOR_HWR _BITUL(0)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_SKIP: DM needs to skip to the next cmd before resuming processing. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_SKIP _BITUL(2)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_PR_CLEANUP: DM needs partial render cleanup before resuming processing. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_PR_CLEANUP _BITUL(3)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_TRACE_CLEAR: DM needs to increment the Recovery Count once fully recovered. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_NEEDS_TRACE_CLEAR _BITUL(4)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_GUILTY_LOCKUP: DM was identified as locking up and causing HWR. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_GUILTY_LOCKUP _BITUL(5)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_INNOCENT_LOCKUP: DM was innocently affected by another lockup which caused HWR. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_INNOCENT_LOCKUP _BITUL(6)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_GUILTY_OVERRUNING: DM was identified as overrunning and causing HWR. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_GUILTY_OVERRUNING _BITUL(7)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_INNOCENT_OVERRUNING: DM was innocently affected by another DM overrunning which caused HWR. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_INNOCENT_OVERRUNING _BITUL(8)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_HARD_CONTEXT_SWITCH: DM was forced into HWR as it delayed more important workloads. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_HARD_CONTEXT_SWITCH _BITUL(9)
+/** %PVR_COREDUMP_HWRINFO_DM_STATE_GPU_ECC_HWR: DM was forced into HWR due to an uncorrected GPU ECC error. */
+#define PVR_COREDUMP_HWRINFO_DM_STATE_GPU_ECC_HWR _BITUL(10)
+
+/**
+ * struct pvr_coredump_hwrinfo_bifinfo - Bus Interface fault information
+ */
+struct pvr_coredump_hwrinfo_bifinfo {
+	/** @bif_req_status: Request status for affected BIF. */
+	__u64 bif_req_status;
+	/** @bif_mmu_status: MMU status for affected BIF. */
+	__u64 bif_mmu_status;
+};
+
+/**
+ * struct pvr_coredump_hwrinfo_eccinfo - ECC fault information
+ */
+struct pvr_coredump_hwrinfo_eccinfo {
+	/** @fault_gpu: GPU fault information. */
+	__u32 fault_gpu;
+};
+
+/**
+ * struct pvr_coredump_hwrinfo_mmuinfo - MMU fault information
+ */
+struct pvr_coredump_hwrinfo_mmuinfo {
+	/** @mmu_status: MMU status. */
+	__u64 mmu_status[2];
+};
+
+/**
+ * struct pvr_coredump_hwrinfo_pollinfo - Poll timeout information
+ */
+struct pvr_coredump_hwrinfo_pollinfo {
+	/** @thread_num: Number of the thread which timed out on a poll. */
+	__u32 thread_num;
+	/** @cr_poll_addr: Address of timed out poll. */
+	__u32 cr_poll_addr;
+	/** @cr_poll_mask: Mask of timed out poll. */
+	__u32 cr_poll_mask;
+	/** @cr_poll_last_value: Last value read from polled location. */
+	__u32 cr_poll_last_value;
+};
+
+/**
+ * struct pvr_coredump_hwrinfo_tlbinfo - MIPS TLB fault information
+ */
+struct pvr_coredump_hwrinfo_tlbinfo {
+	/** @bad_addr: Virtual address of failed access. */
+	__u32 bad_addr;
+	/** @entry_lo: MIPS TLB EntryLo for failed access. */
+	__u32 entry_lo;
+};
+
+/**
+ * struct pvr_coredump_block_hwrinfo - Firmware hardware reset information
+ */
+struct pvr_coredump_block_hwrinfo {
+	/** @hwr_type: Type of HWR event. One of %PVR_COREDUMP_HWRTYPE_*. */
+	__u32 hwr_type;
+	/** @dm: Data Master affected by the HWR event. One of %PVR_COREDUMP_DM_*. */
+	__u32 dm;
+	/** @core_id: ID of GPU core affected by the HWR event. */
+	__u32 core_id;
+	/** @event_status: Event status of Data Master. */
+	__u32 event_status;
+	/** @dm_state: Data Master state. Combination of %PVR_COREDUMP_HWRINFO_DM_STATE_*. */
+	__u32 dm_state;
+	/** @active_hwrt_data: FW address of affected HWRT data. */
+	__u32 active_hwrt_data;
+
+	/** @hwr_data: HWR type specific data. Determined by @hwr_type. */
+	union {
+		/**
+		 * @bif_info: Bus Interface specific information.
+		 *
+		 * Used for %PVR_COREDUMP_HWRTYPE_BIF0FAULT, %PVR_COREDUMP_HWRTYPE_BIF1FAULT,
+		 * %PVR_COREDUMP_HWRTYPE_TEXASBIF0FAULT and %PVR_COREDUMP_HWRTYPE_MMURISCVFAULT.
+		 */
+		struct pvr_coredump_hwrinfo_bifinfo bif_info;
+
+		/**
+		 * @mmu_info: MMU specific information.
+		 *
+		 * Used for %PVR_COREDUMP_HWRTYPE_MMUFAULT and %PVR_COREDUMP_HWRTYPE_MMUMETAFAULT.
+		 */
+		struct pvr_coredump_hwrinfo_mmuinfo mmu_info;
+
+		/**
+		 * @poll_info: Poll timeout specific information.
+		 *
+		 * Used for %PVR_COREDUMP_HWRTYPE_POLLFAILURE.
+		 */
+		struct pvr_coredump_hwrinfo_pollinfo poll_info;
+
+		/**
+		 * @tlb_info: MIPS TLB specific information.
+		 *
+		 * Used for %PVR_COREDUMP_HWRTYPE_MIPSTLBFAULT.
+		 */
+		struct pvr_coredump_hwrinfo_tlbinfo tlb_info;
+
+		/**
+		 * @ecc_info: ECC specific information.
+		 *
+		 * Used for %PVR_COREDUMP_HWRTYPE_ECCFAULT.
+		 */
+		struct pvr_coredump_hwrinfo_eccinfo ecc_info;
+	} hwr_data;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __PVR_DRM_H__ */