#include "amdgpu_rap.h"
 #include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"
+#include "amdgpu_umr.h"
 
 int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
 {
        return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }
 
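+/*
+ * The "regs2" interface allows MMIO register access with the GRBM/SRBM bank
+ * selection and powergating-lock request supplied through an ioctl rather
+ * than encoded in the file offset.  Every open() gets its own
+ * amdgpu_debugfs_regs2_data, so each file handle keeps independent state.
+ */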
+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+       struct amdgpu_debugfs_regs2_data *rd;
+
+       rd = kzalloc(sizeof *rd, GFP_KERNEL);
+       if (!rd)
+               return -ENOMEM;
+       rd->adev = file_inode(file)->i_private;
+       file->private_data = rd;
+       mutex_init(&rd->lock);
+
+       return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+       struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+       mutex_destroy(&rd->lock);
+       kfree(rd);
+       return 0;
+}
+
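+/*
+ * Carry out a register read or write of @size bytes starting at byte
+ * @offset.  The GRBM/SRBM selections and the powergating lock requested
+ * through the SET_STATE ioctl are applied for the duration of the access.
+ */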
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+       struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+       struct amdgpu_device *adev = rd->adev;
+       ssize_t result = 0;
+       int r;
+       uint32_t value;
+
+       if (size & 0x3 || offset & 0x3)
+               return -EINVAL;
+
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return r;
+       }
+
+       r = amdgpu_virt_enable_access_debugfs(adev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return r;
+       }
+
+       mutex_lock(&rd->lock);
+
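+       /* apply the requested GRBM bank selection, rejecting out-of-range SE/SH indices */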
+       if (rd->id.use_grbm) {
+               if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+                   (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+                       amdgpu_virt_disable_access_debugfs(adev);
+                       mutex_unlock(&rd->lock);
+                       return -EINVAL;
+               }
+               mutex_lock(&adev->grbm_idx_mutex);
+               amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+                                                               rd->id.grbm.sh,
+                                                               rd->id.grbm.instance);
+       }
+
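+       /* optionally select a specific ME/pipe/queue/VMID through SRBM */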
+       if (rd->id.use_srbm) {
+               mutex_lock(&adev->srbm_mutex);
+               amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+                                                                       rd->id.srbm.queue, rd->id.srbm.vmid);
+       }
+
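+       /* take the PM mutex if the powergating lock was requested */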
+       if (rd->id.pg_lock)
+               mutex_lock(&adev->pm.mutex);
+
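+       /* copy the data one 32-bit register at a time */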
+       while (size) {
+               if (!write_en) {
+                       value = RREG32(offset >> 2);
+                       r = put_user(value, (uint32_t *)buf);
+               } else {
+                       r = get_user(value, (uint32_t *)buf);
+                       if (!r)
+                               amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+               }
+               if (r) {
+                       result = r;
+                       goto end;
+               }
+               offset += 4;
+               size -= 4;
+               result += 4;
+               buf += 4;
+       }
+end:
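+       /* undo the bank selections and release the locks taken above */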
+       if (rd->id.use_grbm) {
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+       }
+
+       if (rd->id.use_srbm) {
+               amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       }
+
+       if (rd->id.pg_lock)
+               mutex_unlock(&adev->pm.mutex);
+
+       mutex_unlock(&rd->lock);
+
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+       amdgpu_virt_disable_access_debugfs(adev);
+       return result;
+}
+
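+/*
+ * SET_STATE installs a new amdgpu_debugfs_regs2_iocdata, i.e. the bank
+ * selection and lock flags used by subsequent reads and writes on this
+ * file handle.
+ */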
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+       struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+       int r;
+
+       switch (cmd) {
+       case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+               mutex_lock(&rd->lock);
+               r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+               mutex_unlock(&rd->lock);
+               return r ? -EFAULT : 0;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
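+/* read() and write() map directly onto amdgpu_debugfs_regs2_op() */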
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+       return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+       return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}
+
 
 /**
  * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
        return result;
 }
 
+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+       .read = amdgpu_debugfs_regs2_read,
+       .write = amdgpu_debugfs_regs2_write,
+       .open = amdgpu_debugfs_regs2_open,
+       .release = amdgpu_debugfs_regs2_release,
+       .llseek = default_llseek
+};
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
 
 static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
+       &amdgpu_debugfs_regs2_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
 
 static const char *debugfs_regs_names[] = {
        "amdgpu_regs",
+       "amdgpu_regs2",
        "amdgpu_regs_didt",
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
 
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/ioctl.h>
+
+/*
+ * MMIO debugfs IOCTL structure
+ */
+struct amdgpu_debugfs_regs2_iocdata {
+       __u32 use_srbm, use_grbm, pg_lock;      /* which selections below to apply and whether to hold the PM lock */
+       struct {
+               __u32 se, sh, instance;         /* GRBM bank: shader engine, shader array, instance */
+       } grbm;
+       struct {
+               __u32 me, pipe, queue, vmid;    /* SRBM bank: micro engine, pipe, queue, VMID */
+       } srbm;
+};
+
+/*
+ * MMIO debugfs state data (per file* handle)
+ */
+struct amdgpu_debugfs_regs2_data {
+       struct amdgpu_device *adev;
+       struct mutex lock;
+       struct amdgpu_debugfs_regs2_iocdata id;
+};
+
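+/*
+ * ioctl commands understood by the amdgpu_regs2 debugfs file
+ */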
+enum AMDGPU_DEBUGFS_REGS2_CMDS {
+       AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
+};
+
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)