#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC         0x3e35
 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC          0x8a02
 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC         0x8a12
+/* Tiger Lake IMC (integrated memory controller) PCI device IDs */
+#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC         0x9a02
+#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC         0x9a04
+#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC         0x9a12
+#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC         0x9a14
+#define PCI_DEVICE_ID_INTEL_TGL_H_IMC          0x9a36
 
 
 /* SNB event control */
 }
 
 /* end of Nehalem uncore support */
+
+/* Tiger Lake MMIO uncore support */
+
+/*
+ * Known Tiger Lake IMC devices.  Walked by tgl_uncore_get_mc_dev() below
+ * to locate the memory controller whose config space holds MCHBAR.
+ */
+static const struct pci_device_id tgl_uncore_pci_ids[] = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ }
+};
+
+/* Indices of the Tiger Lake IMC free-running counter types */
+enum perf_tgl_uncore_imc_freerunning_types {
+       TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
+       TGL_MMIO_UNCORE_IMC_DATA_READ,
+       TGL_MMIO_UNCORE_IMC_DATA_WRITE,
+       TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
+};
+
+/*
+ * Counter MMIO layout for Tiger Lake-L parts (different register offsets
+ * than the table below; selected in tgl_l_uncore_mmio_init()).
+ * NOTE(review): initializer fields presumably map to struct
+ * freerunning_counters { base, offset, box_offset, num, bits } — confirm
+ * against the uncore header.
+ */
+static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
+       [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0x5040, 0x0, 0x0, 1, 64 },
+       [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0x5058, 0x0, 0x0, 1, 64 },
+       [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0x50A0, 0x0, 0x0, 1, 64 },
+};
+
+/*
+ * Counter MMIO layout for the remaining Tiger Lake parts (default table
+ * in tgl_uncore_imc_free_running; same shape as tgl_l_uncore_imc_freerunning
+ * but at 0xd8xx offsets).
+ */
+static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
+       [TGL_MMIO_UNCORE_IMC_DATA_TOTAL]        = { 0xd840, 0x0, 0x0, 1, 64 },
+       [TGL_MMIO_UNCORE_IMC_DATA_READ]         = { 0xd858, 0x0, 0x0, 1, 64 },
+       [TGL_MMIO_UNCORE_IMC_DATA_WRITE]        = { 0xd8A0, 0x0, 0x0, 1, 64 },
+};
+
+/*
+ * Event descriptors exposed through sysfs.
+ * The scale 6.103515625e-5 is 64 / 2^20: each counter increment is
+ * presumably one 64-byte transfer, reported to userspace in MiB.
+ */
+static struct uncore_event_desc tgl_uncore_imc_events[] = {
+       INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
+       INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
+       INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),
+
+       INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
+       INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
+       INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),
+
+       INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
+       INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
+       INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),
+
+       { /* end: all zeroes */ }
+};
+
+/*
+ * Find the first present Tiger Lake IMC device from tgl_uncore_pci_ids.
+ * On success the returned pci_dev carries a reference taken by
+ * pci_get_device(); the caller is responsible for pci_dev_put().
+ * Returns NULL when none of the known IDs is present.
+ */
+static struct pci_dev *tgl_uncore_get_mc_dev(void)
+{
+       const struct pci_device_id *id;
+       struct pci_dev *pdev;
+
+       for (id = tgl_uncore_pci_ids; id->vendor; id++) {
+               pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id->device, NULL);
+               if (pdev)
+                       return pdev;
+       }
+
+       return NULL;
+}
+
+#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET         0x10000
+
+/*
+ * Map the free-running IMC counter region for @box.
+ *
+ * Reads MCHBAR (enable bit 0, address in the remaining bits, upper half
+ * in the next config dword on 64-bit) from the IMC's PCI config space
+ * and ioremaps the window for this PMU instance; each box's window is
+ * TGL_UNCORE_MMIO_IMC_MEM_OFFSET apart.  On failure box->io_addr is
+ * left unset.
+ *
+ * Fix: tgl_uncore_get_mc_dev() returns a pci_dev with a reference taken
+ * by pci_get_device(); the original code never dropped it on any path,
+ * leaking a device reference per init.  Release it once config space
+ * has been read.
+ */
+static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = tgl_uncore_get_mc_dev();
+       struct intel_uncore_pmu *pmu = box->pmu;
+       resource_size_t addr;
+       u32 mch_bar;
+
+       if (!pdev) {
+               pr_warn("perf uncore: Cannot find matched IMC device.\n");
+               return;
+       }
+
+       pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
+       /* MCHBAR is disabled */
+       if (!(mch_bar & BIT(0))) {
+               pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
+               goto out;
+       }
+       mch_bar &= ~BIT(0);
+       addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       /* Upper 32 bits of MCHBAR live in the following config dword */
+       pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
+       addr |= ((resource_size_t)mch_bar << 32);
+#endif
+
+       box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+out:
+       pci_dev_put(pdev);      /* drop ref taken in tgl_uncore_get_mc_dev() */
+}
+
+/*
+ * MMIO ops for the free-running IMC counters; only init/exit, counter
+ * read and hw_config are provided (no per-counter enable/disable ops
+ * are set in this table).
+ */
+static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
+       .init_box       = tgl_uncore_imc_freerunning_init_box,
+       .exit_box       = uncore_mmio_exit_box,
+       .read_counter   = uncore_mmio_read_counter,
+       .hw_config      = uncore_freerunning_hw_config,
+};
+
+/* sysfs "format" attributes: event and umask (shared format_attr_* objects) */
+static struct attribute *tgl_uncore_imc_formats_attr[] = {
+       &format_attr_event.attr,
+       &format_attr_umask.attr,
+       NULL
+};
+
+/* Groups the format attributes under the PMU's "format" sysfs directory */
+static const struct attribute_group tgl_uncore_imc_format_group = {
+       .name = "format",
+       .attrs = tgl_uncore_imc_formats_attr,
+};
+
+/*
+ * The Tiger Lake free-running IMC uncore type: 3 counter types
+ * (total/read/write) across 2 boxes — presumably one per memory
+ * controller; confirm against the platform datasheet.  .freerunning
+ * defaults to the tgl_uncore_imc_freerunning table and is overridden
+ * for TGL-L in tgl_l_uncore_mmio_init().
+ */
+static struct intel_uncore_type tgl_uncore_imc_free_running = {
+       .name                   = "imc_free_running",
+       .num_counters           = 3,
+       .num_boxes              = 2,
+       .num_freerunning_types  = TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+       .freerunning            = tgl_uncore_imc_freerunning,
+       .ops                    = &tgl_uncore_imc_freerunning_ops,
+       .event_descs            = tgl_uncore_imc_events,
+       .format_group           = &tgl_uncore_imc_format_group,
+};
+
+/* NULL-terminated list of Tiger Lake MMIO uncore types */
+static struct intel_uncore_type *tgl_mmio_uncores[] = {
+       &tgl_uncore_imc_free_running,
+       NULL
+};
+
+/*
+ * Entry point for Tiger Lake-L: swap in the TGL-L counter-offset table
+ * before publishing the MMIO uncore list.
+ */
+void tgl_l_uncore_mmio_init(void)
+{
+       tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
+       uncore_mmio_uncores = tgl_mmio_uncores;
+}
+
+/* Entry point for the other Tiger Lake parts: default counter offsets */
+void tgl_uncore_mmio_init(void)
+{
+       uncore_mmio_uncores = tgl_mmio_uncores;
+}
+
+/* end of Tiger Lake MMIO uncore support */