  * Iterate through all the IOMMUs to compute the common EFR
  * masks among all IOMMUs and warn if any inconsistency is found.
  */
-static void get_global_efr(void)
+static __init void get_global_efr(void)
 {
        struct amd_iommu *iommu;
 
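        /*
         * amd_iommu_efr/amd_iommu_efr2 are seeded from the first IOMMU and
         * then ANDed with each remaining IOMMU's EFR/EFR2, warning on any
         * mismatch, so only features common to every IOMMU remain set.
         */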
        pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 
-static bool check_feature_on_all_iommus(u64 mask)
-{
-       return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
-       return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
        u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
        u64 entry = start & PM_ADDR_MASK;
 
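        /*
         * The completion-wait write-back range is only programmed when SNP
         * is supported, since the IOMMU's semaphore writes must then fall
         * within a known, registered range.
         */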
-       if (!check_feature_on_all_iommus(FEATURE_SNP))
+       if (!check_feature(FEATURE_SNP))
                return;
 
        /* Note:
        void *buf = (void *)__get_free_pages(gfp, order);
 
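        /*
         * When SNP is enabled, buffers written by the IOMMU must be mapped
         * with 4K pages; if set_memory_4k() cannot split the mapping, the
         * allocation is discarded.
         */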
        if (buf &&
-           check_feature_on_all_iommus(FEATURE_SNP) &&
+           check_feature(FEATURE_SNP) &&
            set_memory_4k((unsigned long)buf, (1 << order))) {
                free_pages((unsigned long)buf, order);
                buf = NULL;
 
 static void iommu_enable_gt(struct amd_iommu *iommu)
 {
-       if (!iommu_feature(iommu, FEATURE_GT))
+       if (!check_feature(FEATURE_GT))
                return;
 
        iommu_feature_enable(iommu, CONTROL_GT_EN);
        u64 val;
        struct pci_dev *pdev = iommu->dev;
 
-       if (!iommu_feature(iommu, FEATURE_PC))
+       if (!check_feature(FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
                                       struct device_attribute *attr,
                                       char *buf)
 {
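        /* Report the global EFR/EFR2 masks (as "EFR:EFR2") common to all IOMMUs. */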
-       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-       return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+       return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
 
        features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
        features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
 
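        /* If the IVHD did not supply EFR/EFR2, adopt the values read from MMIO. */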
-       if (!iommu->features) {
-               iommu->features = features;
-               iommu->features2 = features2;
+       if (!amd_iommu_efr) {
+               amd_iommu_efr = features;
+               amd_iommu_efr2 = features2;
                return;
        }
 
        /*
         * Sanity check and warn if EFR values from
         * IVHD and MMIO conflict.
         */
-       if (features != iommu->features ||
-           features2 != iommu->features2) {
+       if (features != amd_iommu_efr ||
+           features2 != amd_iommu_efr2) {
                pr_warn(FW_WARN
                        "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
-                       features, iommu->features,
-                       features2, iommu->features2);
+                       features, amd_iommu_efr,
+                       features2, amd_iommu_efr2);
        }
 }
 
 
        late_iommu_features_init(iommu);
 
-       if (iommu_feature(iommu, FEATURE_GT)) {
+       if (check_feature(FEATURE_GT)) {
                int glxval;
                u32 max_pasid;
                u64 pasmax;
 
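                /*
                 * PASmax encodes the supported PASID width minus one, so
                 * the largest valid PASID is (1 << (pasmax + 1)) - 1.
                 */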
-               pasmax = iommu->features & FEATURE_PASID_MASK;
+               pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
                pasmax >>= FEATURE_PASID_SHIFT;
                max_pasid  = (1 << (pasmax + 1)) - 1;

                amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

                BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
 
-               glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+               glxval   = amd_iommu_efr & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;
 
                if (amd_iommu_max_glx_val == -1)
                        amd_iommu_max_glx_val = glxval;
                else
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }
 
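        /* The IOMMUv2 feature set requires both guest translation (GT) and PPR. */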
-       if (iommu_feature(iommu, FEATURE_GT) &&
-           iommu_feature(iommu, FEATURE_PPR)) {
+       if (check_feature(FEATURE_GT) &&
+           check_feature(FEATURE_PPR)) {
                iommu->is_iommu_v2   = true;
                amd_iommu_v2_present = true;
        }
 
-       if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+       if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;
 
        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
        init_iommu_perf_ctr(iommu);
 
        if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-               if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
-                   !iommu_feature(iommu, FEATURE_GT)) {
+               if (!check_feature(FEATURE_GIOSUP) ||
+                   !check_feature(FEATURE_GT)) {
                        pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
                        amd_iommu_pgtable = AMD_IOMMU_V1;
                }
 
 static void print_iommu_info(void)
 {
+       int i;
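        /* Names for EFR bits 0-9; "[5]" is a placeholder for the unnamed bit. */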
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu) {
-               struct pci_dev *pdev = iommu->dev;
-               int i;
-
-               pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
 
-               if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-                       pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
+       if (amd_iommu_efr) {
+               pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
 
-                       for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-                               if (iommu_feature(iommu, (1ULL << i)))
-                                       pr_cont(" %s", feat_str[i]);
-                       }
+               for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+                       if (check_feature(1ULL << i))
+                               pr_cont(" %s", feat_str[i]);
+               }
 
-                       if (iommu->features & FEATURE_GAM_VAPIC)
-                               pr_cont(" GA_vAPIC");
+               if (check_feature(FEATURE_GAM_VAPIC))
+                       pr_cont(" GA_vAPIC");
 
-                       if (iommu->features & FEATURE_SNP)
-                               pr_cont(" SNP");
+               if (check_feature(FEATURE_SNP))
+                       pr_cont(" SNP");
 
-                       pr_cont("\n");
-               }
+               pr_cont("\n");
        }
+
        if (irq_remapping_enabled) {
                pr_info("Interrupt remapping enabled\n");
                if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
        }
 
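        /* Guest vAPIC mode requires GAM support on every IOMMU; fall back to legacy GA otherwise. */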
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-           !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+           !check_feature(FEATURE_GAM_VAPIC)) {
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
                return;
        }
                return -EINVAL;
        }
 
-       amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+       amd_iommu_snp_en = check_feature(FEATURE_SNP);
        if (!amd_iommu_snp_en)
                return -EINVAL;