 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_INTEL              0x69
 
-#define ARM_CPU_PART_ARM1136           0xB360
-#define ARM_CPU_PART_ARM1156           0xB560
-#define ARM_CPU_PART_ARM1176           0xB760
-#define ARM_CPU_PART_ARM11MPCORE       0xB020
-#define ARM_CPU_PART_CORTEX_A8         0xC080
-#define ARM_CPU_PART_CORTEX_A9         0xC090
-#define ARM_CPU_PART_CORTEX_A5         0xC050
-#define ARM_CPU_PART_CORTEX_A15                0xC0F0
-#define ARM_CPU_PART_CORTEX_A7         0xC070
-#define ARM_CPU_PART_CORTEX_A12                0xC0D0
-#define ARM_CPU_PART_CORTEX_A17                0xC0E0
+/* ARM implemented processors */
+#define ARM_CPU_PART_ARM1136           0x4100b360
+#define ARM_CPU_PART_ARM1156           0x4100b560
+#define ARM_CPU_PART_ARM1176           0x4100b760
+#define ARM_CPU_PART_ARM11MPCORE       0x4100b020
+#define ARM_CPU_PART_CORTEX_A8         0x4100c080
+#define ARM_CPU_PART_CORTEX_A9         0x4100c090
+#define ARM_CPU_PART_CORTEX_A5         0x4100c050
+#define ARM_CPU_PART_CORTEX_A7         0x4100c070
+#define ARM_CPU_PART_CORTEX_A12                0x4100c0d0
+#define ARM_CPU_PART_CORTEX_A17                0x4100c0e0
+#define ARM_CPU_PART_CORTEX_A15                0x4100c0f0
 
 #define ARM_CPU_XSCALE_ARCH_MASK       0xe000
 #define ARM_CPU_XSCALE_ARCH_V1         0x2000
        return (read_cpuid_id() & 0xFF000000) >> 24;
 }
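
Note on the new values above: they fold the MIDR implementer byte (bits 31:24) into the part-number constant, alongside the 12-bit part field (bits 15:4). A minimal standalone sketch of the decomposition (not part of the patch; the sample MIDR value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int midr = 0x413fc090;	/* e.g. a Cortex-A9 r3p0 MIDR */

	/* 0xff00fff0 keeps the implementer byte plus the part field */
	printf("implementer=0x%02x part=0x%03x combined=0x%08x\n",
	       midr >> 24, (midr >> 4) & 0xfff, midr & 0xff00fff0);
	/* prints: implementer=0x41 part=0xc09 combined=0x4100c090 */
	return 0;
}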
 
-static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+/*
+ * The CPU part number is meaningless without referring to the CPU
+ * implementer: implementers are free to define their own part numbers
+ * which are permitted to clash with those of other implementers.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_part(void)
+{
+       return read_cpuid_id() & 0xff00fff0;
+}
+
+static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
 {
        return read_cpuid_id() & 0xFFF0;
 }
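
The comment above is the motivation for the whole patch; a small sketch of the clash it describes (the second MIDR is hypothetical, chosen only to share a part field across implementers):

#include <assert.h>

int main(void)
{
	unsigned int arm_a9 = 0x4100c090;	/* ARM Ltd, part 0xc09 */
	unsigned int other  = 0x6900c090;	/* hypothetical: same part field,
						   different implementer */

	assert((arm_a9 & 0xfff0) == (other & 0xfff0));		/* old mask: collision */
	assert((arm_a9 & 0xff00fff0) != (other & 0xff00fff0));	/* new mask: distinct */
	return 0;
}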
 
 static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
 {
-       return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+       return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
 }
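
This hunk is behaviour-preserving: ARM_CPU_XSCALE_ARCH_MASK (0xe000) is a subset of the old 0xfff0 part-number mask, so masking the raw ID register yields the same bits as masking the old return value. A compile-time check of that subset relation:

/* (id & 0xfff0) & 0xe000 == id & 0xe000 for any id, because: */
_Static_assert((0xe000 & 0xfff0) == 0xe000, "XScale arch mask lies within part mask");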
 
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 
 
 static inline bool scu_a9_has_base(void)
 {
-       return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+       return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 }
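
This is the shape of every remaining hunk in the patch: one masked compare replaces an implementer check plus a part-number check. Before/after sketch (do_a9_quirk() is a stand-in, not a real function):

/* before: two CP15-derived values, two compares */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
    read_cpuid_part_number() == 0xC090)		/* old A9 constant */
	do_a9_quirk();

/* after: one combined compare */
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
	do_a9_quirk();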
 
 static inline unsigned long scu_a9_get_base(void)
 
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
        int cpu = get_cpu();
-       unsigned long implementor = read_cpuid_implementor();
-       unsigned long part_number = read_cpuid_part_number();
        int ret = -ENODEV;
 
        pr_info("probing PMU on CPU %d\n", cpu);
 
+       switch (read_cpuid_part()) {
        /* ARM Ltd CPUs. */
-       if (implementor == ARM_CPU_IMP_ARM) {
-               switch (part_number) {
-               case ARM_CPU_PART_ARM1136:
-               case ARM_CPU_PART_ARM1156:
-               case ARM_CPU_PART_ARM1176:
-                       ret = armv6pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_ARM11MPCORE:
-                       ret = armv6mpcore_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A8:
-                       ret = armv7_a8_pmu_init(pmu);
-                       break;
-               case ARM_CPU_PART_CORTEX_A9:
-                       ret = armv7_a9_pmu_init(pmu);
-                       break;
-               }
-       /* Intel CPUs [xscale]. */
-       } else if (implementor == ARM_CPU_IMP_INTEL) {
-               switch (xscale_cpu_arch_version()) {
-               case ARM_CPU_XSCALE_ARCH_V1:
-                       ret = xscale1pmu_init(pmu);
-                       break;
-               case ARM_CPU_XSCALE_ARCH_V2:
-                       ret = xscale2pmu_init(pmu);
-                       break;
+       case ARM_CPU_PART_ARM1136:
+       case ARM_CPU_PART_ARM1156:
+       case ARM_CPU_PART_ARM1176:
+               ret = armv6pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_ARM11MPCORE:
+               ret = armv6mpcore_pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_CORTEX_A8:
+               ret = armv7_a8_pmu_init(pmu);
+               break;
+       case ARM_CPU_PART_CORTEX_A9:
+               ret = armv7_a9_pmu_init(pmu);
+               break;
+
+       default:
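+               /* Intel CPUs [xscale]. */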
+               if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
+                       switch (xscale_cpu_arch_version()) {
+                       case ARM_CPU_XSCALE_ARCH_V1:
+                               ret = xscale1pmu_init(pmu);
+                               break;
+                       case ARM_CPU_XSCALE_ARCH_V2:
+                               ret = xscale2pmu_init(pmu);
+                               break;
+                       }
                }
+               break;
        }
 
        put_cpu();
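
XScale stays in the default branch because Intel encodes an architecture version, not a distinct part ID, in the part field, so no single ARM_CPU_PART_* constant can match it. A standalone mock of the same dispatch shape (identify() and both sample MIDRs are invented for illustration):

#include <stdio.h>

#define PART_MASK	0xff00fff0
#define PART_CORTEX_A9	0x4100c090
#define IMP_INTEL	0x69

static const char *identify(unsigned int midr)
{
	switch (midr & PART_MASK) {
	case PART_CORTEX_A9:
		return "armv7_a9";
	default:
		if ((midr >> 24) == IMP_INTEL)
			return "xscale";	/* arch version decoded separately */
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", identify(0x413fc090));	/* armv7_a9 */
	printf("%s\n", identify(0x69054110));	/* xscale */
	return 0;
}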
 
 
 int __attribute_const__ kvm_target_cpu(void)
 {
-       unsigned long implementor = read_cpuid_implementor();
-       unsigned long part_number = read_cpuid_part_number();
-
-       if (implementor != ARM_CPU_IMP_ARM)
-               return -EINVAL;
-
-       switch (part_number) {
+       switch (read_cpuid_part()) {
        case ARM_CPU_PART_CORTEX_A7:
                return KVM_ARM_TARGET_CORTEX_A7;
        case ARM_CPU_PART_CORTEX_A15:
 
        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&exynos_mcpm_lock);
 
-               if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+               if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                        /*
                         * On the Cortex-A15 we need to disable
                         * L2 prefetching before flushing the cache.
 
 static void __init exynos_cache_off(void)
 {
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                /* disable L2 prefetching on the Cortex-A15 */
                asm volatile(
                "mcr    p15, 1, %0, c15, c0, 3\n\t"
 
        void __iomem *scu_base = scu_base_addr();
        unsigned int i, ncores;
 
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                ncores = scu_base ? scu_get_core_count(scu_base) : 1;
        else
                /*
 
        exynos_sysram_init();
 
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(scu_base_addr());
 
        /*
 
        tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
        __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_save_register();
 
        return 0;
        if (exynos_pm_central_resume())
                goto early_wakeup;
 
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_restore_register();
 
        /* For release retention */
 
        s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
 early_wakeup:
        case CPU_PM_ENTER:
                if (cpu == 0) {
                        exynos_pm_central_suspend();
-                       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+                       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                                exynos_cpu_save_register();
                }
                break;
 
        case CPU_PM_EXIT:
                if (cpu == 0) {
-                       if (read_cpuid_part_number() ==
-                                       ARM_CPU_PART_CORTEX_A9) {
+                       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
                                scu_enable(S5P_VA_SCU);
                                exynos_cpu_restore_register();
                        }
 
        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&tc2_pm_lock);
 
-               if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+               if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                        /*
                         * On the Cortex-A15 we need to disable
                         * L2 prefetching before flushing the cache.
 static void __init tc2_cache_off(void)
 {
        pr_info("TC2: disabling cache during MCPM loopback test\n");
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                /* disable L2 prefetching on the Cortex-A15 */
                asm volatile(
                "mcr    p15, 1, %0, c15, c0, 3 \n\t"
 
 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 {
        unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
-       bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
+       bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
 
        if (rev >= L310_CACHE_ID_RTL_R2P0) {
                if (cortex_a9) {
 
         * fire when the timer value is greater than or equal to the
         * comparator value. In previous revisions the comparators fired
         * only when the timer value was exactly equal to it.
         */
-       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9
+       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
            && (read_cpuid_id() & 0xf0000f) < 0x200000) {
                pr_warn("global-timer: non support for this cpu version.\n");
                return;
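
For reference, 0xf0000f keeps the MIDR variant (bits 23:20, the 'r' of rNpM) and revision (bits 3:0, the 'p'), so the test reads as "variant below 2", i.e. older than r2p0. Decode sketch under that reading (the MIDR value is hypothetical):

unsigned int midr = 0x411fc090;		/* e.g. a Cortex-A9 r1p0 */
unsigned int vr = midr & 0xf0000f;	/* 0x100000 -> r1p0, below 0x200000 (r2p0) */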