/*
  * X86 PAT uses the page flags WC and Uncached together to keep track of
  * the memory type of pages that have a backing struct page. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
- * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * different memory types: _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+ * _PAGE_CACHE_MODE_UC_MINUS, and a fourth state where the memory type has not
  * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_UC here.
+ * Note we do not support _PAGE_CACHE_MODE_UC here.
  */
 
 #define _PGMT_DEFAULT          0
 #define _PGMT_WC               (1UL << PG_arch_1)
 #define _PGMT_UC_MINUS         (1UL << PG_uncached)
 #define _PGMT_WB               (1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_MASK             (1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_CLEAR_MASK       (~_PGMT_MASK)
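
For reference, the two page flags encode the four states like this (read
straight off the defines above):

        PG_uncached  PG_arch_1   state
             0           0       _PGMT_DEFAULT (memtype not set, i.e. -1)
             0           1       _PGMT_WC
             1           0       _PGMT_UC_MINUS
             1           1       _PGMT_WB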
 
-static inline unsigned long get_page_memtype(struct page *pg)
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
        unsigned long pg_flags = pg->flags & _PGMT_MASK;
 
        if (pg_flags == _PGMT_DEFAULT)
                return -1;
        else if (pg_flags == _PGMT_WC)
-               return _PAGE_CACHE_WC;
+               return _PAGE_CACHE_MODE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
-               return _PAGE_CACHE_UC_MINUS;
+               return _PAGE_CACHE_MODE_UC_MINUS;
        else
-               return _PAGE_CACHE_WB;
+               return _PAGE_CACHE_MODE_WB;
 }
 
-static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+static inline void set_page_memtype(struct page *pg,
+                                   enum page_cache_mode memtype)
 {
-       unsigned long memtype_flags = _PGMT_DEFAULT;
+       unsigned long memtype_flags;
        unsigned long old_flags;
        unsigned long new_flags;
 
        switch (memtype) {
-       case _PAGE_CACHE_WC:
+       case _PAGE_CACHE_MODE_WC:
                memtype_flags = _PGMT_WC;
                break;
-       case _PAGE_CACHE_UC_MINUS:
+       case _PAGE_CACHE_MODE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
-       case _PAGE_CACHE_WB:
+       case _PAGE_CACHE_MODE_WB:
                memtype_flags = _PGMT_WB;
                break;
+       default:
+               memtype_flags = _PGMT_DEFAULT;
+               break;
        }
 
        do {
                old_flags = pg->flags;
                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
 }
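
The do/while above is the usual lockless read-modify-write loop: re-read
pg->flags, splice the new memtype bits in, and retry if another CPU changed
the flags in the meantime. A self-contained user-space sketch of the same
pattern, using the GCC/Clang atomic builtins rather than the kernel's
cmpxchg() (illustrative, not kernel code):

#include <stdio.h>

/* Atomically replace the bits selected by mask in *flags with bits,
 * retrying if another thread modified *flags in the meantime. */
static void set_bits_atomic(unsigned long *flags, unsigned long mask,
                            unsigned long bits)
{
        unsigned long old, new;

        do {
                old = __atomic_load_n(flags, __ATOMIC_RELAXED);
                new = (old & ~mask) | bits;
        } while (!__atomic_compare_exchange_n(flags, &old, new, 0,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
}

int main(void)
{
        unsigned long f = 0x5;

        set_bits_atomic(&f, 0x3, 0x2);  /* 0b101 -> 0b110 */
        printf("0x%lx\n", f);           /* prints 0x6 */
        return 0;
}
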
 #else
-static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
-static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+static inline enum page_cache_mode get_page_memtype(struct page *pg)
+{
+       return -1;
+}
+static inline void set_page_memtype(struct page *pg,
+                                   enum page_cache_mode memtype)
+{
+}
 #endif
 
 /*
  * Intersects the PAT memory type with the MTRR memory type and returns
  * the effective memory type as PAT understands it (the raw type values
  * used by PAT and MTRR differ).
  * The intersection is based on "Effective Memory Type" tables in IA-32
  * SDM vol 3a
  */
-static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+static enum page_cache_mode pat_x_mtrr_type(u64 start, u64 end,
+                                            enum page_cache_mode req_type)
 {
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
-       if (req_type == _PAGE_CACHE_WB) {
+       if (req_type == _PAGE_CACHE_MODE_WB) {
                u8 mtrr_type;
 
                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
-                       return _PAGE_CACHE_UC_MINUS;
+                       return _PAGE_CACHE_MODE_UC_MINUS;
 
-               return _PAGE_CACHE_WB;
+               return _PAGE_CACHE_MODE_WB;
        }
 
        return req_type;
 }
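
In other words, a WB request over a range that MTRR does not mark write-back
is demoted to UC-. A sketch of a call (the address range is made up):

        enum page_cache_mode pcm;

        /* assume MTRR marks this MMIO range UC: the WB request is demoted */
        pcm = pat_x_mtrr_type(0xd0000000ULL, 0xd0001000ULL,
                              _PAGE_CACHE_MODE_WB);
        /* pcm == _PAGE_CACHE_MODE_UC_MINUS */
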
 /*
  * For RAM pages, we use page flags to mark the pages with the appropriate
  * type. Here we make two passes:
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
  */
-static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-                                 unsigned long *new_type)
+static int reserve_ram_pages_type(u64 start, u64 end,
+                                 enum page_cache_mode req_type,
+                                 enum page_cache_mode *new_type)
 {
        struct page *page;
        u64 pfn;
 
-       if (req_type == _PAGE_CACHE_UC) {
+       if (req_type == _PAGE_CACHE_MODE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
-               req_type = _PAGE_CACHE_UC_MINUS;
+               req_type = _PAGE_CACHE_MODE_UC_MINUS;
        }
 
        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-               unsigned long type;
+               enum page_cache_mode type;
 
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
-                       printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                       pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
 
 /*
  * req_type typically has one of the following:
- * - _PAGE_CACHE_WB
- * - _PAGE_CACHE_WC
- * - _PAGE_CACHE_UC_MINUS
- * - _PAGE_CACHE_UC
+ * - _PAGE_CACHE_MODE_WB
+ * - _PAGE_CACHE_MODE_WC
+ * - _PAGE_CACHE_MODE_UC_MINUS
+ * - _PAGE_CACHE_MODE_UC
  *
  * If new_type is NULL, the function returns an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, the function returns the
  * effectively available type via new_type on success. On any error it
  * returns a negative value.
  */
-int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-                   unsigned long *new_type)
+int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+                   enum page_cache_mode *new_type)
 {
        struct memtype *new;
-       unsigned long actual_type;
+       enum page_cache_mode actual_type;
        int is_range_ram;
        int err = 0;
 
        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
-                       if (req_type == _PAGE_CACHE_WC)
-                               *new_type = _PAGE_CACHE_UC_MINUS;
+                       if (req_type == _PAGE_CACHE_MODE_WC)
+                               *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                        else
-                               *new_type = req_type & _PAGE_CACHE_MASK;
+                               *new_type = req_type;
                }
                return 0;
        }
        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
-                       *new_type = _PAGE_CACHE_WB;
+                       *new_type = _PAGE_CACHE_MODE_WB;
                return 0;
        }
 
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
-       actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+       actual_type = pat_x_mtrr_type(start, end, req_type);
 
        if (new_type)
                *new_type = actual_type;
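
After the conversion a typical caller passes and receives enum
page_cache_mode values directly; a sketch (paddr and size stand for the
caller's range):

        enum page_cache_mode pcm;
        int ret;

        ret = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_MODE_WC, &pcm);
        if (ret)
                return ret;
        /* pcm may have been downgraded, e.g. to _PAGE_CACHE_MODE_UC_MINUS */
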
        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;
                page = pfn_to_page(paddr >> PAGE_SHIFT);
-               rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
+               rettype = get_page_memtype(page);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_MODE_WB;

                return rettype;
        }

        entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
-               rettype = pgprot2cachemode(__pgprot(entry->type));
+               rettype = entry->type;
        else
                rettype = _PAGE_CACHE_MODE_UC_MINUS;
 
        resource_size_t size = end - start;
        enum page_cache_mode req_type = *type;
        enum page_cache_mode new_type;
-       unsigned long new_prot;
        int ret;
 
        WARN_ON_ONCE(iomem_map_sanity_check(start, size));
 
-       ret = reserve_memtype(start, end, cachemode2protval(req_type),
-                               &new_prot);
+       ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;
 
-       new_type = pgprot2cachemode(__pgprot(new_prot));
-
        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;
 
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
 {
-       unsigned long flags = _PAGE_CACHE_WB;
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
 
        if (!range_is_allowed(pfn, size))
                return 0;
 
        if (file->f_flags & O_DSYNC)
-               flags = _PAGE_CACHE_UC_MINUS;
+               pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
 #ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-               flags = _PAGE_CACHE_UC;
+               pcm = _PAGE_CACHE_MODE_UC;
        }
 #endif
 
        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-                            flags);
+                            cachemode2protval(pcm));
        return 1;
 }
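
For context, this hook runs when user space mmaps /dev/mem; opening the file
with O_DSYNC requests an uncached-minus mapping. A user-space sketch (the
physical address is made up):

#include <fcntl.h>
#include <sys/mman.h>

void *map_phys_uc_minus(off_t phys_addr, size_t len)
{
        /* O_DSYNC makes phys_mem_access_prot_allowed() pick UC- */
        int fd = open("/dev/mem", O_RDWR | O_DSYNC);

        if (fd < 0)
                return MAP_FAILED;
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, phys_addr);
}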
 
                printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                        "for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
-                       cattr_name(cachemode2protval(pcm)),
+                       cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
 static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                              int strict_prot)
 {
        int is_ram = 0;
        int ret;
-       unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-       unsigned long flags = want_flags;
+       enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+       enum page_cache_mode pcm = want_pcm;
 
        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
                if (!pat_enabled)
                        return 0;
 
-               flags = cachemode2protval(lookup_memtype(paddr));
-               if (want_flags != flags) {
+               pcm = lookup_memtype(paddr);
+               if (want_pcm != pcm) {
                        printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
-                               cattr_name(want_flags),
+                               cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
-                               cattr_name(flags));
+                               cattr_name(pcm));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
-                                             (~_PAGE_CACHE_MASK)) |
-                                            flags);
+                                            (~_PAGE_CACHE_MASK)) |
+                                            cachemode2protval(pcm));
                }
                return 0;
        }
 
-       ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+       ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
        if (ret)
                return ret;
 
-       if (flags != want_flags) {
+       if (pcm != want_pcm) {
                if (strict_prot ||
-                   !is_new_memtype_allowed(paddr, size,
-                               pgprot2cachemode(__pgprot(want_flags)),
-                               pgprot2cachemode(__pgprot(flags)))) {
+                   !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
-                               cattr_name(want_flags),
+                               cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
-                               cattr_name(flags));
+                               cattr_name(pcm));
                        return -EINVAL;
                }
                /*
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
-                                    flags);
+                                    cachemode2protval(pcm));
        }
 
-       if (kernel_map_sync_memtype(paddr, size,
-                                   pgprot2cachemode(__pgprot(flags))) < 0) {
+       if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
        if (pat_enabled)
-               return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+               return __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WC));
        else
                return pgprot_noncached(prot);
 }
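
Driver-side usage is unchanged by the conversion. A minimal mmap handler
sketch (the mydrv_* names and the pfn are illustrative):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Ask for write-combining; pgprot_writecombine() above falls
         * back to pgprot_noncached() when PAT is disabled. */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, mydrv_base_pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}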