return 0;                               /* error */
 }
 
+static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
+{
+       unsigned int wimg = ptel & HPTE_R_WIMG;
+
+       /* Handle SAO */
+       if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
+           cpu_has_feature(CPU_FTR_ARCH_206))
+               wimg = HPTE_R_M;
+
+       if (!io_type)
+               return wimg == HPTE_R_M;
+
+       return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
+}
+
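
To make the intent concrete, here is a hypothetical self-test, not part of the patch, showing how the checker treats the common WIMG combinations (the HPTE_R_* values are the usual mmu-hash64.h bits: W=0x40, I=0x20, M=0x10, G=0x08; the SAO case assumes an ISA 2.06 host):

	/* Hypothetical illustration only -- not part of the patch. */
	static void hpte_cache_flags_selftest(void)
	{
		/* Normal RAM must be coherent (WIMG == M) */
		BUG_ON(!hpte_cache_flags_ok(HPTE_R_M, 0));
		/* SAO (WIM) also passes for RAM on ISA 2.06 CPUs */
		BUG_ON(!hpte_cache_flags_ok(HPTE_R_W | HPTE_R_I | HPTE_R_M, 0));
		/* Cache-inhibited mappings are rejected for RAM... */
		BUG_ON(hpte_cache_flags_ok(HPTE_R_I | HPTE_R_G, 0));
		/* ...but must match the host cache bits for I/O */
		BUG_ON(!hpte_cache_flags_ok(HPTE_R_I | HPTE_R_G, HPTE_R_I));
		BUG_ON(hpte_cache_flags_ok(HPTE_R_M, HPTE_R_I));
	}
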
+/* Return HPTE cache control bits corresponding to Linux pte bits */
+static inline unsigned long hpte_cache_bits(unsigned long pte_val)
+{
+#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
+       return pte_val & (HPTE_R_W | HPTE_R_I);
+#else
+       return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
+               ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
+#endif
+}
+
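For reference, on the 64-bit hash MMU the Linux PTE bits line up with the HPTE bits (_PAGE_NO_CACHE == 0x20 == HPTE_R_I and _PAGE_WRITETHRU == 0x40 == HPTE_R_W), so the #if arm is normally taken and the helper reduces to a simple mask; the #else arm covers layouts where the values differ. A hypothetical caller sketch:

	/* Sketch, not part of the patch: pgprot_noncached() sets
	 * _PAGE_NO_CACHE | _PAGE_GUARDED on powerpc, so a typical
	 * device BAR mapping yields io_type == HPTE_R_I. */
	unsigned long io_type = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
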
 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
 {
 
        struct page *page, *hpage, *pages[1];
        unsigned long s, pgsize;
        unsigned long *physp;
-       unsigned int got, pgorder;
+       unsigned int is_io, got, pgorder;
+       struct vm_area_struct *vma;
        unsigned long pfn, i, npages;
 
        physp = kvm->arch.slot_phys[memslot->id];
        if (physp[gfn - memslot->base_gfn])
                return 0;
 
+       is_io = 0;
+       got = 0;
        page = NULL;
        pgsize = psize;
+       err = -EINVAL;
        start = gfn_to_hva_memslot(memslot, gfn);
 
        /* Instantiate and get the page we want access to */
        np = get_user_pages_fast(start, 1, 1, pages);
-       if (np != 1)
-               return -EINVAL;
-       page = pages[0];
-       got = KVMPPC_GOT_PAGE;
+       if (np != 1) {
+               /* Look up the vma for the page */
+               down_read(&current->mm->mmap_sem);
+               vma = find_vma(current->mm, start);
+               if (!vma || vma->vm_start > start ||
+                   start + psize > vma->vm_end ||
+                   !(vma->vm_flags & VM_PFNMAP))
+                       goto up_err;
+               is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+               pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+               /* check alignment of pfn vs. requested page size */
+               if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
+                       goto up_err;
+               up_read(&current->mm->mmap_sem);
 
-       /* See if this is a large page */
-       s = PAGE_SIZE;
-       if (PageHuge(page)) {
-               hpage = compound_head(page);
-               s <<= compound_order(hpage);
-               /* Get the whole large page if slot alignment is ok */
-               if (s > psize && slot_is_aligned(memslot, s) &&
-                   !(memslot->userspace_addr & (s - 1))) {
-                       start &= ~(s - 1);
-                       pgsize = s;
-                       page = hpage;
+       } else {
+               page = pages[0];
+               got = KVMPPC_GOT_PAGE;
+
+               /* See if this is a large page */
+               s = PAGE_SIZE;
+               if (PageHuge(page)) {
+                       hpage = compound_head(page);
+                       s <<= compound_order(hpage);
+                       /* Get the whole large page if slot alignment is ok */
+                       if (s > psize && slot_is_aligned(memslot, s) &&
+                           !(memslot->userspace_addr & (s - 1))) {
+                               start &= ~(s - 1);
+                               pgsize = s;
+                               page = hpage;
+                       }
                }
+               if (s < psize)
+                       goto out;
+               pfn = page_to_pfn(page);
        }
-       err = -EINVAL;
-       if (s < psize)
-               goto out;
-       pfn = page_to_pfn(page);
 
        npages = pgsize >> PAGE_SHIFT;
        pgorder = __ilog2(npages);
        physp += (gfn - memslot->base_gfn) & ~(npages - 1);
        spin_lock(&kvm->arch.slot_phys_lock);
        for (i = 0; i < npages; ++i) {
                if (!physp[i]) {
-                       physp[i] = ((pfn + i) << PAGE_SHIFT) + got + pgorder;
+                       physp[i] = ((pfn + i) << PAGE_SHIFT) +
+                               got + is_io + pgorder;
                        got = 0;
                }
        }
        spin_unlock(&kvm->arch.slot_phys_lock);
        err = 0;

 out:
        if (got) {
                if (PageHuge(page))
                        page = compound_head(page);
                put_page(page);
        }
        return err;
+
+ up_err:
+       up_read(&current->mm->mmap_sem);
+       return err;
 }
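
Each physp[] entry packs the physical address and bookkeeping into a single word: the bits below PAGE_SHIFT hold KVMPPC_GOT_PAGE (0x80), the is_io cache bits (HPTE_R_I | HPTE_R_W == 0x60) and the page order (KVMPPC_PAGE_ORDER_MASK == 0x1f). A standalone userspace sketch of the round trip, with made-up pfn and mapping values:

	/* Standalone sketch, not part of the patch; constants mirror
	 * kvm_host.h / mmu-hash64.h, the pfn and mapping are invented. */
	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define HPTE_R_I		0x20UL
	#define HPTE_R_W		0x40UL
	#define KVMPPC_GOT_PAGE		0x80UL
	#define KVMPPC_PAGE_ORDER_MASK	0x1fUL

	int main(void)
	{
		/* PFNMAP case: get_user_pages_fast() failed, so got == 0 */
		unsigned long pfn = 0x10004;		/* from the vm_pgoff arithmetic */
		unsigned long is_io = HPTE_R_I;		/* uncached device BAR */
		unsigned long got = 0, pgorder = 0;	/* 4k page: order 0 */
		unsigned long entry = (pfn << PAGE_SHIFT) + got + is_io + pgorder;

		/* Decode the same way kvmppc_h_enter() does below */
		printf("is_io    = 0x%lx\n", entry & (HPTE_R_I | HPTE_R_W));
		printf("pte_size = %lu\n",
		       PAGE_SIZE << (entry & KVMPPC_PAGE_ORDER_MASK));
		printf("pa       = 0x%lx\n", entry & ~(PAGE_SIZE - 1));
		return 0;
	}
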
 
 /*
 
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
+       unsigned long is_io;
        bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
 
        psize = hpte_page_size(pteh, ptel);
        pa = *physp;
        if (!pa)
                return H_TOO_HARD;
+       is_io = pa & (HPTE_R_I | HPTE_R_W);
        pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
        pa &= PAGE_MASK;
 
        ptel |= pa;
 
        /* Check WIMG */
-       if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
-           (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
-               return H_PARAMETER;
+       if (!hpte_cache_flags_ok(ptel, is_io)) {
+               if (is_io)
+                       return H_PARAMETER;
+               /*
+                * Allow guest to map emulated device memory as
+                * uncacheable, but actually make it cacheable.
+                */
+               ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
+               ptel |= HPTE_R_M;
+       }
        pteh &= ~0x60UL;
        pteh |= HPTE_V_VALID;
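
The effect of the fallback is easiest to see with concrete bits; this sketch, not part of the patch and using a made-up real address, walks a guest HPTE for emulated MMIO through the rewrite:

	/* Guest maps emulated device memory uncached + guarded, but the
	 * backing page is ordinary RAM, so is_io == 0 and the WIMG check
	 * fails.  (Illustration only -- not part of the patch.) */
	unsigned long ptel = 0x12340000UL | HPTE_R_I | HPTE_R_G;	/* WIMG = 0b0101 */

	if (!hpte_cache_flags_ok(ptel, 0)) {
		ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
		ptel |= HPTE_R_M;		/* WIMG becomes plain M */
	}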