driver::
 
        int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
-                                 int npage, int prot, unsigned long *phys_pfn);
+                                 int npage, int prot, struct page **pages);
 
        void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova,
                                    int npage);
 
                unsigned long size, struct page **page)
 {
        int total_pages = DIV_ROUND_UP(size, PAGE_SIZE);
-       unsigned long base_pfn = 0;
+       struct page *base_page = NULL;
        int npage;
        int ret;
 
         */
        for (npage = 0; npage < total_pages; npage++) {
                dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
-               unsigned long pfn;
+               struct page *cur_page;
 
                ret = vfio_pin_pages(&vgpu->vfio_device, cur_iova, 1,
-                                    IOMMU_READ | IOMMU_WRITE, &pfn);
+                                    IOMMU_READ | IOMMU_WRITE, &cur_page);
                if (ret != 1) {
                        gvt_vgpu_err("vfio_pin_pages failed for iova %pad, ret %d\n",
                                     &cur_iova, ret);
                        goto err;
                }
 
-               if (!pfn_valid(pfn)) {
-                       gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
-                       npage++;
-                       ret = -EFAULT;
-                       goto err;
-               }
-
                if (npage == 0)
-                       base_pfn = pfn;
-               else if (base_pfn + npage != pfn) {
+                       base_page = cur_page;
+               else if (base_page + npage != cur_page) {
                        gvt_vgpu_err("The pages are not continuous\n");
                        ret = -EINVAL;
                        npage++;
                }
        }
 
-       *page = pfn_to_page(base_pfn);
+       *page = base_page;
        return 0;
 err:
        gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
 
 struct page_array {
        /* Array of iovas for the pages that need to be pinned. */
        dma_addr_t              *pa_iova;
-       /* Array that receives PFNs of the pages pinned. */
-       unsigned long           *pa_pfn;
+       /* Array that receives the pinned pages. */
+       struct page             **pa_page;
        /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
                return -EINVAL;
 
        pa->pa_iova = kcalloc(pa->pa_nr,
-                             sizeof(*pa->pa_iova) + sizeof(*pa->pa_pfn),
+                             sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
                              GFP_KERNEL);
        if (unlikely(!pa->pa_iova)) {
                pa->pa_nr = 0;
                return -ENOMEM;
        }
-       pa->pa_pfn = (unsigned long *)&pa->pa_iova[pa->pa_nr];
+       pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
 
        pa->pa_iova[0] = iova;
-       pa->pa_pfn[0] = -1ULL;
+       pa->pa_page[0] = NULL;
        for (i = 1; i < pa->pa_nr; i++) {
                pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
-               pa->pa_pfn[i] = -1ULL;
+               pa->pa_page[i] = NULL;
        }
 
        return 0;
 
                ret = vfio_pin_pages(vdev, *first, npage,
                                     IOMMU_READ | IOMMU_WRITE,
-                                    &pa->pa_pfn[pinned]);
+                                    &pa->pa_page[pinned]);
                if (ret < 0) {
                        goto err_out;
                } else if (ret > 0 && ret != npage) {
         */
 
        for (i = 0; i < pa->pa_nr; i++)
-               idaws[i] = pa->pa_pfn[i] << PAGE_SHIFT;
+               idaws[i] = page_to_phys(pa->pa_page[i]);
 
        /* Adjust the first IDAW, since it may not start on a page boundary */
        idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
 
        l = n;
        for (i = 0; i < pa.pa_nr; i++) {
-               struct page *page = pfn_to_page(pa.pa_pfn[i]);
-               void *from = kmap_local_page(page);
+               void *from = kmap_local_page(pa.pa_page[i]);
 
                m = PAGE_SIZE;
                if (i == 0) {
 
        struct ap_qirq_ctrl aqic_gisa = {};
        struct ap_queue_status status = {};
        struct kvm_s390_gisa *gisa;
+       struct page *h_page;
        int nisc;
        struct kvm *kvm;
-       unsigned long h_pfn;
        phys_addr_t h_nib;
        dma_addr_t nib;
        int ret;
        }
 
        ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
-                            IOMMU_READ | IOMMU_WRITE, &h_pfn);
+                            IOMMU_READ | IOMMU_WRITE, &h_page);
        switch (ret) {
        case 1:
                break;
        kvm = q->matrix_mdev->kvm;
        gisa = kvm->arch.gisa_int.origin;
 
-       h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
+       h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
        aqic_gisa.gisc = isc;
 
        nisc = kvm_s390_gisc_register(kvm, isc);
 
  * @npage [in]   : count of pages to be pinned.  This count should not
  *                be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
  * @prot [in]    : protection flags
- * @phys_pfn[out]: array of host PFNs
+ * @pages[out]   : array of host pages
  * Return error or number of pages pinned.
  */
 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
-                  int npage, int prot, unsigned long *phys_pfn)
+                  int npage, int prot, struct page **pages)
 {
        struct vfio_container *container;
        struct vfio_group *group = device->group;
        struct vfio_iommu_driver *driver;
        int ret;
 
-       if (!phys_pfn || !npage || !vfio_assert_device_open(device))
+       if (!pages || !npage || !vfio_assert_device_open(device))
                return -EINVAL;
 
        if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
        if (likely(driver && driver->ops->pin_pages))
                ret = driver->ops->pin_pages(container->iommu_data,
                                             group->iommu_group, iova,
-                                            npage, prot, phys_pfn);
+                                            npage, prot, pages);
        else
                ret = -ENOTTY;
 
 
                                     struct iommu_group *group,
                                     dma_addr_t user_iova,
                                     int npage, int prot,
-                                    unsigned long *phys_pfn);
+                                    struct page **pages);
        void            (*unpin_pages)(void *iommu_data,
                                       dma_addr_t user_iova, int npage);
        void            (*register_device)(void *iommu_data,
 
                                      struct iommu_group *iommu_group,
                                      dma_addr_t user_iova,
                                      int npage, int prot,
-                                     unsigned long *phys_pfn)
+                                     struct page **pages)
 {
        struct vfio_iommu *iommu = iommu_data;
        struct vfio_iommu_group *group;
        bool do_accounting;
        dma_addr_t iova;
 
-       if (!iommu || !phys_pfn)
+       if (!iommu || !pages)
                return -EINVAL;
 
        /* Supported for v2 version only */
        do_accounting = list_empty(&iommu->domain_list);
 
        for (i = 0; i < npage; i++) {
+               unsigned long phys_pfn;
                struct vfio_pfn *vpfn;
 
                iova = user_iova + PAGE_SIZE * i;
 
                vpfn = vfio_iova_get_vfio_pfn(dma, iova);
                if (vpfn) {
-                       phys_pfn[i] = vpfn->pfn;
+                       pages[i] = pfn_to_page(vpfn->pfn);
                        continue;
                }
 
                remote_vaddr = dma->vaddr + (iova - dma->iova);
-               ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
+               ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn,
                                             do_accounting);
                if (ret)
                        goto pin_unwind;
 
-               ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
+               ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
                if (ret) {
-                       if (put_pfn(phys_pfn[i], dma->prot) && do_accounting)
+                       if (put_pfn(phys_pfn, dma->prot) && do_accounting)
                                vfio_lock_acct(dma, -1, true);
                        goto pin_unwind;
                }
 
+               pages[i] = pfn_to_page(phys_pfn);
+
                if (iommu->dirty_page_tracking) {
                        unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
 
        goto pin_done;
 
 pin_unwind:
-       phys_pfn[i] = 0;
+       pages[i] = NULL;
        for (j = 0; j < i; j++) {
                dma_addr_t iova;
 
                iova = user_iova + PAGE_SIZE * j;
                dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
                vfio_unpin_page_external(dma, iova, do_accounting);
-               phys_pfn[j] = 0;
+               pages[j] = NULL;
        }
 pin_done:
        mutex_unlock(&iommu->lock);
 
 #define VFIO_PIN_PAGES_MAX_ENTRIES     (PAGE_SIZE/sizeof(unsigned long))
 
 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
-                  int npage, int prot, unsigned long *phys_pfn);
+                  int npage, int prot, struct page **pages);
 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
                void *data, size_t len, bool write);