*
  * Returns: - 0 on success
  *          - -EINVAL if the gpa is not valid guest storage
- *          - -ENOMEM if out of memory
  */
 static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
 {
        struct page *page;
-       hva_t hva;
-       int rc;
 
-       hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-       if (kvm_is_error_hva(hva))
+       /* gfn_to_page() takes a page reference; the caller drops it via
+        * unpin_guest_page().
+        */
+       page = gfn_to_page(kvm, gpa_to_gfn(gpa));
+       if (is_error_page(page))
                return -EINVAL;
-       rc = get_user_pages_fast(hva, 1, 1, &page);
-       if (rc < 0)
-               return rc;
-       else if (rc != 1)
-               return -ENOMEM;
+       /* NOTE(review): relies on a direct virt==phys kernel mapping (s390),
+        * so the host address can be formed from page_to_virt() plus the
+        * in-page offset of @gpa.
+        */
        *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
        return 0;
 }
 /* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
 static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
 {
-       struct page *page;
-
-       page = virt_to_page(hpa);
-       set_page_dirty_lock(page);
-       put_page(page);
+       /* Set the pfn dirty and drop the reference taken by gfn_to_page();
+        * hpa >> PAGE_SHIFT is valid as a pfn only because virt==phys here.
+        */
+       kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
        /* mark the page always as dirty for migration */
        mark_page_dirty(kvm, gpa_to_gfn(gpa));
 }
                        rc = set_validity_icpt(scb_s, 0x003bU);
                if (!rc) {
                        rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-                       if (rc == -EINVAL)
+                       if (rc)
                                rc = set_validity_icpt(scb_s, 0x0034U);
                }
                if (rc)
                }
                /* 256 bytes cannot cross page boundaries */
                rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-               if (rc == -EINVAL)
+               if (rc) {
                        rc = set_validity_icpt(scb_s, 0x0080U);
-               if (rc)
                        goto unpin;
+               }
                scb_s->itdba = hpa;
        }
 
                 * if this block gets bigger, we have to shadow it.
                 */
                rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-               if (rc == -EINVAL)
+               if (rc) {
                        rc = set_validity_icpt(scb_s, 0x1310U);
-               if (rc)
                        goto unpin;
+               }
                scb_s->gvrd = hpa;
        }
 
                }
                /* 64 bytes cannot cross page boundaries */
                rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-               if (rc == -EINVAL)
+               if (rc) {
                        rc = set_validity_icpt(scb_s, 0x0043U);
-               /* Validity 0x0044 will be checked by SIE */
-               if (rc)
                        goto unpin;
+               }
+               /* Validity 0x0044 will be checked by SIE */
                scb_s->riccbd = hpa;
        }
        if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
                 * cross page boundaries
                 */
                rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-               if (rc == -EINVAL)
+               if (rc) {
                        rc = set_validity_icpt(scb_s, 0x10b0U);
-               if (rc)
                        goto unpin;
+               }
                scb_s->sdnxo = hpa | sdnxc;
        }
        return 0;
  *
  * Returns: - 0 if the scb was pinned.
  *          - > 0 if control has to be given to guest 2
- *          - -ENOMEM if out of memory
  */
 static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
                   gpa_t gpa)
        int rc;
 
        rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
-       if (rc == -EINVAL) {
+       if (rc) {
+               /* pin_guest_page() can now only fail with -EINVAL, so every
+                * failure is reported to guest 2 as an addressing exception.
+                */
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-               if (!rc)
-                       rc = 1;
+               WARN_ON_ONCE(rc);
+               return 1;
        }
-       if (!rc)
-               vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
-       return rc;
+       vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
+       return 0;
 }
 
 /*
 
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 
 __visible bool kvm_rebooting;
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
+/* Mark @pfn dirty, then drop its reference; exported so that arch code
+ * (kvm-s390 vsie) can release pages pinned via gfn_to_page().
+ */
+void kvm_release_pfn_dirty(kvm_pfn_t pfn)
 {
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
 }
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {