4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
  
- Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+ Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 -Architectures: x86
 +Architectures: x86, arm, arm64, mips
  Type: vm ioctl
  Parameters: struct kvm_dirty_log (in)
  Returns: 0 on success, -1 on error
  * For the new DR6 bits, note that bit 16 is set iff the #DB exception
    will clear DR6.RTM.
  
- 7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+ 7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
  
 -Architectures: all
 +Architectures: x86, arm, arm64, mips
  Parameters: args[0] whether feature should be enabled or not
  
  With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
 
        pt_element_t *table;
        struct page *page;
  
 -      npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
 +      npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
-       /* Check if the user is doing something meaningless. */
-       if (unlikely(npages != 1))
-               return -EFAULT;
- 
-       table = kmap_atomic(page);
-       ret = CMPXCHG(&table[index], orig_pte, new_pte);
-       kunmap_atomic(table);
- 
-       kvm_release_page_dirty(page);
+       if (likely(npages == 1)) {
+               table = kmap_atomic(page);
+               ret = CMPXCHG(&table[index], orig_pte, new_pte);
+               kunmap_atomic(table);
+ 
+               kvm_release_page_dirty(page);
+       } else {
+               struct vm_area_struct *vma;
+               unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
+               unsigned long pfn;
+               unsigned long paddr;
+ 
+               down_read(&current->mm->mmap_sem);
+               vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
+               if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
+                       up_read(&current->mm->mmap_sem);
+                       return -EFAULT;
+               }
+               pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               paddr = pfn << PAGE_SHIFT;
+               table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
+               if (!table) {
+                       up_read(&current->mm->mmap_sem);
+                       return -EFAULT;
+               }
+               ret = CMPXCHG(&table[index], orig_pte, new_pte);
+               memunmap(table);
+               up_read(&current->mm->mmap_sem);
+       }
  
        return (ret != orig_pte);
  }
 
  static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
  {
-       unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       unsigned long val = kvm_rax_read(vcpu);
        int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
                                            size, port, &val, 1);
 +      if (ret)
 +              return ret;
  
 -      if (!ret) {
 +      /*
 +       * Workaround userspace that relies on old KVM behavior of %rip being
 +       * incremented prior to exiting to userspace to handle "OUT 0x7e".
 +       */
 +      if (port == 0x7e &&
 +          kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
 +              vcpu->arch.complete_userspace_io =
 +                      complete_fast_pio_out_port_0x7e;
 +              kvm_skip_emulated_instruction(vcpu);
 +      } else {
                vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
                vcpu->arch.complete_userspace_io = complete_fast_pio_out;
        }