}
 
 static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
-                                 struct vm_area_struct *vma,
-                                 struct list_head *to_kill)
+               struct vm_area_struct *vma, struct list_head *to_kill,
+               unsigned long addr)
 {
-       unsigned long addr = page_address_in_vma(p, vma);
+       if (addr == -EFAULT)
+               return;
        __add_to_kill(tsk, p, vma, to_kill, addr);
 }
 
 static void collect_procs_anon(struct folio *folio, struct page *page,
                struct list_head *to_kill, int force_early)
 {
-       struct vm_area_struct *vma;
        struct task_struct *tsk;
        struct anon_vma *av;
        pgoff_t pgoff;
        pgoff = page_to_pgoff(page);
        rcu_read_lock();
        for_each_process(tsk) {
+               struct vm_area_struct *vma;
                struct anon_vma_chain *vmac;
                struct task_struct *t = task_early_kill(tsk, force_early);
+               unsigned long addr;
 
                if (!t)
                        continue;
                anon_vma_interval_tree_foreach(vmac, &av->rb_root,
                                               pgoff, pgoff) {
                        vma = vmac->vma;
                        if (vma->vm_mm != t->mm)
                                continue;
-                       if (!page_mapped_in_vma(page, vma))
-                               continue;
-                       add_to_kill_anon_file(t, page, vma, to_kill);
+                       addr = page_mapped_in_vma(page, vma);
+                       add_to_kill_anon_file(t, page, vma, to_kill, addr);
                }
        }
        rcu_read_unlock();
        pgoff = page_to_pgoff(page);
        for_each_process(tsk) {
                struct task_struct *t = task_early_kill(tsk, force_early);
+               unsigned long addr;
 
                if (!t)
                        continue;
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
                                      pgoff) {
                        /*
                         * Assume applications who requested early kill want
                         * to be informed of all such data corruptions.
                         */
-                       if (vma->vm_mm == t->mm)
-                               add_to_kill_anon_file(t, page, vma, to_kill);
+                       if (vma->vm_mm != t->mm)
+                               continue;
+                       addr = page_address_in_vma(page, vma);
+                       add_to_kill_anon_file(t, page, vma, to_kill, addr);
                }
        }
        rcu_read_unlock();
 
 /**
  * page_mapped_in_vma - check whether a page is really mapped in a VMA
  * @page: the page to test
  * @vma: the VMA to test
  *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA.  Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table.  If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
  */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 {
        struct folio *folio = page_folio(page);
        pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
        struct page_vma_mapped_walk pvmw = {
                .pfn = folio_pfn(folio),
                .nr_pages = 1,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
 
        pvmw.address = vma_address(vma, pgoff, 1);
        if (pvmw.address == -EFAULT)
-               return 0;
+               goto out;
        if (!page_vma_mapped_walk(&pvmw))
-               return 0;
+               return -EFAULT;
        page_vma_mapped_walk_done(&pvmw);
-       return 1;
+out:
+       return pvmw.address;
 }
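
A minimal usage sketch of the new return convention (illustrative only, not
part of the patch; "page" and "vma" stand in for a caller's variables, as in
collect_procs_anon() above):

	unsigned long addr;

	/* Either the user virtual address or -EFAULT on failure. */
	addr = page_mapped_in_vma(page, vma);

	/*
	 * addr is an unsigned long, so the failure case is detected by
	 * comparing against -EFAULT, exactly as add_to_kill_anon_file()
	 * now does before calling __add_to_kill().
	 */
	if (addr != -EFAULT)
		pr_info("page mapped at %#lx\n", addr);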