 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
 			unsigned long *rss, unsigned short *mmap_miss)
 {
+	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
 	do {
 		...
 skip:
 		if (count) {
 			set_pte_range(vmf, folio, page, count, addr);
 			*rss += count;
-			folio_ref_add(folio, count);
+			folio_ref_add(folio, count - ref_from_caller);
+			ref_from_caller = 0;
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
 		}
 		...
 	} while (--nr_pages > 0);

 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
 		*rss += count;
-		folio_ref_add(folio, count);
+		folio_ref_add(folio, count - ref_from_caller);
+		ref_from_caller = 0;
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
 	}
 	vmf->pte = old_ptep;
+	if (ref_from_caller)
+		/* Locked folios cannot get truncated. */
+		folio_ref_dec(folio);
 	return ret;
 }
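Net effect of the hunks above: the reference the caller already holds on the folio is folded into the first batch of mapped PTEs via folio_ref_add(folio, count - ref_from_caller), and is dropped explicitly only if no PTE got mapped at all. A minimal userspace sketch of that pattern, with a plain atomic counter standing in for the folio refcount; the obj and obj_map_batch names are invented for illustration, not kernel API:

/* Hedged sketch, not kernel code: an atomic counter models the folio
 * refcount, and obj_map_batch() mimics the hunks above. */
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int refcount; };

/* Map a batch of `count` PTE-like users. The first batch reuses the
 * reference the caller already holds, exactly as
 * folio_ref_add(folio, count - ref_from_caller) does above. */
static unsigned int obj_map_batch(struct obj *o, unsigned int count,
                                  unsigned int ref_from_caller)
{
        atomic_fetch_add(&o->refcount, count - ref_from_caller);
        return 0;       /* the caller's reference is now consumed */
}

int main(void)
{
        struct obj o = { .refcount = 1 };       /* caller's lookup ref */
        unsigned int ref_from_caller = 1;

        ref_from_caller = obj_map_batch(&o, 3, ref_from_caller);
        ref_from_caller = obj_map_batch(&o, 2, ref_from_caller);

        if (ref_from_caller)    /* nothing was mapped: drop the ref */
                atomic_fetch_sub(&o->refcount, 1);

        printf("refcount = %d\n", atomic_load(&o.refcount));   /* 5 */
        return 0;
}

Two batches of 3 and 2 pages end up holding exactly 5 references, with one atomic add per batch and no put/re-get pair. The order-0 helper below gets the same treatment.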
 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		struct folio *folio, unsigned long addr,
 		unsigned long *rss, unsigned short *mmap_miss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = &folio->page;

 	if (PageHWPoison(page))
-		return ret;
+		goto out;
 	/* See comment of filemap_map_folio_range() */
 	if (!folio_test_workingset(folio))
 		(*mmap_miss)++;

 	/*
 	 * NOTE: If there're PTE markers, we'll leave them to be
 	 * handled in the specific fault path, and it'll restrict
 	 * the fault-around logic.
 	 */
 	if (!pte_none(ptep_get(vmf->pte)))
-		return ret;
+		goto out;
 	if (vmf->address == addr)
 		ret = VM_FAULT_NOPAGE;

 	set_pte_range(vmf, folio, page, 1, addr);
 	(*rss)++;
-	folio_ref_inc(folio);
+	return ret;
+out:
+	/* Locked folios cannot get truncated. */
+	folio_ref_dec(folio);
 	return ret;
 }
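The order-0 helper now follows the same contract: success consumes the reference passed in by the caller (hence the dropped folio_ref_inc()), and every bail-out path funnels through out: to release it. The "Locked folios cannot get truncated." comment is what justifies the cheaper folio_ref_dec() over folio_put() there: while the folio is locked, truncation cannot strip the pagecache reference, so this reference cannot be the last one and the free-on-zero check a full put performs is dead weight. A hedged sketch of that distinction, again with hypothetical obj_* names:

#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int refcount; };

static void obj_free(struct obj *o)
{
        free(o);
}

/* General-purpose drop: might release the last reference. */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                obj_free(o);
}

/* Cheaper drop for when the reference is provably not the last one,
 * as on the out: path above (the folio lock blocks truncation). */
static void obj_ref_dec(struct obj *o)
{
        atomic_fetch_sub(&o->refcount, 1);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 2);   /* e.g. pagecache ref + ours */
        obj_ref_dec(o);                 /* ours, known not to be last */
        obj_put(o);                     /* last ref: frees the object */
        return 0;
}

The final hunk below removes the now-redundant drop at the call site.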
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
 					nr_pages, &rss, &mmap_miss);

 		folio_unlock(folio);
-		folio_put(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);

 	add_mm_counter(vma->vm_mm, folio_type, rss);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
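On the caller side the folio_put() disappears because both helpers now consume the lookup reference themselves. Per mapped folio, one atomic add plus one atomic sub become a single smaller add. A toy before/after count of the atomic operations, under the same assumptions as the sketches above (nothing here is kernel API):

#include <stdatomic.h>
#include <stdio.h>

static int atomic_ops;

struct obj { atomic_int refcount; };

static void ref_add(struct obj *o, int n)
{
        atomic_fetch_add(&o->refcount, n);
        atomic_ops++;
}

static void ref_sub(struct obj *o, int n)
{
        atomic_fetch_sub(&o->refcount, n);
        atomic_ops++;
}

int main(void)
{
        struct obj o = { .refcount = 1 };       /* caller's lookup ref */
        struct obj p = { .refcount = 1 };

        /* old contract: callee adds count, caller drops its own ref */
        ref_add(&o, 4);         /* folio_ref_add(folio, count) */
        ref_sub(&o, 1);         /* folio_put() in the caller */
        printf("old: refcount=%d, atomic ops=%d\n",
               atomic_load(&o.refcount), atomic_ops);

        /* new contract: callee folds the caller's ref into its add */
        atomic_ops = 0;
        ref_add(&p, 4 - 1);     /* folio_ref_add(folio, count - ref_from_caller) */
        printf("new: refcount=%d, atomic ops=%d\n",
               atomic_load(&p.refcount), atomic_ops);
        return 0;
}

Both variants leave the refcount at 4 for four mapped pages, but the new contract gets there with half the atomic operations.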