{
struct mempolicy *pol;
pgoff_t ilx;
- struct page *page;
+ struct folio *folio;
+ if (vma->vm_flags & VM_DROPPABLE)
+ gfp |= __GFP_NOWARN;
+
pol = get_vma_policy(vma, addr, order, &ilx);
- page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
- pol, ilx, numa_node_id());
+ folio = folio_alloc_mpol_noprof(gfp, order, pol, ilx, numa_node_id());
mpol_cond_put(pol);
- return page_rmappable_folio(page);
+ return folio;
}
EXPORT_SYMBOL(vma_alloc_folio_noprof);
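For reference, the new folio_alloc_mpol_noprof() helper is assumed to fold the old two-step sequence (an alloc_pages_mpol_noprof() call with __GFP_COMP, followed by page_rmappable_folio()) into a single call. A sketch of that assumed definition, not the verbatim mm/mempolicy.c code:

struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *pol, pgoff_t ilx, int nid)
{
	/* Same sequence the caller above used to open-code. */
	return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP,
							    order, pol, ilx, nid));
}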
*
* Like folio_add_anon_rmap_*() but must only be called on *new* folios.
* This means the inc-and-test can be bypassed.
- * The folio does not have to be locked.
+ * An exclusive folio does not need to be locked, unless two threads
+ * map it concurrently; a shared folio must be locked.
*
- * If the folio is pmd-mappable, it is accounted as a THP. As the folio
- * is new, it's assumed to be mapped exclusively by a single process.
+ * If the folio is pmd-mappable, it is accounted as a THP.
*/
void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
- unsigned long address)
+ unsigned long address, rmap_t flags)
{
- int nr = folio_nr_pages(folio);
+ const int nr = folio_nr_pages(folio);
+ const bool exclusive = flags & RMAP_EXCLUSIVE;
+ int nr_pmdmapped = 0;
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio);
VM_BUG_ON_VMA(address < vma->vm_start ||
address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
- if (!folio_test_swapbacked(folio))
- if (!(vma->vm_flags & VM_DROPPABLE))
+
+ /*
+ * VM_DROPPABLE mappings don't swap; instead they're just dropped when
+ * under memory pressure.
+ */
+ if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE))
__folio_set_swapbacked(folio);
- __folio_set_anon(folio, vma, address, true);
+ __folio_set_anon(folio, vma, address, exclusive);
if (likely(!folio_test_large(folio))) {
/* increment count (starts at -1) */
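With the new flags argument, a caller mapping a freshly allocated anonymous folio into a single process would look roughly like this (a hypothetical caller sketch; folio, vma and addr are assumed to come from the surrounding fault path):

	/*
	 * A new, never-mapped folio owned by one process: pass
	 * RMAP_EXCLUSIVE, no folio lock required. A folio that may be
	 * shared (e.g. re-added from the swap cache) would pass 0 and
	 * must already be locked.
	 */
	folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(folio, vma);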
* discarded. Remap the page to page table.
*/
set_pte_at(mm, address, pvmw.pte, pteval);
- folio_set_swapbacked(folio);
+ /*
+ * Unlike MADV_FREE mappings, VM_DROPPABLE ones
+ * never get swap backed on failure to drop.
+ */
+ if (!(vma->vm_flags & VM_DROPPABLE))
+ folio_set_swapbacked(folio);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
+ goto walk_abort;
}
if (swap_duplicate(entry) < 0) {
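Taken together, these hunks give droppable mappings their semantics: never swap-backed, silently discarded under memory pressure. A minimal userspace sketch, assuming the MAP_DROPPABLE flag value (0x08) from the uapi header in this series:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_DROPPABLE
#define MAP_DROPPABLE 0x08	/* Zero memory under memory pressure. */
#endif

int main(void)
{
	size_t len = 4096;
	char *cache = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_PRIVATE | MAP_DROPPABLE, -1, 0);
	if (cache == MAP_FAILED)
		return 1;

	memset(cache, 'x', len);
	/*
	 * Under memory pressure the kernel may discard these pages and
	 * hand back zeros on the next fault, so a reader must treat a
	 * zeroed byte as "cache lost, regenerate".
	 */
	if (cache[0] == 0)
		puts("cache was dropped; regenerate");
	else
		puts("cache intact");

	munmap(cache, len);
	return 0;
}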