www.infradead.org Git - users/willy/pagecache.git/commitdiff
uprobes: Use a folio instead of a page  [branch: folio-mk-pte]
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 20 Feb 2025 20:04:46 +0000 (15:04 -0500)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 25 Feb 2025 19:47:21 +0000 (14:47 -0500)
Allocate an order-0 folio instead of a page.  Saves a few calls to
compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
kernel/events/uprobes.c

index bf2a87a0a37878e99d0cf715ddedfdc2d1949c5d..74f695cd02a413ea8894dae18adef63d12ba55f1 100644 (file)
@@ -158,17 +158,16 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  * @vma:      vma that holds the pte pointing to page
  * @addr:     address the old @page is mapped at
  * @old_page: the page we are replacing by new_page
- * @new_page: the modified page we replace page by
+ * @new_folio: the modified folio we replace @page with
  *
- * If @new_page is NULL, only unmap @old_page.
+ * If @new_folio is NULL, only unmap @old_page.
  *
  * Returns 0 on success, negative error code otherwise.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
-                               struct page *old_page, struct page *new_page)
+                               struct page *old_page, struct folio *new_folio)
 {
        struct folio *old_folio = page_folio(old_page);
-       struct folio *new_folio;
        struct mm_struct *mm = vma->vm_mm;
        DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
        int err;
@@ -177,8 +176,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
                                addr + PAGE_SIZE);
 
-       if (new_page) {
-               new_folio = page_folio(new_page);
+       if (new_folio) {
                err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
                if (err)
                        return err;
@@ -193,7 +191,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                goto unlock;
        VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
-       if (new_page) {
+       if (new_folio) {
                folio_get(new_folio);
                folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
                folio_add_lru_vma(new_folio, vma);
@@ -208,9 +206,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
        flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
        ptep_clear_flush(vma, addr, pvmw.pte);
-       if (new_page)
+       if (new_folio)
                set_pte_at(mm, addr, pvmw.pte,
-                          mk_pte(new_page, vma->vm_page_prot));
+                          folio_mk_pte(new_folio, vma->vm_page_prot));
 
        folio_remove_rmap_pte(old_folio, old_page, vma);
        if (!folio_mapped(old_folio))
@@ -474,7 +472,8 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
                        unsigned long vaddr, uprobe_opcode_t opcode)
 {
        struct uprobe *uprobe;
-       struct page *old_page, *new_page;
+       struct page *old_page;
+       struct folio *new_folio;
        struct vm_area_struct *vma;
        int ret, is_register, ref_ctr_updated = 0;
        bool orig_page_huge = false;
@@ -519,13 +518,14 @@ retry:
                goto put_old;
 
        ret = -ENOMEM;
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
-       if (!new_page)
+       new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+       if (!new_folio)
                goto put_old;
 
-       __SetPageUptodate(new_page);
-       copy_highpage(new_page, old_page);
-       copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+       copy_highpage(folio_page(new_folio, 0), old_page);
+       copy_to_page(folio_page(new_folio, 0), vaddr, &opcode,
+                       UPROBE_SWBP_INSN_SIZE);
+       __folio_mark_uptodate(new_folio);
 
        if (!is_register) {
                struct page *orig_page;
@@ -539,10 +539,11 @@ retry:
 
                if (orig_page) {
                        if (PageUptodate(orig_page) &&
-                           pages_identical(new_page, orig_page)) {
+                           pages_identical(folio_page(new_folio, 0),
+                                   orig_page)) {
                                /* let go new_page */
-                               put_page(new_page);
-                               new_page = NULL;
+                               folio_put(new_folio);
+                               new_folio = NULL;
 
                                if (PageCompound(orig_page))
                                        orig_page_huge = true;
@@ -551,9 +552,9 @@ retry:
                }
        }
 
-       ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
-       if (new_page)
-               put_page(new_page);
+       ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_folio);
+       if (new_folio)
+               folio_put(new_folio);
 put_old:
        put_page(old_page);