The page cache deletion paths all run with interrupts enabled, so there is
no need to use the irqsave/irqrestore locking variants.
They used to have irqs disabled by the memcg lock added in commit
c4843a7593a9 ("memcg: add per cgroup dirty page accounting"), but that lock
has since been replaced by memcg taking the page lock instead; see commit
0a31bc97c80c ("mm: memcontrol: rewrite uncharge API").
Link: https://lkml.kernel.org/r/20210614211904.14420-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
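
For context, the irqsave/irqrestore and plain irq variants differ only in
how they treat the caller's interrupt state; the xa_lock_*()/xas_lock_*()
helpers touched below are thin wrappers around the corresponding spinlock
primitives on mapping->i_pages. A minimal sketch of the two semantics
(illustrative only; example_lock and the function names are not from this
patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void locked_op_irqsave(void)
{
	unsigned long flags;

	/*
	 * Saves the caller's interrupt state before disabling irqs,
	 * so it is safe even if the caller already had irqs off.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

static void locked_op_irq(void)
{
	/*
	 * Unconditionally disables irqs on lock and re-enables them
	 * on unlock; correct only when the caller is known to run
	 * with irqs enabled, as the deletion paths below always do.
	 */
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);
}

Besides documenting the calling context, dropping the save/restore avoids
reading and stashing the flags word on each lock acquisition.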
 
 void delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
-       unsigned long flags;
 
        BUG_ON(!PageLocked(page));
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        __delete_from_page_cache(page, NULL);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        page_cache_free_page(mapping, page);
 }
                                  struct pagevec *pvec)
 {
        int i;
-       unsigned long flags;
 
        if (!pagevec_count(pvec))
                return;
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        for (i = 0; i < pagevec_count(pvec); i++) {
                trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
                unaccount_page_cache_page(mapping, pvec->pages[i]);
        }
        page_cache_delete_batch(mapping, pvec);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        for (i = 0; i < pagevec_count(pvec); i++)
                page_cache_free_page(mapping, pvec->pages[i]);
        void (*freepage)(struct page *) = mapping->a_ops->freepage;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);
-       unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(old), old);
        VM_BUG_ON_PAGE(!PageLocked(new), new);
 
        mem_cgroup_migrate(old, new);
 
-       xas_lock_irqsave(&xas, flags);
+       xas_lock_irq(&xas);
        xas_store(&xas, new);
 
        old->mapping = NULL;
                __dec_lruvec_page_state(old, NR_SHMEM);
        if (PageSwapBacked(new))
                __inc_lruvec_page_state(new, NR_SHMEM);
-       xas_unlock_irqrestore(&xas, flags);
+       xas_unlock_irq(&xas);
        if (freepage)
                freepage(old);
        put_page(old);
 
 static int
 invalidate_complete_page2(struct address_space *mapping, struct page *page)
 {
-       unsigned long flags;
-
        if (page->mapping != mapping)
                return 0;
 
        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        if (PageDirty(page))
                goto failed;
 
        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page, NULL);
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
 
        if (mapping->a_ops->freepage)
                mapping->a_ops->freepage(page);
        put_page(page); /* pagecache ref */
        return 1;
 failed:
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
        return 0;
 }
 
 
 static int __remove_mapping(struct address_space *mapping, struct page *page,
                            bool reclaimed, struct mem_cgroup *target_memcg)
 {
-       unsigned long flags;
        int refcount;
        void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       xa_lock_irqsave(&mapping->i_pages, flags);
+       xa_lock_irq(&mapping->i_pages);
        /*
         * The non racy check for a busy page.
         *
                if (reclaimed && !mapping_exiting(mapping))
                        shadow = workingset_eviction(page, target_memcg);
                __delete_from_swap_cache(page, swap, shadow);
-               xa_unlock_irqrestore(&mapping->i_pages, flags);
+               xa_unlock_irq(&mapping->i_pages);
                put_swap_page(page, swap);
        } else {
                void (*freepage)(struct page *);
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(page, target_memcg);
                __delete_from_page_cache(page, shadow);
-               xa_unlock_irqrestore(&mapping->i_pages, flags);
+               xa_unlock_irq(&mapping->i_pages);
 
                if (freepage != NULL)
                        freepage(page);
        return 1;
 
 cannot_free:
-       xa_unlock_irqrestore(&mapping->i_pages, flags);
+       xa_unlock_irq(&mapping->i_pages);
        return 0;
 }
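
Note that xa_unlock_irq() re-enables interrupts unconditionally, so any
future caller entering these paths with interrupts disabled would be
silently broken. A hedged sketch of how that precondition could be made
explicit with the stock lockdep_assert_irqs_enabled() helper; the
assertion placement is illustrative and not part of the patch above:

/*
 * Illustrative only: delete_from_page_cache() with the new
 * calling-context requirement asserted under lockdep.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));
	/*
	 * xa_unlock_irq() enables irqs unconditionally; catch callers
	 * that arrive with irqs off (no-op without CONFIG_LOCKDEP).
	 */
	lockdep_assert_irqs_enabled();

	xa_lock_irq(&mapping->i_pages);
	__delete_from_page_cache(page, NULL);
	xa_unlock_irq(&mapping->i_pages);

	page_cache_free_page(mapping, page);
}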