www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Merge remote-tracking branch 'folio/for-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 2 Sep 2021 04:27:19 +0000 (14:27 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 2 Sep 2021 04:27:19 +0000 (14:27 +1000)
# Conflicts:
# mm/filemap.c
# mm/rmap.c
# mm/util.c

24 files changed:
fs/afs/write.c
fs/io_uring.c
include/linux/backing-dev.h
include/linux/highmem.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/netfs.h
include/linux/page-flags.h
include/linux/writeback.h
kernel/bpf/verifier.c
mm/filemap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/migrate.c
mm/page-writeback.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/swap_state.c
mm/util.c
mm/vmscan.c

diff --cc fs/afs/write.c
Simple merge
diff --cc fs/io_uring.c
Simple merge
diff --cc include/linux/backing-dev.h
Simple merge
diff --cc include/linux/highmem.h
Simple merge
diff --cc include/linux/memcontrol.h
Simple merge
diff --cc include/linux/mm.h
Simple merge
diff --cc include/linux/mm_types.h
Simple merge
diff --cc include/linux/netfs.h
Simple merge
diff --cc include/linux/page-flags.h
Simple merge
diff --cc include/linux/writeback.h
Simple merge
diff --cc kernel/bpf/verifier.c
Simple merge
diff --cc mm/filemap.c
index 920e8dc03251d51469d068e47689c842b37ed24a,83d0e720054ce631d5cd24c7e21063a6a48f4856..03d488fcb893585861d0bd2b5d28dc76ea4bcedd
+++ b/mm/filemap.c
@@@ -1017,54 -997,16 +1017,54 @@@ struct folio *filemap_alloc_folio(gfp_
                do {
                        cpuset_mems_cookie = read_mems_allowed_begin();
                        n = cpuset_mem_spread_node();
-                       page = __alloc_pages_node(n, gfp, 0);
-               } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
+                       folio = __folio_alloc_node(gfp, order, n);
+               } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
  
-               return page;
+               return folio;
        }
-       return alloc_pages(gfp, 0);
+       return folio_alloc(gfp, order);
  }
- EXPORT_SYMBOL(__page_cache_alloc);
+ EXPORT_SYMBOL(filemap_alloc_folio);
  #endif
  
 +/*
 + * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 + *
 + * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 + *
 + * @mapping1: the first mapping to lock
 + * @mapping2: the second mapping to lock
 + */
 +void filemap_invalidate_lock_two(struct address_space *mapping1,
 +                               struct address_space *mapping2)
 +{
 +      if (mapping1 > mapping2)
 +              swap(mapping1, mapping2);
 +      if (mapping1)
 +              down_write(&mapping1->invalidate_lock);
 +      if (mapping2 && mapping1 != mapping2)
 +              down_write_nested(&mapping2->invalidate_lock, 1);
 +}
 +EXPORT_SYMBOL(filemap_invalidate_lock_two);
 +
 +/*
 + * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 + *
 + * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 + *
 + * @mapping1: the first mapping to unlock
 + * @mapping2: the second mapping to unlock
 + */
 +void filemap_invalidate_unlock_two(struct address_space *mapping1,
 +                                 struct address_space *mapping2)
 +{
 +      if (mapping1)
 +              up_write(&mapping1->invalidate_lock);
 +      if (mapping2 && mapping1 != mapping2)
 +              up_write(&mapping2->invalidate_lock);
 +}
 +EXPORT_SYMBOL(filemap_invalidate_unlock_two);
 +
  /*
   * In order to wait for pages to become available there must be
   * waitqueues associated with pages. By using a hash table of
@@@ -2424,49 -2362,42 +2420,50 @@@ static int filemap_update_page(struct k
                struct address_space *mapping, struct iov_iter *iter,
                struct page *page)
  {
+       struct folio *folio = page_folio(page);
        int error;
  
-       if (!trylock_page(page)) {
 +      if (iocb->ki_flags & IOCB_NOWAIT) {
 +              if (!filemap_invalidate_trylock_shared(mapping))
 +                      return -EAGAIN;
 +      } else {
 +              filemap_invalidate_lock_shared(mapping);
 +      }
 +
+       if (!folio_trylock(folio)) {
 +              error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
 -                      return -EAGAIN;
 +                      goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
-                       put_and_wait_on_page_locked(page, TASK_KILLABLE);
 +                      filemap_invalidate_unlock_shared(mapping);
+                       put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
-               error = __lock_page_async(page, iocb->ki_waitq);
+               error = __folio_lock_async(folio, iocb->ki_waitq);
                if (error)
 -                      return error;
 +                      goto unlock_mapping;
        }
  
-       if (!page->mapping)
 +      error = AOP_TRUNCATED_PAGE;
 -              goto truncated;
+       if (!folio->mapping)
 +              goto unlock;
  
        error = 0;
-       if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page))
+       if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, &folio->page))
                goto unlock;
  
        error = -EAGAIN;
        if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
                goto unlock;
  
-       error = filemap_read_page(iocb->ki_filp, mapping, page);
+       error = filemap_read_page(iocb->ki_filp, mapping, &folio->page);
 -      if (error == AOP_TRUNCATED_PAGE)
 -              folio_put(folio);
 -      return error;
 -truncated:
 -      folio_unlock(folio);
 -      folio_put(folio);
 -      return AOP_TRUNCATED_PAGE;
 +      goto unlock_mapping;
  unlock:
-       unlock_page(page);
+       folio_unlock(folio);
 +unlock_mapping:
 +      filemap_invalidate_unlock_shared(mapping);
 +      if (error == AOP_TRUNCATED_PAGE)
-               put_page(page);
++              folio_put(folio);
        return error;
  }
  
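The notable piece of this resolution is the new filemap_invalidate_lock_two()/filemap_invalidate_unlock_two() pair: the two mappings are sorted by pointer value before the rwsems are taken (with down_write_nested() and lockdep subclass 1 for the second), so two tasks locking the same pair of files in opposite order cannot ABBA-deadlock, and passing the same mapping twice takes the lock only once. A minimal sketch of a hypothetical caller follows; the function name and the cross-file operation are illustrative, only the lock helpers come from this diff.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical: serialise invalidation against page faults on two files. */
static void example_lock_two_files(struct inode *src, struct inode *dst)
{
	/* Safe even if src == dst: the helper skips the second lock. */
	filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);

	/* ... invalidate page cache, punch holes, remap extents ... */

	filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
}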
diff --cc mm/hugetlb.c
Simple merge
diff --cc mm/memcontrol.c
Simple merge
diff --cc mm/memory-failure.c
Simple merge
diff --cc mm/memory.c
Simple merge
diff --cc mm/migrate.c
Simple merge
diff --cc mm/page-writeback.c
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/rmap.c
index 2d29a57d29e8d2021588b17c306bc6f5199c7d97,b3aae8eeaeaf42abafb62365f11258d3b6096385..80588dba16b64afbcdc43d2f31650c5335a847c7
+++ b/mm/rmap.c
  /*
   * Lock ordering in mm:
   *
 - * inode->i_mutex     (while writing or truncating, not reading or faulting)
 + * inode->i_rwsem     (while writing or truncating, not reading or faulting)
   *   mm->mmap_lock
 - *     page->flags PG_locked (lock_page)   * (see huegtlbfs below)
 - *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 - *         mapping->i_mmap_rwsem
 - *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 - *           anon_vma->rwsem
 - *             mm->page_table_lock or pte_lock
 - *               swap_lock (in swap_duplicate, swap_info_get)
 - *                 mmlist_lock (in mmput, drain_mmlist and others)
 - *                 mapping->private_lock (in __set_page_dirty_buffers)
 - *                   lock_page_memcg move_lock (in __set_page_dirty_buffers)
 - *                     i_pages lock (widely used)
 - *                       lruvec->lru_lock (in folio_lruvec_lock_irq)
 - *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 - *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 - *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 - *                   i_pages lock (widely used, in set_page_dirty,
 - *                             in arch-dependent flush_dcache_mmap_lock,
 - *                             within bdi.wb->list_lock in __sync_single_inode)
 + *     mapping->invalidate_lock (in filemap_fault)
 + *       page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 + *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 + *           mapping->i_mmap_rwsem
 + *             hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 + *             anon_vma->rwsem
 + *               mm->page_table_lock or pte_lock
 + *                 swap_lock (in swap_duplicate, swap_info_get)
 + *                   mmlist_lock (in mmput, drain_mmlist and others)
 + *                   mapping->private_lock (in __set_page_dirty_buffers)
 + *                     lock_page_memcg move_lock (in __set_page_dirty_buffers)
 + *                       i_pages lock (widely used)
-  *                         lruvec->lru_lock (in lock_page_lruvec_irq)
++ *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 + *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 + *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 + *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 + *                     i_pages lock (widely used, in set_page_dirty,
 + *                               in arch-dependent flush_dcache_mmap_lock,
 + *                               within bdi.wb->list_lock in __sync_single_inode)
   *
 - * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 + * anon_vma->rwsem,mapping->i_mmap_rwsem   (memory_failure, collect_procs_anon)
   *   ->tasklist_lock
   *     pte map lock
   *
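The resolved comment adds mapping->invalidate_lock to the hierarchy: it nests inside mm->mmap_lock and is taken before the page lock, matching the filemap_update_page() resolution above (filemap_invalidate_lock_shared() first, then folio_trylock()). A hedged sketch of that documented nesting, purely illustrative rather than code from this merge:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative nesting only: mmap_lock -> invalidate_lock -> PG_locked. */
static void example_fault_order(struct mm_struct *mm,
				struct address_space *mapping,
				struct page *page)
{
	mmap_read_lock(mm);				/* mm->mmap_lock */
	filemap_invalidate_lock_shared(mapping);	/* mapping->invalidate_lock */
	lock_page(page);				/* page->flags PG_locked */

	/* ... operate on the locked, invalidation-protected page ... */

	unlock_page(page);
	filemap_invalidate_unlock_shared(mapping);
	mmap_read_unlock(mm);
}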
diff --cc mm/shmem.c
Simple merge
diff --cc mm/swap_state.c
Simple merge
diff --cc mm/util.c
index db3091116b7c250a44caa614335a7de644e5f495,811a246f83b395e09b65591228ea25342691e4e4..3f9c8bf125b027e7ec5c541939ad15e4ebc48730
+++ b/mm/util.c
@@@ -635,31 -635,6 +635,21 @@@ void kvfree_sensitive(const void *addr
  }
  EXPORT_SYMBOL(kvfree_sensitive);
  
- static inline void *__page_rmapping(struct page *page)
- {
-       unsigned long mapping;
-       mapping = (unsigned long)page->mapping;
-       mapping &= ~PAGE_MAPPING_FLAGS;
-       return (void *)mapping;
- }
 +void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 +{
 +      void *newp;
 +
 +      if (oldsize >= newsize)
 +              return (void *)p;
 +      newp = kvmalloc(newsize, flags);
 +      if (!newp)
 +              return NULL;
 +      memcpy(newp, p, oldsize);
 +      kvfree(p);
 +      return newp;
 +}
 +EXPORT_SYMBOL(kvrealloc);
 +
  /* Neutral page->mapping pointer to address_space or anon_vma or other */
  void *page_rmapping(struct page *page)
  {
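The kvrealloc() added here keeps realloc() semantics for kvmalloc() buffers: if newsize fits in the old allocation the old pointer is returned unchanged, and on allocation failure it returns NULL while leaving the old buffer intact, so callers must not overwrite their only copy of the pointer. A hedged usage sketch; the wrapper function and its parameters are illustrative:

#include <linux/mm.h>

/* Hypothetical caller: grow a kvmalloc() buffer without leaking it. */
static void *example_grow_buffer(void *buf, size_t oldsize, size_t newsize)
{
	void *newbuf = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);

	if (!newbuf)
		return buf;	/* failed: the old buffer is untouched */
	return newbuf;		/* grown: old buffer freed, or reused if big enough */
}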
diff --cc mm/vmscan.c
Simple merge