www.infradead.org Git - linux.git/commitdiff
shmem: add shmem_read_folio() and shmem_read_folio_gfp()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 6 Feb 2023 16:25:20 +0000 (16:25 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 10 Feb 2023 00:51:42 +0000 (16:51 -0800)
These are the folio replacements for shmem_read_mapping_page() and
shmem_read_mapping_page_gfp().

[akpm@linux-foundation.org: fix shmem_read_mapping_page_gfp(), per Matthew]
Link: https://lkml.kernel.org/r/Y+QdJTuzxeBYejw2@casper.infradead.org
Link: https://lkml.kernel.org/r/20230206162520.4029022-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mark Hemment <markhemm@googlemail.com>
Cc: Charan Teja Kalla <quic_charante@quicinc.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/shmem_fs.h
mm/shmem.c

index d09d54be4ffd99950383cccedf5dd350743536f2..103d1000a5a2e20d997d70458a3577f5063a9e49 100644 (file)
@@ -109,6 +109,14 @@ enum sgp_type {
 
 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
                enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+               pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+               pgoff_t index)
+{
+       return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
 
 static inline struct page *shmem_read_mapping_page(
                                struct address_space *mapping, pgoff_t index)
index 732969afabd117cccb9629760e6fcfca81baad9a..be6bdd320d5fe3572b6a65af1ca561e76acda078 100644 (file)
@@ -4311,9 +4311,9 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 }
 
 /**
- * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
- * @mapping:   the page's address_space
- * @index:     the page index
+ * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
+ * @mapping:   the folio's address_space
+ * @index:     the folio index
  * @gfp:       the page allocator flags to use if allocating
  *
  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
@@ -4325,13 +4325,12 @@ int shmem_zero_setup(struct vm_area_struct *vma)
  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
  */
-struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
-                                        pgoff_t index, gfp_t gfp)
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+               pgoff_t index, gfp_t gfp)
 {
 #ifdef CONFIG_SHMEM
        struct inode *inode = mapping->host;
        struct folio *folio;
-       struct page *page;
        int error;
 
        BUG_ON(!shmem_mapping(mapping));
@@ -4341,6 +4340,25 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                return ERR_PTR(error);
 
        folio_unlock(folio);
+       return folio;
+#else
+       /*
+        * The tiny !SHMEM case uses ramfs without swap
+        */
+       return mapping_read_folio_gfp(mapping, index, gfp);
+#endif
+}
+EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);
+
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+                                        pgoff_t index, gfp_t gfp)
+{
+       struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
+       struct page *page;
+
+       if (IS_ERR(folio))
+               return &folio->page;
+
        page = folio_file_page(folio, index);
        if (PageHWPoison(page)) {
                folio_put(folio);
@@ -4348,11 +4366,5 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
        }
 
        return page;
-#else
-       /*
-        * The tiny !SHMEM case uses ramfs without swap
-        */
-       return read_cache_page_gfp(mapping, index, gfp);
-#endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);