www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
Merge branch 'akpm-current/current'
author    Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 2 Sep 2021 05:16:45 +0000 (15:16 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Thu, 2 Sep 2021 05:16:45 +0000 (15:16 +1000)
141 files changed:
Documentation/core-api/cachetlb.rst
MAINTAINERS
arch/Kconfig
arch/alpha/kernel/syscalls/syscall.tbl
arch/arc/Kconfig
arch/arc/include/asm/bitops.h
arch/arm/include/asm/cacheflush.h
arch/arm/kernel/traps.c
arch/arm/mach-rpc/ecard.c
arch/arm/tools/syscall.tbl
arch/arm64/Kconfig
arch/arm64/include/asm/unistd32.h
arch/arm64/mm/init.c
arch/ia64/kernel/syscalls/syscall.tbl
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/Kconfig
arch/mips/include/asm/cacheflush.h
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/nds32/include/asm/cacheflush.h
arch/parisc/include/asm/cacheflush.h
arch/parisc/kernel/syscalls/syscall.tbl
arch/powerpc/Kconfig
arch/powerpc/include/asm/bitops.h
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/kernel/traps.c
arch/powerpc/mm/book3s64/radix_tlb.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/kernel/setup.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/sh/include/asm/cacheflush.h
arch/sh/kernel/syscalls/syscall.tbl
arch/sparc/kernel/syscalls/syscall.tbl
arch/x86/Kconfig
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/kernel/apic/vector.c
arch/xtensa/kernel/syscalls/syscall.tbl
block/blk-map.c
block/blk-mq.c
drivers/base/node.c
drivers/block/rnbd/rnbd-clt.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/net/virtio_net.c
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/virtio/virtio_mem.c
fs/buffer.c
fs/eventpoll.c
fs/exec.c
fs/f2fs/segment.c
fs/fcntl.c
fs/fs-writeback.c
fs/inode.c
fs/internal.h
fs/locks.c
fs/namei.c
fs/namespace.c
fs/pipe.c
include/linux/backing-dev.h
include/linux/bitmap.h
include/linux/bitops.h
include/linux/cpumask.h
include/linux/fs.h
include/linux/highmem-internal.h
include/linux/highmem.h
include/linux/memcontrol.h
include/linux/migrate.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/page-flags.h
include/linux/page_idle.h
include/linux/pagemap.h
include/linux/rmap.h
include/linux/sched.h
include/linux/swap.h
include/linux/syscalls.h
include/linux/units.h
include/linux/writeback.h
init/main.c
ipc/sem.c
kernel/cpu.c
kernel/exit.c
kernel/fork.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/sys.c
kernel/sys_ni.c
kernel/sysctl.c
kernel/time/clocksource.c
kernel/time/posix-timers.c
lib/Kconfig
lib/Kconfig.debug
lib/scatterlist.c
lib/string.c
lib/test_bitmap.c
lib/test_kasan.c
lib/vsprintf.c
mm/Makefile
mm/backing-dev.c
mm/compaction.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kfence/report.c
mm/khugepaged.c
mm/ksm.c
mm/madvise.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/memremap.c
mm/migrate.c
mm/mmap.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_owner.c
mm/rmap.c
mm/shmem.c
mm/swap.c
mm/swapfile.c
mm/truncate.c
mm/userfaultfd.c
mm/vmscan.c
mm/vmstat.c
mm/workingset.c
tools/testing/selftests/kvm/dirty_log_perf_test.c

Simple merge
diff --cc MAINTAINERS
Simple merge
diff --cc arch/Kconfig
Simple merge
Simple merge
index a7daaf64ae34410fdba74176665a561ef20984a8,0cc24ae6a8d99cac6858b23f8f31ca3ddacfd900..bdb7e190a294e7c4789e00d681bfaa541c37541f
@@@ -186,10 -368,7 +186,9 @@@ static inline __attribute__ ((const)) u
  #include <asm-generic/bitops/fls64.h>
  #include <asm-generic/bitops/sched.h>
  #include <asm-generic/bitops/lock.h>
 +#include <asm-generic/bitops/atomic.h>
 +#include <asm-generic/bitops/non-atomic.h>
  
- #include <asm-generic/bitops/find.h>
  #include <asm-generic/bitops/le.h>
  #include <asm-generic/bitops/ext2-atomic-setbit.h>
  
index 23bf823376e1980cf2f222d4fe03aaa3e611925c,5e56288e343bb6fbb9eff102180f2f3d93b6b959..e68fb879e4f9d5a86b3dc0fbc0d7c8c1e546ed53
@@@ -290,8 -290,8 +290,9 @@@ extern void flush_cache_page(struct vm_
   */
  #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
  extern void flush_dcache_page(struct page *);
 +void flush_dcache_folio(struct folio *folio);
  
+ #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
  static inline void flush_kernel_vmap_range(void *addr, int size)
  {
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1b8316212512eb76fec3f441008982a5e9d3d73f,3d9efee0f43c92903b31e10fe940406326fc4b76..5f2233ec61dd119925c0c27c6a7ba6ff1625ab4f
@@@ -691,10 -637,12 +692,12 @@@ static void __init reserve_crashkernel(
                return;
        }
  
-       if (register_memory_notifier(&kdump_mem_nb))
+       if (register_memory_notifier(&kdump_mem_nb)) {
+               memblock_free(crash_base, crash_size);
                return;
+       }
  
 -      if (!OLDMEM_BASE && MACHINE_IS_VM)
 +      if (!oldmem_data.start && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
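The error path added here follows the usual unwind rule: a resource
reserved earlier must be released when a later setup step fails.  A
minimal userspace sketch of that shape, with malloc()/free() standing in
for the memblock reservation and a notifier registration that always
fails; all names below are toy stand-ins, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

static void *reserve_region(size_t size)
{
	return malloc(size);            /* stands in for the memblock reservation */
}

static int register_notifier(void)
{
	return -1;                      /* simulate registration failure */
}

int main(void)
{
	void *crash_base = reserve_region(4096);

	if (!crash_base)
		return 1;

	if (register_notifier()) {
		free(crash_base);       /* the memblock_free() added above */
		return 1;
	}
	/* ... continue setting up the crash kernel region ... */
	return 0;
}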
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc block/blk-map.c
Simple merge
diff --cc block/blk-mq.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index fbbb78f6885e700ed4bddd8176f2b926b6cf9832,8d7ec6448d5fc38168d06ae47ccc14f38b030acd..ab4b133e1ccdd51701d311a74e6340a88a7d602a
@@@ -211,10 -211,11 +211,10 @@@ static int dra7xx_pcie_handle_msi(struc
        if (!val)
                return 0;
  
-       pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+       pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
        while (pos != MAX_MSI_IRQS_PER_CTRL) {
 -              irq = irq_find_mapping(pp->irq_domain,
 -                                     (index * MAX_MSI_IRQS_PER_CTRL) + pos);
 -              generic_handle_irq(irq);
 +              generic_handle_domain_irq(pp->irq_domain,
 +                                        (index * MAX_MSI_IRQS_PER_CTRL) + pos);
                pos++;
                pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
        }
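The rewritten loop is the standard walk over set bits: find_first_bit()
seeds the position, find_next_bit() advances past it.  A small userspace
sketch of the same walk; the two helpers below are minimal value-based
stand-ins for the kernel bitops, which take a bitmap pointer and handle
multi-word bitmaps:

#include <stdio.h>

#define WIDTH 32                        /* stands in for MAX_MSI_IRQS_PER_CTRL */

static unsigned int find_first_bit(unsigned long val, unsigned int size)
{
	return val ? (unsigned int)__builtin_ctzl(val) : size;
}

static unsigned int find_next_bit(unsigned long val, unsigned int size,
				  unsigned int start)
{
	unsigned long masked = start < size ? (val >> start) << start : 0;

	return masked ? (unsigned int)__builtin_ctzl(masked) : size;
}

int main(void)
{
	unsigned long val = 0x1402;     /* pending status bits 1, 10 and 12 */
	unsigned int pos = find_first_bit(val, WIDTH);

	while (pos != WIDTH) {
		printf("handle the interrupt mapped at bit %u\n", pos);
		pos = find_next_bit(val, WIDTH, pos + 1);
	}
	return 0;
}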
Simple merge
Simple merge
diff --cc fs/buffer.c
Simple merge
diff --cc fs/eventpoll.c
Simple merge
diff --cc fs/exec.c
Simple merge
Simple merge
diff --cc fs/fcntl.c
Simple merge
Simple merge
diff --cc fs/inode.c
Simple merge
diff --cc fs/internal.h
Simple merge
diff --cc fs/locks.c
Simple merge
diff --cc fs/namei.c
Simple merge
diff --cc fs/namespace.c
Simple merge
diff --cc fs/pipe.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index cdb74ce8030d4223d2fbc1df6bd6fad10c068feb,69e6c5462c43e5e0fa7b9e5deec436ec9a555f19..3a0ce40090c654663c25d65aa04232172df9bcbe
@@@ -602,18 -593,6 +600,11 @@@ static inline struct obj_cgroup **page_
  }
  #endif
  
- static __always_inline bool memcg_stat_item_in_bytes(int idx)
- {
-       if (idx == MEMCG_PERCPU_B)
-               return true;
-       return vmstat_item_in_bytes(idx);
- }
 +static inline bool PageMemcgKmem(struct page *page)
 +{
 +      return folio_memcg_kmem(page_folio(page));
 +}
 +
  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
  {
        return (memcg == root_mem_cgroup);
@@@ -705,15 -684,37 +696,36 @@@ static inline bool mem_cgroup_below_min
                page_counter_read(&memcg->memory);
  }
  
- int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
 -int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 -                      gfp_t gfp_mask);
 -static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 -                                  gfp_t gfp_mask)
++int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
++static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
++                                  gfp_t gfp)
+ {
+       if (mem_cgroup_disabled())
+               return 0;
 -      return __mem_cgroup_charge(page, mm, gfp_mask);
++      return __mem_cgroup_charge(folio, mm, gfp);
+ }
  int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
  
- void mem_cgroup_uncharge(struct folio *folio);
- void mem_cgroup_uncharge_list(struct list_head *page_list);
 -void __mem_cgroup_uncharge(struct page *page);
 -static inline void mem_cgroup_uncharge(struct page *page)
++void __mem_cgroup_uncharge(struct folio *folio);
++static inline void mem_cgroup_uncharge(struct folio *folio)
+ {
+       if (mem_cgroup_disabled())
+               return;
 -      __mem_cgroup_uncharge(page);
++      __mem_cgroup_uncharge(folio);
+ }
+ void __mem_cgroup_uncharge_list(struct list_head *page_list);
+ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+ {
+       if (mem_cgroup_disabled())
+               return;
+       __mem_cgroup_uncharge_list(page_list);
+ }
  
 -void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 +void mem_cgroup_migrate(struct folio *old, struct folio *new);
  
  /**
   * mem_cgroup_lruvec - get the lru list vector for a memcg & node
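The shape of this resolution is the disabled-controller fast path: the
exported __mem_cgroup_charge()/__mem_cgroup_uncharge() keep the real
work out of line, while static inline wrappers return early when the
controller is off, so the common disabled case costs one test instead of
a function call.  A minimal sketch of the pattern with toy names:

#include <stdbool.h>
#include <stdio.h>

static bool controller_disabled = true;

static int __charge(int nr_pages)       /* out-of-line slow path */
{
	printf("charging %d pages\n", nr_pages);
	return 0;
}

static inline int charge(int nr_pages)  /* cheap inline fast path */
{
	if (controller_disabled)
		return 0;
	return __charge(nr_pages);
}

int main(void)
{
	return charge(1);               /* no call made while disabled */
}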
Simple merge
index 8d3fda2653820ae180c9633172c7ea8dd323b49e,11c38550627c829adea214bdcb3b0ec196ec2706..5c99a87f2639f5c519a8294585749632dcd09e4d
@@@ -1238,41 -1214,14 +1238,33 @@@ static inline void get_page(struct pag
  }
  
  bool __must_check try_grab_page(struct page *page, unsigned int flags);
- __maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
-                                                  unsigned int flags);
+ struct page *try_grab_compound_head(struct page *page, int refs,
+                                   unsigned int flags);
  
- static inline __must_check bool try_get_page(struct page *page)
- {
-       page = compound_head(page);
-       if (WARN_ON_ONCE(page_ref_count(page) <= 0))
-               return false;
-       page_ref_inc(page);
-       return true;
- }
+ struct page *try_get_compound_head(struct page *page, int refs);
  
 +/**
 + * folio_put - Decrement the reference count on a folio.
 + * @folio: The folio.
 + *
 + * If the folio's reference count reaches zero, the memory will be
 + * released back to the page allocator and may be used by another
 + * allocation immediately.  Do not access the memory or the struct folio
 + * after calling folio_put() unless you can be sure that it wasn't the
 + * last reference.
 + *
 + * Context: May be called in process or interrupt context, but not in NMI
 + * context.  May be called while holding a spinlock.
 + */
 +static inline void folio_put(struct folio *folio)
 +{
 +      if (folio_put_testzero(folio))
 +              __put_page(&folio->page);
 +}
 +
  static inline void put_page(struct page *page)
  {
 -      page = compound_head(page);
 +      struct folio *folio = page_folio(page);
  
        /*
         * For devmap managed pages we need to catch refcount transition from
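The kernel-doc above pins down the folio_put() contract: the caller that
drops the last reference frees the memory, and nothing may touch the
folio afterwards without holding a separate reference.  A toy model of
that contract, with C11 atomics standing in for the kernel's page
reference helpers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct folio {
	atomic_int refcount;
};

static void folio_put(struct folio *folio)
{
	/* fetch_sub() returns the old value: 1 means this was the last put. */
	if (atomic_fetch_sub(&folio->refcount, 1) == 1) {
		free(folio);            /* stands in for __put_page() */
		printf("released\n");
	}
}

int main(void)
{
	struct folio *folio = malloc(sizeof(*folio));

	atomic_init(&folio->refcount, 2);
	folio_put(folio);               /* one reference remains */
	folio_put(folio);               /* last put frees the folio */
	return 0;
}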
Simple merge
Simple merge
index 1bcb1365b1d0652e47d77add50b31441bb17f7cf,d8a6aecf99cb9f0fc63bc49126b06fe5936d82e4..83abf95e9fa7ddb7f55708bc612ac6cef5d0cdf6
@@@ -6,9 -6,39 +6,9 @@@
  #include <linux/page-flags.h>
  #include <linux/page_ext.h>
  
- #ifdef CONFIG_IDLE_PAGE_TRACKING
+ #ifdef CONFIG_PAGE_IDLE_FLAG
  
 -#ifdef CONFIG_64BIT
 -static inline bool page_is_young(struct page *page)
 -{
 -      return PageYoung(page);
 -}
 -
 -static inline void set_page_young(struct page *page)
 -{
 -      SetPageYoung(page);
 -}
 -
 -static inline bool test_and_clear_page_young(struct page *page)
 -{
 -      return TestClearPageYoung(page);
 -}
 -
 -static inline bool page_is_idle(struct page *page)
 -{
 -      return PageIdle(page);
 -}
 -
 -static inline void set_page_idle(struct page *page)
 -{
 -      SetPageIdle(page);
 -}
 -
 -static inline void clear_page_idle(struct page *page)
 -{
 -      ClearPageIdle(page);
 -}
 -#else /* !CONFIG_64BIT */
 +#ifndef CONFIG_64BIT
  /*
   * If there is not enough space to store Idle and Young bits in page flags, use
   * page ext flags instead.
@@@ -74,11 -104,11 +74,11 @@@ static inline void folio_clear_idle(str
  
        clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
  }
 -#endif /* CONFIG_64BIT */
 +#endif /* !CONFIG_64BIT */
  
- #else /* !CONFIG_IDLE_PAGE_TRACKING */
+ #else /* !CONFIG_PAGE_IDLE_FLAG */
  
 -static inline bool page_is_young(struct page *page)
 +static inline bool folio_test_young(struct folio *folio)
  {
        return false;
  }
@@@ -105,35 -135,6 +105,35 @@@ static inline void folio_clear_idle(str
  {
  }
  
- #endif /* CONFIG_IDLE_PAGE_TRACKING */
+ #endif /* CONFIG_PAGE_IDLE_FLAG */
  
 +static inline bool page_is_young(struct page *page)
 +{
 +      return folio_test_young(page_folio(page));
 +}
 +
 +static inline void set_page_young(struct page *page)
 +{
 +      folio_set_young(page_folio(page));
 +}
 +
 +static inline bool test_and_clear_page_young(struct page *page)
 +{
 +      return folio_test_clear_young(page_folio(page));
 +}
 +
 +static inline bool page_is_idle(struct page *page)
 +{
 +      return folio_test_idle(page_folio(page));
 +}
 +
 +static inline void set_page_idle(struct page *page)
 +{
 +      folio_set_idle(page_folio(page));
 +}
 +
 +static inline void clear_page_idle(struct page *page)
 +{
 +      folio_clear_idle(page_folio(page));
 +}
  #endif /* _LINUX_MM_PAGE_IDLE_H */
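The resolved layout keeps the old page-based calls alive as one-line
compatibility wrappers that convert with page_folio() and delegate to
the folio versions, so existing callers keep compiling while the core
code moves to folios.  A toy illustration of the shim with simplified
types:

#include <stdbool.h>
#include <stdio.h>

struct folio { bool idle; };
struct page  { struct folio *head; };

static struct folio *page_folio(struct page *page)
{
	return page->head;              /* head-page lookup in the kernel */
}

static bool folio_test_idle(struct folio *folio)
{
	return folio->idle;
}

/* Legacy entry point, now a pure delegation wrapper. */
static bool page_is_idle(struct page *page)
{
	return folio_test_idle(page_folio(page));
}

int main(void)
{
	struct folio f = { .idle = true };
	struct page p = { .head = &f };

	printf("idle: %d\n", page_is_idle(&p));
	return 0;
}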
Simple merge
Simple merge
index e12b524426b0286be06751bb3ae891a5e55b055e,183a1e0e6fb30943230d8493a470587eaa066d62..c111145be9b6b8d5774757242a48dadf75e45770
@@@ -1477,16 -1401,13 +1478,23 @@@ struct task_struct 
        struct llist_head               kretprobe_instances;
  #endif
  
 +#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
 +      /*
 +       * If L1D flush is supported on mm context switch
 +       * then we use this callback head to queue kill work
 +       * to kill tasks that are not running on SMT disabled
 +       * cores
 +       */
 +      struct callback_head            l1d_flush_kill;
 +#endif
 +
+ #ifdef CONFIG_DEBUG_AID_FOR_SYZBOT
+       unsigned long                   getblk_stamp;
+       unsigned int                    getblk_executed;
+       unsigned int                    getblk_bh_count;
+       unsigned long                   getblk_bh_state;
+ #endif
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
Simple merge
Simple merge
index 4a25e0cc8fb333d19529c55dc99d6523a4ad380e,8b8dc8a84d93c35a9d284f86605a54e13d5c5084..681fc652e3d71318fb457900fdcd5e389e4c5f24
@@@ -4,25 -4,13 +4,29 @@@
  
  #include <linux/math.h>
  
- #define MILLIWATT_PER_WATT    1000L
- #define MICROWATT_PER_MILLIWATT       1000L
- #define MICROWATT_PER_WATT    1000000L
 +/* Metric prefixes in accordance with Système international (d'unités) */
 +#define PETA  1000000000000000ULL
 +#define TERA  1000000000000ULL
 +#define GIGA  1000000000UL
 +#define MEGA  1000000UL
 +#define KILO  1000UL
 +#define HECTO 100UL
 +#define DECA  10UL
 +#define DECI  10UL
 +#define CENTI 100UL
 +#define MILLI 1000UL
 +#define MICRO 1000000UL
 +#define NANO  1000000000UL
 +#define PICO  1000000000000ULL
 +#define FEMTO 1000000000000000ULL
 +
+ #define HZ_PER_KHZ            1000UL
+ #define KHZ_PER_MHZ           1000UL
+ #define HZ_PER_MHZ            1000000UL
+ #define MILLIWATT_PER_WATT    1000UL
+ #define MICROWATT_PER_MILLIWATT       1000UL
+ #define MICROWATT_PER_WATT    1000000UL
  
  #define ABSOLUTE_ZERO_MILLICELSIUS -273150
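For reference, a small usage example of the merged constants; the
definitions are copied from the hunk above, and the computed values are
plain arithmetic:

#include <stdio.h>

#define KILO			1000UL
#define HZ_PER_KHZ		1000UL
#define MICROWATT_PER_WATT	1000000UL

int main(void)
{
	unsigned long khz = 2400UL * KILO;              /* 2.4 GHz expressed in kHz */
	unsigned long hz = khz * HZ_PER_KHZ;
	unsigned long uw = 15UL * MICROWATT_PER_WATT;   /* 15 W in microwatts */

	printf("%lu Hz, %lu uW\n", hz, uw);
	return 0;
}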
  
Simple merge
diff --cc init/main.c
Simple merge
diff --cc ipc/sem.c
index ae8d9104b0a0501fbef6e02004d846f7b2f03085,1a8b9f0ac047be005635d0edefb24697d18d0ff6..f833238df1ce2e08bb71907db47386c88b655a2b
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@@ -2216,40 -2229,9 +2216,40 @@@ long __do_semtimedop(int semid, struct 
  
        unlink_queue(sma, &queue);
  
 -out_unlock_free:
 +out_unlock:
        sem_unlock(sma, locknum);
        rcu_read_unlock();
 +out:
 +      return error;
 +}
 +
 +static long do_semtimedop(int semid, struct sembuf __user *tsops,
 +              unsigned nsops, const struct timespec64 *timeout)
 +{
 +      struct sembuf fast_sops[SEMOPM_FAST];
 +      struct sembuf *sops = fast_sops;
 +      struct ipc_namespace *ns;
 +      int ret;
 +
 +      ns = current->nsproxy->ipc_ns;
 +      if (nsops > ns->sc_semopm)
 +              return -E2BIG;
 +      if (nsops < 1)
 +              return -EINVAL;
 +
 +      if (nsops > SEMOPM_FAST) {
-               sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
++              sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT);
 +              if (sops == NULL)
 +                      return -ENOMEM;
 +      }
 +
 +      if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
 +              ret =  -EFAULT;
 +              goto out_free;
 +      }
 +
 +      ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
 +
  out_free:
        if (sops != fast_sops)
                kvfree(sops);
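The do_semtimedop() wrapper added here uses the familiar small-request
fast path: up to SEMOPM_FAST entries live in an on-stack array, larger
requests fall back to a heap allocation, and only the heap case is freed
at the end.  A userspace sketch of the same shape, with malloc()/free()
standing in for kvmalloc_array()/kvfree() and FAST_LEN as a toy stand-in
for SEMOPM_FAST:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAST_LEN 8

static int process_ops(const int *ops, size_t nops)
{
	int fast_ops[FAST_LEN];
	int *buf = fast_ops;

	if (nops > FAST_LEN) {
		buf = malloc(nops * sizeof(*buf));  /* kvmalloc_array() here */
		if (!buf)
			return -1;
	}

	memcpy(buf, ops, nops * sizeof(*buf));      /* copy_from_user() here */
	printf("processing %zu ops\n", nops);       /* __do_semtimedop() here */

	if (buf != fast_ops)
		free(buf);                          /* kvfree() on the slow path */
	return 0;
}

int main(void)
{
	int ops[20] = { 0 };

	return process_ops(ops, 3) || process_ops(ops, 20);
}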
diff --cc kernel/cpu.c
Simple merge
diff --cc kernel/exit.c
Simple merge
diff --cc kernel/fork.c
Simple merge
Simple merge
Simple merge
diff --cc kernel/signal.c
Simple merge
diff --cc kernel/sys.c
Simple merge
diff --cc kernel/sys_ni.c
Simple merge
diff --cc kernel/sysctl.c
Simple merge
Simple merge
Simple merge
diff --cc lib/Kconfig
Simple merge
Simple merge
Simple merge
diff --cc lib/string.c
Simple merge
Simple merge
Simple merge
diff --cc lib/vsprintf.c
Simple merge
diff --cc mm/Makefile
Simple merge
Simple merge
diff --cc mm/compaction.c
Simple merge
diff --cc mm/filemap.c
index 03d488fcb893585861d0bd2b5d28dc76ea4bcedd,c2fd42d1e81be1bca0fd3bf888b76359d2edfab2..c90b6e4984c92eb843dac0b4f22d2386c13d1f79
@@@ -853,9 -836,9 +858,9 @@@ void replace_page_cache_page(struct pag
        new->mapping = mapping;
        new->index = offset;
  
 -      mem_cgroup_migrate(old, new);
 +      mem_cgroup_migrate(fold, fnew);
  
-       xas_lock_irqsave(&xas, flags);
+       xas_lock_irq(&xas);
        xas_store(&xas, new);
  
        old->mapping = NULL;
Simple merge
diff --cc mm/hugetlb.c
Simple merge
diff --cc mm/internal.h
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc mm/khugepaged.c
Simple merge
diff --cc mm/ksm.c
Simple merge
diff --cc mm/madvise.c
Simple merge
diff --cc mm/memblock.c
Simple merge
diff --cc mm/memcontrol.c
index 12bf352b06147dc6e4ef84446a528e8b990a1ebc,486454344ede7a3873d244372e6870721ce1f5a5..e55f422cc64e464e122bbe28cbf882632a8a092e
  }
  
  /**
-  * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 - * __mem_cgroup_charge - charge a newly allocated page to a cgroup
 - * @page: page to charge
 - * @mm: mm context of the victim
 - * @gfp_mask: reclaim mode
++ * __mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 + * @folio: Folio to charge.
 + * @mm: mm context of the allocating task.
 + * @gfp: Reclaim mode.
   *
 - * Try to charge @page to the memcg that @mm belongs to, reclaiming
 - * pages according to @gfp_mask if necessary. if @mm is NULL, try to
 + * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 + * pages according to @gfp if necessary.  If @mm is NULL, try to
   * charge to the active memcg.
   *
 - * Do not use this for pages allocated for swapin.
 + * Do not use this for folios allocated for swapin.
   *
 - * Returns 0 on success. Otherwise, an error code is returned.
 + * Return: 0 on success. Otherwise, an error code is returned.
   */
- int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
 -int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 -                      gfp_t gfp_mask)
++int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
  {
        struct mem_cgroup *memcg;
        int ret;
  
-       if (mem_cgroup_disabled())
-               return 0;
        memcg = get_mem_cgroup_from_mm(mm);
 -      ret = charge_memcg(page, memcg, gfp_mask);
 +      ret = charge_memcg(folio, memcg, gfp);
        css_put(&memcg->css);
  
        return ret;
@@@ -6938,20 -6902,17 +6921,17 @@@ static void uncharge_folio(struct foli
  }
  
  /**
-  * mem_cgroup_uncharge - Uncharge a folio.
 - * __mem_cgroup_uncharge - uncharge a page
 - * @page: page to uncharge
++ * __mem_cgroup_uncharge - Uncharge a folio.
 + * @folio: Folio to uncharge.
   *
 - * Uncharge a page previously charged with __mem_cgroup_charge().
 + * Uncharge a folio previously charged with mem_cgroup_charge().
   */
- void mem_cgroup_uncharge(struct folio *folio)
 -void __mem_cgroup_uncharge(struct page *page)
++void __mem_cgroup_uncharge(struct folio *folio)
  {
        struct uncharge_gather ug;
  
-       if (mem_cgroup_disabled())
-               return;
 -      /* Don't touch page->lru of any random page, pre-check: */
 -      if (!page_memcg(page))
 +      /* Don't touch folio->lru of any random page, pre-check: */
 +      if (!folio_memcg(folio))
                return;
  
        uncharge_gather_clear(&ug);
   * @page_list: list of pages to uncharge
   *
   * Uncharge a list of pages previously charged with
-  * mem_cgroup_charge().
+  * __mem_cgroup_charge().
   */
- void mem_cgroup_uncharge_list(struct list_head *page_list)
+ void __mem_cgroup_uncharge_list(struct list_head *page_list)
  {
        struct uncharge_gather ug;
 -      struct page *page;
 +      struct folio *folio;
  
-       if (mem_cgroup_disabled())
-               return;
        uncharge_gather_clear(&ug);
 -      list_for_each_entry(page, page_list, lru)
 -              uncharge_page(page, &ug);
 +      list_for_each_entry(folio, page_list, lru)
 +              uncharge_folio(folio, &ug);
        if (ug.memcg)
                uncharge_batch(&ug);
  }
Simple merge
diff --cc mm/memory.c
Simple merge
diff --cc mm/mempolicy.c
Simple merge
diff --cc mm/memremap.c
Simple merge
diff --cc mm/migrate.c
Simple merge
diff --cc mm/mmap.c
Simple merge
index e2f257adea64465378e129ebcac7675271e6a179,c3b00c6f30ce4d816c0e3fc6de5aa21e27ac6fa8..9c64490171e048e1479b7d04954b00f9d8a72985
@@@ -2749,17 -2746,34 +2768,35 @@@ bool folio_clear_dirty_for_io(struct fo
                unlocked_inode_to_wb_end(inode, &cookie);
                return ret;
        }
 -      return TestClearPageDirty(page);
 +      return folio_test_clear_dirty(folio);
  }
 -EXPORT_SYMBOL(clear_page_dirty_for_io);
 +EXPORT_SYMBOL(folio_clear_dirty_for_io);
  
 -int test_clear_page_writeback(struct page *page)
+ static void wb_inode_writeback_start(struct bdi_writeback *wb)
+ {
+       atomic_inc(&wb->writeback_inodes);
+ }
+ static void wb_inode_writeback_end(struct bdi_writeback *wb)
+ {
+       atomic_dec(&wb->writeback_inodes);
+       /*
+        * Make sure estimate of writeback throughput gets updated after
+        * writeback completed. We delay the update by BANDWIDTH_INTERVAL
+        * (which is the interval other bandwidth updates use for batching) so
+        * that if multiple inodes end writeback at a similar time, they get
+        * batched into one bandwidth update.
+        */
+       queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+ }
 +bool __folio_end_writeback(struct folio *folio)
  {
 -      struct address_space *mapping = page_mapping(page);
 -      int ret;
 +      long nr = folio_nr_pages(folio);
 +      struct address_space *mapping = folio_mapping(folio);
 +      bool ret;
  
 -      lock_page_memcg(page);
 +      folio_memcg_lock(folio);
        if (mapping && mapping_use_writeback_tags(mapping)) {
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                        if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
                                struct bdi_writeback *wb = inode_to_wb(inode);
  
 -                              dec_wb_stat(wb, WB_WRITEBACK);
 -                              __wb_writeout_inc(wb);
 +                              wb_stat_mod(wb, WB_WRITEBACK, -nr);
 +                              __wb_writeout_add(wb, nr);
+                               if (!mapping_tagged(mapping,
+                                                   PAGECACHE_TAG_WRITEBACK))
+                                       wb_inode_writeback_end(wb);
                        }
                }
  
@@@ -2819,14 -2834,18 +2859,18 @@@ bool __folio_start_writeback(struct fol
                                                   PAGECACHE_TAG_WRITEBACK);
  
                        xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-                       if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT)
-                               wb_stat_mod(inode_to_wb(inode), WB_WRITEBACK,
-                                               nr);
+                       if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                               struct bdi_writeback *wb = inode_to_wb(inode);
 -                              inc_wb_stat(wb, WB_WRITEBACK);
++                              wb_stat_mod(wb, WB_WRITEBACK, nr);
+                               if (!on_wblist)
+                                       wb_inode_writeback_start(wb);
+                       }
  
                        /*
 -                       * We can come through here when swapping anonymous
 -                       * pages, so we don't necessarily have an inode to track
 -                       * for sync.
 +                       * We can come through here when swapping
 +                       * anonymous folios, so we don't necessarily
 +                       * have an inode to track for sync.
                         */
                        if (mapping->host && !on_wblist)
                                sb_mark_inode_writeback(mapping->host);
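The comment in wb_inode_writeback_end() above explains the batching: the
bandwidth update is deferred by BANDWIDTH_INTERVAL, and because queueing
an already-pending delayed work is a no-op, inodes that finish writeback
around the same time collapse into a single update.  A single-threaded
toy model of that coalescing with a caller-driven clock; none of these
names are the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define INTERVAL_MS 200                 /* stand-in for BANDWIDTH_INTERVAL */

static bool update_armed;
static long long fire_at_ms;

static void writeback_end(long long now_ms)
{
	if (!update_armed) {            /* later completions coalesce */
		update_armed = true;
		fire_at_ms = now_ms + INTERVAL_MS;
	}
}

static void tick(long long now_ms)
{
	if (update_armed && now_ms >= fire_at_ms) {
		update_armed = false;
		printf("bandwidth estimate updated at %lld ms\n", now_ms);
	}
}

int main(void)
{
	writeback_end(0);               /* arms one update for t = 200 */
	writeback_end(50);              /* batched into the same update */
	writeback_end(120);             /* batched as well */
	tick(200);                      /* one update covers all three */
	return 0;
}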
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/page_owner.c
Simple merge
diff --cc mm/rmap.c
Simple merge
diff --cc mm/shmem.c
Simple merge
diff --cc mm/swap.c
Simple merge
diff --cc mm/swapfile.c
Simple merge
diff --cc mm/truncate.c
Simple merge
Simple merge
diff --cc mm/vmscan.c
Simple merge
diff --cc mm/vmstat.c
Simple merge
diff --cc mm/workingset.c
Simple merge