From 6c88f72691f88fd1fc493a3c2eed97f91b82b3f0 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:11 +1100 Subject: [PATCH 01/16] mm/huge_memory: add vmf_insert_folio_pmd() Currently DAX folio/page reference counts are managed differently to normal pages. To allow these to be managed the same as normal pages introduce vmf_insert_folio_pmd. This will map the entire PMD-sized folio and take references as it would for a normally mapped page. This is distinct from the current mechanism, vmf_insert_pfn_pmd, which simply inserts a special devmap PMD entry into the page table without holding a reference to the page for the mapping. It is not currently useful to implement a more generic vmf_insert_folio() which selects the correct behaviour based on folio_order(). This is because PTE faults require only a subpage of the folio to be PTE mapped rather than the entire folio. It would be possible to add this context somewhere but callers already need to handle PTE faults and PMD faults separately so a more generic function is not useful. Link: https://lkml.kernel.org/r/7bf92a2e68225d13ea368d53bbfee327314d1c40.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 65 +++++++++++++++++++++++++++++++++-------- 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4eeb54b50621..e57e811cfd3c 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -39,6 +39,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write); vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, bool write); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4cd79784f841..e6f4189c1c94 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1375,20 +1375,20 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) return __do_huge_pmd_anonymous_page(vmf); } -static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, +static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) { struct mm_struct *mm = vma->vm_mm; pmd_t entry; - spinlock_t *ptl; - ptl = pmd_lock(mm, pmd); + lockdep_assert_held(pmd_lockptr(mm, pmd)); + if (!pmd_none(*pmd)) { if (write) { if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); - goto out_unlock; + return -EEXIST; } entry = pmd_mkyoung(*pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); @@ -1396,7 +1396,7 @@ static 
void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache_pmd(vma, addr, pmd); } - goto out_unlock; + return -EEXIST; } entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); @@ -1412,16 +1412,11 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, if (pgtable) { pgtable_trans_huge_deposit(mm, pmd, pgtable); mm_inc_nr_ptes(mm); - pgtable = NULL; } set_pmd_at(mm, addr, pmd, entry); update_mmu_cache_pmd(vma, addr, pmd); - -out_unlock: - spin_unlock(ptl); - if (pgtable) - pte_free(mm, pgtable); + return 0; } /** @@ -1440,6 +1435,8 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; + spinlock_t *ptl; + int error; /* * If we had pmd_special, we could avoid all these restrictions, @@ -1462,12 +1459,56 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) } track_pfn_insert(vma, &pgprot, pfn); + ptl = pmd_lock(vma->vm_mm, vmf->pmd); + error = insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, + pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(vma->vm_mm, pgtable); - insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); +vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, + bool write) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address & PMD_MASK; + struct mm_struct *mm = vma->vm_mm; + spinlock_t *ptl; + pgtable_t pgtable = NULL; + int error; + + if (addr < vma->vm_start || addr >= vma->vm_end) + return VM_FAULT_SIGBUS; + + if (WARN_ON_ONCE(folio_order(folio) != PMD_ORDER)) + return VM_FAULT_SIGBUS; + + if (arch_needs_pgtable_deposit()) { + pgtable = pte_alloc_one(vma->vm_mm); + if (!pgtable) + return VM_FAULT_OOM; + } + + ptl = pmd_lock(mm, vmf->pmd); + if (pmd_none(*vmf->pmd)) { + folio_get(folio); + folio_add_file_rmap_pmd(folio, &folio->page, vma); + add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR); + } + error = insert_pfn_pmd(vma, addr, vmf->pmd, + pfn_to_pfn_t(folio_pfn(folio)), vma->vm_page_prot, + write, pgtable); + spin_unlock(ptl); + if (error && pgtable) + pte_free(mm, pgtable); + + return VM_FAULT_NOPAGE; +} +EXPORT_SYMBOL_GPL(vmf_insert_folio_pmd); + #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) { -- 2.50.1 From e5cb23256347469c80138a2a8804887239465d0b Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:12 +1100 Subject: [PATCH 02/16] mm/gup: don't allow FOLL_LONGTERM pinning of FS DAX pages Longterm pinning of FS DAX pages should already be disallowed by various pXX_devmap checks. However a future change will cause these checks to be invalid for FS DAX pages so make folio_is_longterm_pinnable() return false for FS DAX pages. Link: https://lkml.kernel.org/r/250a31876704b79f7c65b159f3c835e547f052df.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: John Hubbard Reviewed-by: Dan Williams Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/memremap.h | 11 +++++++++++ include/linux/mm.h | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 0256a4218dc3..4aa151914eab 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -187,6 +187,17 @@ static inline bool folio_is_device_coherent(const struct folio *folio) return is_device_coherent_page(&folio->page); } +static inline bool is_fsdax_page(const struct page *page) +{ + return is_zone_device_page(page) && + page_pgmap(page)->type == MEMORY_DEVICE_FS_DAX; +} + +static inline bool folio_is_fsdax(const struct folio *folio) +{ + return is_fsdax_page(&folio->page); +} + #ifdef CONFIG_ZONE_DEVICE void zone_device_page_init(struct page *page); void *memremap_pages(struct dev_pagemap *pgmap, int nid); diff --git a/include/linux/mm.h b/include/linux/mm.h index c6fb9300f6c0..009484a07074 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2115,6 +2115,13 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio) if (folio_is_device_coherent(folio)) return false; + /* + * Filesystems can only tolerate transient delays to truncate and + * hole-punch operations + */ + if (folio_is_fsdax(folio)) + return false; + /* Otherwise, non-movable zone folios can be pinned. */ return !folio_is_zone_movable(folio); -- 2.50.1 From 653d7825c149932f254e0cd22153ccc945e7e545 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 28 Feb 2025 14:31:13 +1100 Subject: [PATCH 03/16] dcssblk: mark DAX broken, remove FS_DAX_LIMITED support The dcssblk driver has long needed special case supoprt to enable limited dax operation, so called CONFIG_FS_DAX_LIMITED. This mode works around the incomplete support for ZONE_DEVICE on s390 by forgoing the ability of dax-mapped pages to support GUP. Now, pending cleanups to fsdax that fix its reference counting [1] depend on the ability of all dax drivers to supply ZONE_DEVICE pages. To allow that work to move forward, dax support needs to be paused for dcssblk until ZONE_DEVICE support arrives. That work has been known for a few years [2], and the removal of "pte_devmap" requirements [3] makes the conversion easier. For now, place the support behind CONFIG_BROKEN, and remove PFN_SPECIAL (dcssblk was the only user). 
Link: http://lore.kernel.org/cover.9f0e45d52f5cff58807831b6b867084d0b14b61c.1725941415.git-series.apopple@nvidia.com [1] Link: http://lore.kernel.org/20210820210318.187742e8@thinkpad/ [2] Link: http://lore.kernel.org/4511465a4f8429f45e2ac70d2e65dc5e1df1eb47.1725941415.git-series.apopple@nvidia.com [3] Link: https://lkml.kernel.org/r/33eef2379c0d240f40cc15453fad2df1a4ae34c8.1740713401.git-series.apopple@nvidia.com Signed-off-by: Dan Williams Reviewed-by: Gerald Schaefer Tested-by: Alexander Gordeev Acked-by: David Hildenbrand Tested-by: Alison Schofield Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Christian Borntraeger Cc: Sven Schnelle Cc: Jan Kara Cc: Matthew Wilcox Cc: Christoph Hellwig Cc: Alistair Popple Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Chunyan Zhang Cc: "Darrick J. Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Huacai Chen Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Ted Ts'o Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- Documentation/filesystems/dax.rst | 1 - drivers/s390/block/Kconfig | 12 ++++++++++-- drivers/s390/block/dcssblk.c | 27 +++++++++++++++++---------- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst index 719e90f1988e..08dd5e254cc5 100644 --- a/Documentation/filesystems/dax.rst +++ b/Documentation/filesystems/dax.rst @@ -207,7 +207,6 @@ implement direct_access. These block devices may be used for inspiration: - brd: RAM backed block device driver -- dcssblk: s390 dcss block device driver - pmem: NVDIMM persistent memory driver diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index e3710a762aba..4bfe469c04aa 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -4,13 +4,21 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m - select FS_DAX_LIMITED - select DAX prompt "DCSSBLK support" depends on S390 && BLOCK help Support for dcss block device +config DCSSBLK_DAX + def_bool y + depends on DCSSBLK + # requires S390 ZONE_DEVICE support + depends on BROKEN + select DAX + prompt "DCSSBLK DAX support" + help + Enable DAX operation for the dcss block device + config DASD def_tristate y prompt "Support for DASD devices" diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 0f14d279d30b..7248e547fefb 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -534,6 +534,21 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = { NULL, }; +static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info) +{ + struct dax_device *dax_dev; + + if (!IS_ENABLED(CONFIG_DCSSBLK_DAX)) + return 0; + + dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); + if (IS_ERR(dax_dev)) + return PTR_ERR(dax_dev); + set_dax_synchronous(dax_dev); + dev_info->dax_dev = dax_dev; + return dax_add_host(dev_info->dax_dev, dev_info->gd); +} + /* * device attribute for adding devices */ @@ -547,7 +562,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; - struct dax_device *dax_dev; char *local_buf; unsigned long seg_byte_size; @@ -674,14 +688,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto 
put_dev; - dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); - if (IS_ERR(dax_dev)) { - rc = PTR_ERR(dax_dev); - goto put_dev; - } - set_dax_synchronous(dax_dev); - dev_info->dax_dev = dax_dev; - rc = dax_add_host(dev_info->dax_dev, dev_info->gd); + rc = dcssblk_setup_dax(dev_info); if (rc) goto out_dax; @@ -917,7 +924,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, *kaddr = __va(dev_info->start + offset); if (pfn) *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), - PFN_DEV|PFN_SPECIAL); + PFN_DEV); return (dev_sz - offset) / PAGE_SIZE; } -- 2.50.1 From 38607c62b34b46317c46d5baf1df03ac6e48a1c6 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:14 +1100 Subject: [PATCH 04/16] fs/dax: properly refcount fs dax pages Currently fs dax pages are considered free when the refcount drops to one and their refcounts are not increased when mapped via PTEs or decreased when unmapped. This requires special logic in mm paths to detect that these pages should not be properly refcounted, and to detect when the refcount drops to one instead of zero. On the other hand get_user_pages(), etc. will properly refcount fs dax pages by taking a reference and dropping it when the page is unpinned. Tracking this special behaviour requires extra PTE bits (eg. pte_devmap) and introduces rules that are potentially confusing and specific to FS DAX pages. To fix this, and to possibly allow removal of the special PTE bits in future, convert the fs dax page refcounts to be zero based and instead take a reference on the page each time it is mapped as is currently the case for normal pages. This may also allow a future clean-up to remove the pgmap refcounting that is currently done in mm/gup.c. Link: https://lkml.kernel.org/r/c7d886ad7468a20452ef6e0ddab6cfe220874e7c.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Reviewed-by: Dan Williams Tested-by: Alison Schofield Acked-by: David Hildenbrand Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: "Darrick J. 
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/nvdimm/pmem.c | 4 +- fs/dax.c | 186 ++++++++++++++++++++++++--------------- fs/fuse/virtio_fs.c | 3 +- include/linux/dax.h | 2 +- include/linux/mm.h | 27 +----- include/linux/mm_types.h | 7 +- mm/gup.c | 9 +- mm/huge_memory.c | 6 +- mm/internal.h | 2 - mm/memory-failure.c | 6 +- mm/memory.c | 6 +- mm/memremap.c | 47 +++++----- mm/mm_init.c | 9 +- mm/swap.c | 2 - 14 files changed, 165 insertions(+), 151 deletions(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index d81faa9d89c9..785b2d2dbc82 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -513,7 +513,7 @@ static int pmem_attach_disk(struct device *dev, pmem->disk = disk; pmem->pgmap.owner = pmem; - pmem->pfn_flags = PFN_DEV; + pmem->pfn_flags = 0; if (is_nd_pfn(dev)) { pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; @@ -522,7 +522,6 @@ static int pmem_attach_disk(struct device *dev, pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); pmem->pfn_pad = resource_size(res) - range_len(&pmem->pgmap.range); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; bb_range.start += pmem->data_offset; } else if (pmem_should_map_pages(dev)) { @@ -532,7 +531,6 @@ static int pmem_attach_disk(struct device *dev, pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; } else { addr = devm_memremap(dev, pmem->phys_addr, diff --git a/fs/dax.c b/fs/dax.c index 6674540363e8..cf96f3dd4e5f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -71,6 +71,11 @@ static unsigned long dax_to_pfn(void *entry) return xa_to_value(entry) >> DAX_SHIFT; } +static struct folio *dax_to_folio(void *entry) +{ + return page_folio(pfn_to_page(dax_to_pfn(entry))); +} + static void *dax_make_entry(pfn_t pfn, unsigned long flags) { return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); @@ -338,19 +343,6 @@ static unsigned long dax_entry_size(void *entry) return PAGE_SIZE; } -static unsigned long dax_end_pfn(void *entry) -{ - return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; -} - -/* - * Iterate through all mapped pfns represented by an entry, i.e. skip - * 'empty' and 'zero' entries. - */ -#define for_each_mapped_pfn(entry, pfn) \ - for (pfn = dax_to_pfn(entry); \ - pfn < dax_end_pfn(entry); pfn++) - /* * A DAX folio is considered shared if it has no mapping set and ->share (which * shares the ->index field) is non-zero. Note this may return false even if the @@ -359,7 +351,7 @@ static unsigned long dax_end_pfn(void *entry) */ static inline bool dax_folio_is_shared(struct folio *folio) { - return !folio->mapping && folio->page.share; + return !folio->mapping && folio->share; } /* @@ -384,75 +376,117 @@ static void dax_folio_make_shared(struct folio *folio) * folio has previously been mapped into one address space so set the * share count. 
*/ - folio->page.share = 1; + folio->share = 1; } -static inline unsigned long dax_folio_share_put(struct folio *folio) +static inline unsigned long dax_folio_put(struct folio *folio) { - return --folio->page.share; + unsigned long ref; + int order, i; + + if (!dax_folio_is_shared(folio)) + ref = 0; + else + ref = --folio->share; + + if (ref) + return ref; + + folio->mapping = NULL; + order = folio_order(folio); + if (!order) + return 0; + + for (i = 0; i < (1UL << order); i++) { + struct dev_pagemap *pgmap = page_pgmap(&folio->page); + struct page *page = folio_page(folio, i); + struct folio *new_folio = (struct folio *)page; + + ClearPageHead(page); + clear_compound_head(page); + + new_folio->mapping = NULL; + /* + * Reset pgmap which was over-written by + * prep_compound_page(). + */ + new_folio->pgmap = pgmap; + new_folio->share = 0; + WARN_ON_ONCE(folio_ref_count(new_folio)); + } + + return ref; +} + +static void dax_folio_init(void *entry) +{ + struct folio *folio = dax_to_folio(entry); + int order = dax_entry_order(entry); + + /* + * Folio should have been split back to order-0 pages in + * dax_folio_put() when they were removed from their + * final mapping. + */ + WARN_ON_ONCE(folio_order(folio)); + + if (order > 0) { + prep_compound_page(&folio->page, order); + if (order > 1) + INIT_LIST_HEAD(&folio->_deferred_list); + WARN_ON_ONCE(folio_ref_count(folio)); + } } static void dax_associate_entry(void *entry, struct address_space *mapping, - struct vm_area_struct *vma, unsigned long address, bool shared) + struct vm_area_struct *vma, + unsigned long address, bool shared) { - unsigned long size = dax_entry_size(entry), pfn, index; - int i = 0; + unsigned long size = dax_entry_size(entry), index; + struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; index = linear_page_index(vma, address & ~(size - 1)); - for_each_mapped_pfn(entry, pfn) { - struct folio *folio = pfn_folio(pfn); - - if (shared && (folio->mapping || folio->page.share)) { - if (folio->mapping) - dax_folio_make_shared(folio); + if (shared && (folio->mapping || dax_folio_is_shared(folio))) { + if (folio->mapping) + dax_folio_make_shared(folio); - WARN_ON_ONCE(!folio->page.share); - folio->page.share++; - } else { - WARN_ON_ONCE(folio->mapping); - folio->mapping = mapping; - folio->index = index + i++; - } + WARN_ON_ONCE(!folio->share); + WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); + folio->share++; + } else { + WARN_ON_ONCE(folio->mapping); + dax_folio_init(entry); + folio = dax_to_folio(entry); + folio->mapping = mapping; + folio->index = index; } } static void dax_disassociate_entry(void *entry, struct address_space *mapping, - bool trunc) + bool trunc) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; - for_each_mapped_pfn(entry, pfn) { - struct folio *folio = pfn_folio(pfn); - - WARN_ON_ONCE(trunc && folio_ref_count(folio) > 1); - if (dax_folio_is_shared(folio)) { - /* keep the shared flag if this page is still shared */ - if (dax_folio_share_put(folio) > 0) - continue; - } else - WARN_ON_ONCE(folio->mapping && folio->mapping != mapping); - folio->mapping = NULL; - folio->index = 0; - } + dax_folio_put(folio); } static struct page *dax_busy_page(void *entry) { - unsigned long pfn; + struct folio *folio = dax_to_folio(entry); - for_each_mapped_pfn(entry, pfn) { - struct page *page = pfn_to_page(pfn); + if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) + return NULL; - if (page_ref_count(page) > 
1) - return page; - } - return NULL; + if (folio_ref_count(folio) - folio_mapcount(folio)) + return &folio->page; + else + return NULL; } /** @@ -785,7 +819,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) EXPORT_SYMBOL_GPL(dax_layout_busy_page); static int __dax_invalidate_entry(struct address_space *mapping, - pgoff_t index, bool trunc) + pgoff_t index, bool trunc) { XA_STATE(xas, &mapping->i_pages, index); int ret = 0; @@ -953,7 +987,8 @@ void dax_break_layout_final(struct inode *inode) wait_page_idle_uninterruptible(page, inode); } while (true); - dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); + if (!page) + dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); } EXPORT_SYMBOL_GPL(dax_break_layout_final); @@ -1039,8 +1074,10 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, void *old; dax_disassociate_entry(entry, mapping, false); - dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, - shared); + if (!(flags & DAX_ZERO_PAGE)) + dax_associate_entry(new_entry, mapping, vmf->vma, + vmf->address, shared); + /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. If a normal PTE or @@ -1228,9 +1265,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, goto out; if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) goto out; - /* For larger pages we need devmap */ - if (length > 1 && !pfn_t_devmap(*pfnp)) - goto out; + rc = 0; out_check_addr: @@ -1337,7 +1372,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); - ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false); trace_dax_load_hole(inode, vmf, ret); return ret; } @@ -1808,7 +1843,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? 
DAX_PMD : 0; - int err = 0; + struct folio *folio; + int ret, err = 0; pfn_t pfn; void *kaddr; @@ -1840,17 +1876,19 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, return dax_fault_return(err); } + folio = dax_to_folio(*entry); if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); - /* insert PMD pfn */ + folio_ref_inc(folio); if (pmd) - return vmf_insert_pfn_pmd(vmf, pfn, write); + ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)), + write); + else + ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write); + folio_put(folio); - /* insert PTE pfn */ - if (write) - return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return ret; } static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, @@ -2089,6 +2127,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); + struct folio *folio; void *entry; vm_fault_t ret; @@ -2106,14 +2145,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); + folio = pfn_folio(pfn_t_to_pfn(pfn)); + folio_ref_inc(folio); if (order == 0) - ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); + ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); #ifdef CONFIG_FS_DAX_PMD else if (order == PMD_ORDER) - ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); + ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); #endif else ret = VM_FAULT_FALLBACK; + folio_put(folio); dax_unlock_entry(&xas, entry); trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); return ret; diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 82afe78ec542..2c7b24cb67ad 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1017,8 +1017,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (kaddr) *kaddr = fs->window_kaddr + offset; if (pfn) - *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, - PFN_DEV | PFN_MAP); + *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0); return nr_pages > max_nr_pages ? max_nr_pages : nr_pages; } diff --git a/include/linux/dax.h b/include/linux/dax.h index 2333c30f6d36..dcc9fcdf14e4 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -209,7 +209,7 @@ int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, static inline bool dax_page_is_idle(struct page *page) { - return page && page_ref_count(page) == 1; + return page && page_ref_count(page) == 0; } #if IS_ENABLED(CONFIG_DAX) diff --git a/include/linux/mm.h b/include/linux/mm.h index 009484a07074..de008efd96aa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1192,6 +1192,8 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; +extern void prep_compound_page(struct page *page, unsigned int order); + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -1513,25 +1515,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf); * back into memory. 
*/ -#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) -DECLARE_STATIC_KEY_FALSE(devmap_managed_key); - -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs); -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (!static_branch_unlikely(&devmap_managed_key)) - return false; - if (!folio_is_zone_device(folio)) - return false; - return __put_devmap_managed_folio_refs(folio, refs); -} -#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ -static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - return false; -} -#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ - /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ ((unsigned int) folio_ref_count(folio) + 127u <= 127u) @@ -1652,12 +1635,6 @@ static inline void put_page(struct page *page) if (folio_test_slab(folio)) return; - /* - * For some devmap managed pages we need to catch refcount transition - * from 2 to 1: - */ - if (put_devmap_managed_folio_refs(folio, 1)) - return; folio_put(folio); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0fa7907d437e..b1827d78ff89 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -296,6 +296,8 @@ typedef struct { * anonymous memory. * @index: Offset within the file, in units of pages. For anonymous memory, * this is the index from the beginning of the mmap. + * @share: number of DAX mappings that reference this folio. See + * dax_associate_entry. * @private: Filesystem per-folio data (see folio_attach_private()). * @swap: Used for swp_entry_t if folio_test_swapcache(). * @_mapcount: Do not access this member directly. Use folio_mapcount() to @@ -345,7 +347,10 @@ struct folio { struct dev_pagemap *pgmap; }; struct address_space *mapping; - pgoff_t index; + union { + pgoff_t index; + unsigned long share; + }; union { void *private; swp_entry_t swap; diff --git a/mm/gup.c b/mm/gup.c index e5d6454df41d..e5040657870e 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -96,8 +96,7 @@ retry: * belongs to this folio. 
*/ if (unlikely(page_folio(page) != folio)) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); goto retry; } @@ -116,8 +115,7 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) refs *= GUP_PIN_COUNTING_BIAS; } - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); } /** @@ -565,8 +563,7 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, */ if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); + folio_put_refs(folio, refs); return NULL; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e6f4189c1c94..9a15fd3453ff 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2225,7 +2225,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb->fullmm); arch_check_zapped_pmd(vma, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); - if (vma_is_special_huge(vma)) { + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); @@ -2882,13 +2882,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ if (arch_needs_pgtable_deposit()) zap_deposited_table(mm, pmd); - if (vma_is_special_huge(vma)) + if (!vma_is_dax(vma) && vma_is_special_huge(vma)) return; if (unlikely(is_pmd_migration_entry(old_pmd))) { swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); folio = pfn_swap_entry_folio(entry); + } else if (is_huge_zero_pmd(old_pmd)) { + return; } else { page = pmd_page(old_pmd); folio = page_folio(page); diff --git a/mm/internal.h b/mm/internal.h index 70fa96e61c76..aa30282a774a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -737,8 +737,6 @@ static inline void prep_compound_tail(struct page *head, int tail_idx) set_page_private(p, 0); } -extern void prep_compound_page(struct page *page, unsigned int order); - void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern bool free_pages_prepare(struct page *page, unsigned int order); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 327e02fdc029..6257c7f5e941 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -419,18 +419,18 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; - if (pud_devmap(*pud)) + if (pud_trans_huge(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; - if (pmd_devmap(*pmd)) + if (pmd_trans_huge(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte) return 0; ptent = ptep_get(pte); - if (pte_present(ptent) && pte_devmap(ptent)) + if (pte_present(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; diff --git a/mm/memory.c b/mm/memory.c index f09b4a1d09a8..8d1ea1dd6b52 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3848,13 +3848,15 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a - * VM_PFNMAP VMA. + * VM_PFNMAP VMA. FS DAX also wants ops->pfn_mkwrite called. * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. 
*/ - if (!vmf->page) + if (!vmf->page || is_fsdax_page(vmf->page)) { + vmf->page = NULL; return wp_pfn_shared(vmf); + } return wp_page_shared(vmf, folio); } diff --git a/mm/memremap.c b/mm/memremap.c index 68099af9df4c..9a8879bf1ae4 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -458,8 +458,13 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap); void free_zone_device_folio(struct folio *folio) { - if (WARN_ON_ONCE(!folio->pgmap->ops || - !folio->pgmap->ops->page_free)) + struct dev_pagemap *pgmap = folio->pgmap; + + if (WARN_ON_ONCE(!pgmap->ops)) + return; + + if (WARN_ON_ONCE(pgmap->type != MEMORY_DEVICE_FS_DAX && + !pgmap->ops->page_free)) return; mem_cgroup_uncharge(folio); @@ -484,26 +489,36 @@ void free_zone_device_folio(struct folio *folio) * For other types of ZONE_DEVICE pages, migration is either * handled differently or not done at all, so there is no need * to clear folio->mapping. + * + * FS DAX pages clear the mapping when the folio->share count hits + * zero which indicating the page has been removed from the file + * system mapping. */ - folio->mapping = NULL; - folio->pgmap->ops->page_free(folio_page(folio, 0)); + if (pgmap->type != MEMORY_DEVICE_FS_DAX) + folio->mapping = NULL; - switch (folio->pgmap->type) { + switch (pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: - put_dev_pagemap(folio->pgmap); + pgmap->ops->page_free(folio_page(folio, 0)); + put_dev_pagemap(pgmap); break; - case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_GENERIC: /* * Reset the refcount to 1 to prepare for handing out the page * again. */ + pgmap->ops->page_free(folio_page(folio, 0)); folio_set_count(folio, 1); break; + case MEMORY_DEVICE_FS_DAX: + wake_up_var(&folio->page); + break; + case MEMORY_DEVICE_PCI_P2PDMA: + pgmap->ops->page_free(folio_page(folio, 0)); break; } } @@ -519,21 +534,3 @@ void zone_device_page_init(struct page *page) lock_page(page); } EXPORT_SYMBOL_GPL(zone_device_page_init); - -#ifdef CONFIG_FS_DAX -bool __put_devmap_managed_folio_refs(struct folio *folio, int refs) -{ - if (folio->pgmap->type != MEMORY_DEVICE_FS_DAX) - return false; - - /* - * fsdax page refcounts are 1-based, rather than 0-based: if - * refcount is 1, then the page is free and the refcount is - * stable because nobody holds a reference on the page. - */ - if (folio_ref_sub_return(folio, refs) == 1) - wake_up_var(&folio->_refcount); - return true; -} -EXPORT_SYMBOL(__put_devmap_managed_folio_refs); -#endif /* CONFIG_FS_DAX */ diff --git a/mm/mm_init.c b/mm/mm_init.c index 73e97ce95f58..133640a93d1d 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1026,23 +1026,22 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, } /* - * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC and - * MEMORY_TYPE_FS_DAX pages are released directly to the driver page - * allocator which will set the page count to 1 when allocating the - * page. + * ZONE_DEVICE pages other than MEMORY_TYPE_GENERIC are released + * directly to the driver page allocator which will set the page count + * to 1 when allocating the page. * * MEMORY_TYPE_GENERIC and MEMORY_TYPE_FS_DAX pages automatically have * their refcount reset to one whenever they are freed (ie. after * their refcount drops to 0). 
*/ switch (pgmap->type) { + case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: case MEMORY_DEVICE_PCI_P2PDMA: set_page_count(page, 0); break; - case MEMORY_DEVICE_FS_DAX: case MEMORY_DEVICE_GENERIC: break; } diff --git a/mm/swap.c b/mm/swap.c index fc8281ef4241..7523b65d8caa 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -956,8 +956,6 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs) unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } - if (put_devmap_managed_folio_refs(folio, nr_refs)) - continue; if (folio_ref_sub_and_test(folio, nr_refs)) free_zone_device_folio(folio); continue; -- 2.50.1 From aed877c2b4257a25b2429f165542f86125871071 Mon Sep 17 00:00:00 2001 From: Alistair Popple Date: Fri, 28 Feb 2025 14:31:15 +1100 Subject: [PATCH 05/16] device/dax: properly refcount device dax pages when mapping Device DAX pages are currently not reference counted when mapped, instead relying on the devmap PTE bit to ensure mapping code will not get/put references. This requires special handling in various page table walkers, particularly GUP, to manage references on the underlying pgmap to ensure the pages remain valid. However there is no reason these pages can't be refcounted properly at map time. Doing so eliminates the need for the devmap PTE bit, freeing up a precious PTE bit. It also simplifies GUP as it no longer needs to manage the special pgmap references and can instead just treat the pages normally as defined by vm_normal_page(). Link: https://lkml.kernel.org/r/968d3a8e9157e7492e85d065765c027e525f9fc9.1740713401.git-series.apopple@nvidia.com Signed-off-by: Alistair Popple Tested-by: Alison Schofield Cc: Alexander Gordeev Cc: Asahi Lina Cc: Balbir Singh Cc: Bjorn Helgaas Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christoph Hellwig Cc: Chunyan Zhang Cc: Dan Wiliams Cc: "Darrick J.
Wong" Cc: Dave Chinner Cc: Dave Hansen Cc: Dave Jiang Cc: David Hildenbrand Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Huacai Chen Cc: Ira Weiny Cc: Jan Kara Cc: Jason Gunthorpe Cc: Jason Gunthorpe Cc: John Hubbard Cc: linmiaohe Cc: Logan Gunthorpe Cc: Matthew Wilcow (Oracle) Cc: Michael "Camp Drill Sergeant" Ellerman Cc: Nicholas Piggin Cc: Peter Xu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: Vishal Verma Cc: Vivek Goyal Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/dax/device.c | 15 +++++++++------ mm/memremap.c | 14 +++++++------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index bc871a34b9cd..328231cfb028 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -125,11 +125,12 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_mixed(vmf->vma, vmf->address, pfn); + return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), + vmf->flags & FAULT_FLAG_WRITE); } static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, @@ -168,11 +169,12 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -213,11 +215,12 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, return VM_FAULT_SIGBUS; } - pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); + pfn = phys_to_pfn_t(phys, 0); dax_set_mapping(vmf, pfn, fault_size); - return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE); + return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)), + vmf->flags & FAULT_FLAG_WRITE); } #else static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, diff --git a/mm/memremap.c b/mm/memremap.c index 9a8879bf1ae4..2aebc1b192da 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -460,11 +460,7 @@ void free_zone_device_folio(struct folio *folio) { struct dev_pagemap *pgmap = folio->pgmap; - if (WARN_ON_ONCE(!pgmap->ops)) - return; - - if (WARN_ON_ONCE(pgmap->type != MEMORY_DEVICE_FS_DAX && - !pgmap->ops->page_free)) + if (WARN_ON_ONCE(!pgmap)) return; mem_cgroup_uncharge(folio); @@ -494,12 +490,15 @@ void free_zone_device_folio(struct folio *folio) * zero which indicating the page has been removed from the file * system mapping. */ - if (pgmap->type != MEMORY_DEVICE_FS_DAX) + if (pgmap->type != MEMORY_DEVICE_FS_DAX && + pgmap->type != MEMORY_DEVICE_GENERIC) folio->mapping = NULL; switch (pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; pgmap->ops->page_free(folio_page(folio, 0)); put_dev_pagemap(pgmap); break; @@ -509,7 +508,6 @@ void free_zone_device_folio(struct folio *folio) * Reset the refcount to 1 to prepare for handing out the page * again. 
*/ - pgmap->ops->page_free(folio_page(folio, 0)); folio_set_count(folio, 1); break; @@ -518,6 +516,8 @@ break; case MEMORY_DEVICE_PCI_P2PDMA: + if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free)) + break; pgmap->ops->page_free(folio_page(folio, 0)); break; } -- 2.50.1 From 937582ee8e8d227c30ec147629a0179131feaa80 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:34 +0000 Subject: [PATCH 06/16] mm/mremap: correctly handle partial mremap() of VMA starting at 0

Patch series "refactor mremap and fix bug", v3.

The existing mremap() logic has grown organically over a very long period of time, resulting in code that is in many parts, very difficult to follow and full of subtleties and sources of confusion. In addition, it is difficult to thread state through the operation correctly, as function arguments have expanded, some parameters are expected to be temporarily altered during the operation, others are intended to remain static and some can be overridden.

This series completely refactors the mremap implementation, sensibly separating functions, adding comments to explain the more subtle aspects of the implementation and making use of small structs to thread state through everything. The reason for doing so is to lay the groundwork for planned future changes to the mremap logic, changes which require the ability to easily pass around state. Additionally, it would be unhelpful to add yet more logic to code that is already difficult to follow without first refactoring it like this.

The first patch in this series additionally fixes a bug when a VMA with start address zero is partially remapped. Tested on real hardware under heavy workload and all self tests are passing.

This patch (of 3):

Consider the case of a partial mremap() (that results in a VMA split) of an accountable VMA (i.e. which has the VM_ACCOUNT flag set) whose start address is zero, with the MREMAP_MAYMOVE flag specified and a scenario where a move does in fact occur:

       addr   end
        |      |
        v      v
    |-------------|
    |     vma     |
    |-------------|
    0

This move is affected by unmapping the range [addr, end). In order to prevent an incorrect decrement of accounted memory which has already been determined, the mremap() code in move_vma() clears VM_ACCOUNT from the VMA prior to doing so, before reestablishing it in each of the VMAs post-split:

       addr   end
        |      |
        v      v
    |---|      |---|
    | A |      | B |
    |---|      |---|

Commit 6b73cff239e5 ("mm: change munmap splitting order and move_vma()") changed this logic such as to determine whether there is a need to do so by establishing account_start and account_end and, in the instance where such an operation is required, assigning them to vma->vm_start and vma->vm_end. Later the code checks if the operation is required for 'A' referenced above thusly:

	if (account_start) {
		...
	}

However, if the VMA described above has vma->vm_start == 0, which is now assigned to account_start, this branch will not be executed. As a result, the VMA 'A' above will remain stripped of its VM_ACCOUNT flag, incorrectly. The fix is to simply convert these variables to booleans and set them as required.

Link: https://lkml.kernel.org/r/cover.1741639347.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/dc55cb6db25d97c3d9e460de4986a323fa959676.1741639347.git.lorenzo.stoakes@oracle.com Fixes: 6b73cff239e5 ("mm: change munmap splitting order and move_vma()") Signed-off-by: Lorenzo Stoakes Reviewed-by: Harry Yoo Reviewed-by: Liam R.
Howlett Reviewed-by: Vlastimil Babka Cc: Signed-off-by: Andrew Morton --- mm/mremap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index cff7f552f909..c3e4c86d0b8d 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -705,8 +705,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; - unsigned long account_start = 0; - unsigned long account_end = 0; + bool account_start = false; + bool account_end = false; unsigned long hiwater_vm; int err = 0; bool need_rmap_locks; @@ -790,9 +790,9 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) - account_start = vma->vm_start; + account_start = true; if (vma->vm_end > old_addr + old_len) - account_end = vma->vm_end; + account_end = true; } /* @@ -832,7 +832,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = 0; + account_start = account_end = false; } if (vm_flags & VM_LOCKED) { -- 2.50.1 From 85ea6bdd88a27317875088b54119bec209c6c09c Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:35 +0000 Subject: [PATCH 07/16] mm/mremap: refactor mremap() system call implementation Place checks into a separate function so the mremap() system call is less egregiously long, remove unnecessary mremap_to() offset_in_page() check and just check that earlier so we keep all such basic checks together. Separate out the VMA in-place expansion, hugetlb and expand/move logic into separate, readable functions. De-duplicate code where possible, add comments and ensure that all error handling explicitly specifies the error at the point of it occurring rather than setting a prefixed error value and implicitly setting (which is bug prone). This lays the groundwork for subsequent patches further simplifying and extending the mremap() implementation. Link: https://lkml.kernel.org/r/fc4a925396dc3cc36791ec92c4d329209e816308.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Harry Yoo Reviewed-by: Vlastimil Babka Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 405 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 251 insertions(+), 154 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index c3e4c86d0b8d..c4abda8dfc57 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -942,33 +942,14 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long ret; unsigned long map_flags = 0; - if (offset_in_page(new_addr)) - return -EINVAL; - + /* Is the new length or address silly? */ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) return -EINVAL; - /* Ensure the old/new locations do not overlap */ + /* Ensure the old/new locations do not overlap. */ if (addr + old_len > new_addr && new_addr + new_len > addr) return -EINVAL; - /* - * move_vma() need us to stay 4 maps below the threshold, otherwise - * it will bail out at the very beginning. - * That is a problem if we have already unmaped the regions here - * (new_addr, and old_addr), because userspace will not know the - * state of the vma's after it gets -ENOMEM. 
- * So, to avoid such scenario we can pre-compute if the whole - * operation has high chances to success map-wise. - * Worst-scenario case is when both vma's (new_addr and old_addr) get - * split in 3 before unmapping it. - * That means 2 more maps (1 for each) to the ones we already hold. - * Check whether current map count plus 2 still leads us to 4 maps below - * the threshold, otherwise return -ENOMEM here to be more safe. - */ - if ((mm->map_count + 2) >= sysctl_max_map_count - 3) - return -ENOMEM; - if (flags & MREMAP_FIXED) { /* * In mremap_to(). @@ -1035,6 +1016,218 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) return 1; } +/* Do the mremap() flags require that the new_addr parameter be specified? */ +static bool implies_new_addr(unsigned long flags) +{ + return flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); +} + +/* + * Are the parameters passed to mremap() valid? If so return 0, otherwise return + * error. + */ +static unsigned long check_mremap_params(unsigned long addr, + unsigned long flags, + unsigned long old_len, + unsigned long new_len, + unsigned long new_addr) +{ + /* Ensure no unexpected flag values. */ + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) + return -EINVAL; + + /* Start address must be page-aligned. */ + if (offset_in_page(addr)) + return -EINVAL; + + /* + * We allow a zero old-len as a special case + * for DOS-emu "duplicate shm area" thing. But + * a zero new-len is nonsensical. + */ + if (!PAGE_ALIGN(new_len)) + return -EINVAL; + + /* Remainder of checks are for cases with specific new_addr. */ + if (!implies_new_addr(flags)) + return 0; + + /* The new address must be page-aligned. */ + if (offset_in_page(new_addr)) + return -EINVAL; + + /* A fixed address implies a move. */ + if (!(flags & MREMAP_MAYMOVE)) + return -EINVAL; + + /* MREMAP_DONTUNMAP does not allow resizing in the process. */ + if (flags & MREMAP_DONTUNMAP && old_len != new_len) + return -EINVAL; + + /* + * move_vma() need us to stay 4 maps below the threshold, otherwise + * it will bail out at the very beginning. + * That is a problem if we have already unmaped the regions here + * (new_addr, and old_addr), because userspace will not know the + * state of the vma's after it gets -ENOMEM. + * So, to avoid such scenario we can pre-compute if the whole + * operation has high chances to success map-wise. + * Worst-scenario case is when both vma's (new_addr and old_addr) get + * split in 3 before unmapping it. + * That means 2 more maps (1 for each) to the ones we already hold. + * Check whether current map count plus 2 still leads us to 4 maps below + * the threshold, otherwise return -ENOMEM here to be more safe. + */ + if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3) + return -ENOMEM; + + return 0; +} + +/* + * We know we can expand the VMA in-place by delta pages, so do so. + * + * If we discover the VMA is locked, update mm_struct statistics accordingly and + * indicate so to the caller. 
+ */ +static unsigned long expand_vma_inplace(struct vm_area_struct *vma, + unsigned long delta, bool *locked) +{ + struct mm_struct *mm = current->mm; + long pages = delta >> PAGE_SHIFT; + VMA_ITERATOR(vmi, mm, vma->vm_end); + long charged = 0; + + if (vma->vm_flags & VM_ACCOUNT) { + if (security_vm_enough_memory_mm(mm, pages)) + return -ENOMEM; + + charged = pages; + } + + /* + * Function vma_merge_extend() is called on the + * extension we are adding to the already existing vma, + * vma_merge_extend() will merge this extension with the + * already existing vma (expand operation itself) and + * possibly also with the next vma if it becomes + * adjacent to the expanded vma and otherwise + * compatible. + */ + vma = vma_merge_extend(&vmi, vma, delta); + if (!vma) { + vm_unacct_memory(charged); + return -ENOMEM; + } + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + *locked = true; + } + + return 0; +} + +static bool align_hugetlb(struct vm_area_struct *vma, + unsigned long addr, + unsigned long new_addr, + unsigned long *old_len_ptr, + unsigned long *new_len_ptr, + unsigned long *delta_ptr) +{ + unsigned long old_len = *old_len_ptr; + unsigned long new_len = *new_len_ptr; + struct hstate *h __maybe_unused = hstate_vma(vma); + + old_len = ALIGN(old_len, huge_page_size(h)); + new_len = ALIGN(new_len, huge_page_size(h)); + + /* addrs must be huge page aligned */ + if (addr & ~huge_page_mask(h)) + return false; + if (new_addr & ~huge_page_mask(h)) + return false; + + /* + * Don't allow remap expansion, because the underlying hugetlb + * reservation is not yet capable to handle split reservation. + */ + if (new_len > old_len) + return false; + + *old_len_ptr = old_len; + *new_len_ptr = new_len; + *delta_ptr = abs_diff(old_len, new_len); + return true; +} + +/* + * We are mremap()'ing without specifying a fixed address to move to, but are + * requesting that the VMA's size be increased. + * + * Try to do so in-place, if this fails, then move the VMA to a new location to + * action the change. + */ +static unsigned long expand_vma(struct vm_area_struct *vma, + unsigned long addr, unsigned long old_len, + unsigned long new_len, unsigned long flags, + bool *locked_ptr, unsigned long *new_addr_ptr, + struct vm_userfaultfd_ctx *uf_ptr, + struct list_head *uf_unmap_ptr) +{ + unsigned long err; + unsigned long map_flags; + unsigned long new_addr; /* We ignore any user-supplied one. */ + pgoff_t pgoff; + + err = resize_is_valid(vma, addr, old_len, new_len, flags); + if (err) + return err; + + /* + * [addr, old_len) spans precisely to the end of the VMA, so try to + * expand it in-place. + */ + if (old_len == vma->vm_end - addr && + vma_expandable(vma, new_len - old_len)) { + err = expand_vma_inplace(vma, new_len - old_len, locked_ptr); + if (IS_ERR_VALUE(err)) + return err; + + /* + * We want to populate the newly expanded portion of the VMA to + * satisfy the expectation that mlock()'ing a VMA maintains all + * of its pages in memory. + */ + if (*locked_ptr) + *new_addr_ptr = addr; + + /* OK we're done! */ + return addr; + } + + /* + * We weren't able to just expand or shrink the area, + * we need to create a new one and move it. + */ + + /* We're not allowed to move the VMA, so error out. */ + if (!(flags & MREMAP_MAYMOVE)) + return -ENOMEM; + + /* Find a new location to move the VMA to. */ + map_flags = (vma->vm_flags & VM_MAYSHARE) ? 
MAP_SHARED : 0; + pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); + new_addr = get_unmapped_area(vma->vm_file, 0, new_len, pgoff, map_flags); + if (IS_ERR_VALUE(new_addr)) + return new_addr; + *new_addr_ptr = new_addr; + + return move_vma(vma, addr, old_len, new_len, new_addr, + locked_ptr, flags, uf_ptr, uf_unmap_ptr); +} + /* * Expand (or shrink) an existing mapping, potentially moving it at the * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) @@ -1048,7 +1241,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long ret = -EINVAL; + unsigned long ret; + unsigned long delta; bool locked = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; LIST_HEAD(uf_unmap_early); @@ -1067,70 +1261,38 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, */ addr = untagged_addr(addr); - if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) - return ret; - - if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) - return ret; - - /* - * MREMAP_DONTUNMAP is always a move and it does not allow resizing - * in the process. - */ - if (flags & MREMAP_DONTUNMAP && - (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) - return ret; - - - if (offset_in_page(addr)) + ret = check_mremap_params(addr, flags, old_len, new_len, new_addr); + if (ret) return ret; old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); + delta = abs_diff(old_len, new_len); - /* - * We allow a zero old-len as a special case - * for DOS-emu "duplicate shm area" thing. But - * a zero new-len is nonsensical. - */ - if (!new_len) - return ret; - - if (mmap_write_lock_killable(current->mm)) + if (mmap_write_lock_killable(mm)) return -EINTR; + vma = vma_lookup(mm, addr); if (!vma) { ret = -EFAULT; goto out; } - /* Don't allow remapping vmas when they have already been sealed */ + /* If mseal()'d, mremap() is prohibited. */ if (!can_modify_vma(vma)) { ret = -EPERM; goto out; } - if (is_vm_hugetlb_page(vma)) { - struct hstate *h __maybe_unused = hstate_vma(vma); - - old_len = ALIGN(old_len, huge_page_size(h)); - new_len = ALIGN(new_len, huge_page_size(h)); - - /* addrs must be huge page aligned */ - if (addr & ~huge_page_mask(h)) - goto out; - if (new_addr & ~huge_page_mask(h)) - goto out; - - /* - * Don't allow remap expansion, because the underlying hugetlb - * reservation is not yet capable to handle split reservation. - */ - if (new_len > old_len) - goto out; + /* Align to hugetlb page size, if required. */ + if (is_vm_hugetlb_page(vma) && + !align_hugetlb(vma, addr, new_addr, &old_len, &new_len, &delta)) { + ret = -EINVAL; + goto out; } - if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) { + /* Are we RELOCATING the VMA to a SPECIFIC address? */ + if (implies_new_addr(flags)) { ret = mremap_to(addr, old_len, new_addr, new_len, &locked, flags, &uf, &uf_unmap_early, &uf_unmap); @@ -1138,109 +1300,44 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, } /* - * Always allow a shrinking remap: that just unmaps - * the unnecessary pages.. - * do_vmi_munmap does all the needed commit accounting, and - * unlocks the mmap_lock if so directed. + * From here on in we are only RESIZING the VMA, attempting to do so + * in-place, moving the VMA if we cannot. */ - if (old_len >= new_len) { - VMA_ITERATOR(vmi, mm, addr + new_len); - if (old_len == new_len) { - ret = addr; - goto out; - } + /* NO-OP CASE - resizing to the same size. 
*/ + if (new_len == old_len) { + ret = addr; + goto out; + } + + /* SHRINK CASE. Can always be done in-place. */ + if (new_len < old_len) { + VMA_ITERATOR(vmi, mm, addr + new_len); - ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len, + /* + * Simply unmap the shrunken portion of the VMA. This does all + * the needed commit accounting, unlocking the mmap lock. + */ + ret = do_vmi_munmap(&vmi, mm, addr + new_len, delta, &uf_unmap, true); if (ret) goto out; + /* We succeeded, mmap lock released for us. */ ret = addr; goto out_unlocked; } - /* - * Ok, we need to grow.. - */ - ret = resize_is_valid(vma, addr, old_len, new_len, flags); - if (ret) - goto out; - - /* old_len exactly to the end of the area.. - */ - if (old_len == vma->vm_end - addr) { - unsigned long delta = new_len - old_len; - - /* can we just expand the current mapping? */ - if (vma_expandable(vma, delta)) { - long pages = delta >> PAGE_SHIFT; - VMA_ITERATOR(vmi, mm, vma->vm_end); - long charged = 0; - - if (vma->vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, pages)) { - ret = -ENOMEM; - goto out; - } - charged = pages; - } - - /* - * Function vma_merge_extend() is called on the - * extension we are adding to the already existing vma, - * vma_merge_extend() will merge this extension with the - * already existing vma (expand operation itself) and - * possibly also with the next vma if it becomes - * adjacent to the expanded vma and otherwise - * compatible. - */ - vma = vma_merge_extend(&vmi, vma, delta); - if (!vma) { - vm_unacct_memory(charged); - ret = -ENOMEM; - goto out; - } + /* EXPAND case. We try to do in-place, if we can't, then we move it. */ + ret = expand_vma(vma, addr, old_len, new_len, flags, &locked, &new_addr, + &uf, &uf_unmap); - vm_stat_account(mm, vma->vm_flags, pages); - if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; - locked = true; - new_addr = addr; - } - ret = addr; - goto out; - } - } - - /* - * We weren't able to just expand or shrink the area, - * we need to create a new one and move it.. - */ - ret = -ENOMEM; - if (flags & MREMAP_MAYMOVE) { - unsigned long map_flags = 0; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; - - new_addr = get_unmapped_area(vma->vm_file, 0, new_len, - vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(new_addr)) { - ret = new_addr; - goto out; - } - - ret = move_vma(vma, addr, old_len, new_len, new_addr, - &locked, flags, &uf, &uf_unmap); - } out: if (offset_in_page(ret)) locked = false; - mmap_write_unlock(current->mm); + mmap_write_unlock(mm); if (locked && new_len > old_len) - mm_populate(new_addr + old_len, new_len - old_len); + mm_populate(new_addr + old_len, delta); out_unlocked: userfaultfd_unmap_complete(mm, &uf_unmap_early); mremap_userfaultfd_complete(&uf, addr, ret, old_len); -- 2.50.1 From 221bf5cac5c2f3b9d8a07f4b2738225a59962945 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:36 +0000 Subject: [PATCH 08/16] mm/mremap: introduce and use vma_remap_struct threaded state A number of mremap() calls both pass around and modify a large number of parameters, making the code less readable and often repeatedly having to determine things such as VMA, size delta, and more. Avoid this by using the common pattern of passing a state object through the operation, updating it as we go. We introduce the vma_remap_struct or 'VRM' for this purpose. 
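To make the shape of this pattern concrete before reading the diff, here is a minimal, self-contained userspace sketch of the same idea. The names here (struct remap_state, state_set_delta(), state_classify()) are illustrative stand-ins only, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    enum remap_kind { KIND_NO_RESIZE, KIND_SHRINK, KIND_EXPAND };

    /* One object carries user inputs, derived values and accumulated state. */
    struct remap_state {
            unsigned long addr;     /* user-specified source address */
            unsigned long old_len;  /* current length of the mapping */
            unsigned long new_len;  /* requested new length */
            unsigned long delta;    /* derived: |old_len - new_len| */
            enum remap_kind kind;   /* derived: classification of the op */
            bool mlocked;           /* accumulated: was mlock state touched? */
    };

    /* Helpers take a single pointer rather than a long list of scalars. */
    static void state_set_delta(struct remap_state *rs)
    {
            rs->delta = rs->old_len > rs->new_len ?
                        rs->old_len - rs->new_len :
                        rs->new_len - rs->old_len;
    }

    static enum remap_kind state_classify(const struct remap_state *rs)
    {
            if (rs->delta == 0)
                    return KIND_NO_RESIZE;
            return rs->old_len > rs->new_len ? KIND_SHRINK : KIND_EXPAND;
    }

    int main(void)
    {
            struct remap_state rs = {
                    .addr = 0x100000, .old_len = 8192, .new_len = 16384,
            };

            state_set_delta(&rs);
            rs.kind = state_classify(&rs);
            printf("kind=%d delta=%lu\n", (int)rs.kind, rs.delta);
            return 0;
    }

The essential property is that derived values (the delta, the kind of operation) are computed once and stored, while side effects accumulate on the state object instead of leaking through ad-hoc output parameters.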
The VRM also gives us the ability to accumulate further state through the operation that would otherwise require awkward and error-prone pointer passing. We can also now trivially define helper functions that operate on a VRM object. This pattern has proven itself to be very powerful when implemented for VMA merge, VMA unmapping and memory mapping operations, so it is battle-tested and functional. We both introduce the data structure and use it, introducing helper functions as needed to make things readable. We move some state, such as the mmap lock and mlock() status, into the VRM; we introduce a means of classifying the type of mremap() operation; and we de-duplicate the get_unmapped_area() lookup. We also neatly thread userfaultfd state throughout the operation. Note that there is further refactoring to be done, chiefly adjusting move_vma() to accept a VRM parameter. We defer this, as there is prerequisite work required to be able to do so, which we will complete in a subsequent patch. Link: https://lkml.kernel.org/r/27951739dc83b2b1523b81fa9c009ba348388d40.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Reviewed-by: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 570 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 363 insertions(+), 207 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index c4abda8dfc57..af022e3b89e2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -32,6 +32,44 @@ #include "internal.h" +/* Classify the kind of remap operation being performed. */ +enum mremap_type { + MREMAP_INVALID, /* Initial state. */ + MREMAP_NO_RESIZE, /* old_len == new_len, if not moved, do nothing. */ + MREMAP_SHRINK, /* old_len > new_len. */ + MREMAP_EXPAND, /* old_len < new_len. */ +}; + +/* + * Describes a VMA mremap() operation and is threaded throughout it. + * + * Any of the fields may be mutated by the operation, however these values will + * always accurately reflect the remap (for instance, we may adjust lengths and + * delta to account for hugetlb alignment). + */ +struct vma_remap_struct { + /* User-provided state. */ + unsigned long addr; /* User-specified address from which we remap. */ + unsigned long old_len; /* Length of range being remapped. */ + unsigned long new_len; /* Desired new length of mapping. */ + unsigned long flags; /* user-specified MREMAP_* flags. */ + unsigned long new_addr; /* Optionally, desired new address. */ + + /* uffd state. */ + struct vm_userfaultfd_ctx *uf; + struct list_head *uf_unmap_early; + struct list_head *uf_unmap; + + /* VMA state, determined in do_mremap(). */ + struct vm_area_struct *vma; + + /* Internal state, determined in do_mremap(). */ + unsigned long delta; /* Absolute delta of old_len,new_len. */ + bool mlocked; /* Was the VMA mlock()'d? */ + enum mremap_type remap_type; /* expand, shrink, etc. */ + bool mmap_locked; /* Is mm currently write-locked? */ +}; + static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -693,10 +731,95 @@ again: return len + old_addr - old_end; /* how much done */ } +/* Set vrm->delta to the difference in VMA size specified by user. */ +static void vrm_set_delta(struct vma_remap_struct *vrm) +{ + vrm->delta = abs_diff(vrm->old_len, vrm->new_len); +} + +/* Determine what kind of remap this is - shrink, expand or no resize at all.
*/ +static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm) +{ + if (vrm->delta == 0) + return MREMAP_NO_RESIZE; + + if (vrm->old_len > vrm->new_len) + return MREMAP_SHRINK; + + return MREMAP_EXPAND; +} + +/* + * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs + * overlapping? + */ +static bool vrm_overlaps(struct vma_remap_struct *vrm) +{ + unsigned long start_old = vrm->addr; + unsigned long start_new = vrm->new_addr; + unsigned long end_old = vrm->addr + vrm->old_len; + unsigned long end_new = vrm->new_addr + vrm->new_len; + + /* + * start_old end_old + * |-----------| + * | | + * |-----------| + * |-------------| + * | | + * |-------------| + * start_new end_new + */ + if (end_old > start_new && end_new > start_old) + return true; + + return false; +} + +/* Do the mremap() flags require that the new_addr parameter be specified? */ +static bool vrm_implies_new_addr(struct vma_remap_struct *vrm) +{ + return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); +} + +/* + * Find an unmapped area for the requested vrm->new_addr. + * + * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only + * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to + * mmap(), otherwise this is equivalent to mmap() specifying a NULL address. + * + * Returns 0 on success (with vrm->new_addr updated), or an error code upon + * failure. + */ +static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm) +{ + struct vm_area_struct *vma = vrm->vma; + unsigned long map_flags = 0; + /* Page Offset _into_ the VMA. */ + pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT; + pgoff_t pgoff = vma->vm_pgoff + internal_pgoff; + unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0; + unsigned long res; + + if (vrm->flags & MREMAP_FIXED) + map_flags |= MAP_FIXED; + if (vma->vm_flags & VM_MAYSHARE) + map_flags |= MAP_SHARED; + + res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff, + map_flags); + if (IS_ERR_VALUE(res)) + return res; + + vrm->new_addr = res; + return 0; +} + static unsigned long move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, - bool *locked, unsigned long flags, + bool *mlocked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) { long to_account = new_len - old_len; @@ -837,7 +960,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; - *locked = true; + *mlocked = true; } mm->hiwater_vm = hiwater_vm; @@ -860,18 +983,15 @@ static unsigned long move_vma(struct vm_area_struct *vma, * resize_is_valid() - Ensure the vma can be resized to the new length at the given * address. * - * @vma: The vma to resize - * @addr: The old address - * @old_len: The current size - * @new_len: The desired size - * @flags: The vma flags - * * Return 0 on success, error otherwise. */ -static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, - unsigned long old_len, unsigned long new_len, unsigned long flags) +static int resize_is_valid(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + unsigned long addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long new_len = vrm->new_len; unsigned long pgoff; /* @@ -883,11 +1003,12 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, * behavior.
As a result, fail such attempts. */ if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { - pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid); + pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", + current->comm, current->pid); return -EINVAL; } - if ((flags & MREMAP_DONTUNMAP) && + if ((vrm->flags & MREMAP_DONTUNMAP) && (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) return -EINVAL; @@ -907,99 +1028,122 @@ static int resize_is_valid(struct vm_area_struct *vma, unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return -EFAULT; - if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta)) return -EAGAIN; - if (!may_expand_vm(mm, vma->vm_flags, - (new_len - old_len) >> PAGE_SHIFT)) + if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT)) return -ENOMEM; return 0; } /* - * mremap_to() - remap a vma to a new location - * @addr: The old address - * @old_len: The old size - * @new_addr: The target address - * @new_len: The new size - * @locked: If the returned vma is locked (VM_LOCKED) - * @flags: the mremap flags - * @uf: The mremap userfaultfd context - * @uf_unmap_early: The userfaultfd unmap early context - * @uf_unmap: The userfaultfd unmap context + * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so + * execute this, optionally dropping the mmap lock when we do so. * + * In both cases this invalidates the VMA, however if we don't drop the lock, + * then load the correct VMA into vrm->vma afterwards. + */ +static unsigned long shrink_vma(struct vma_remap_struct *vrm, + bool drop_lock) +{ + struct mm_struct *mm = current->mm; + unsigned long unmap_start = vrm->addr + vrm->new_len; + unsigned long unmap_bytes = vrm->delta; + unsigned long res; + VMA_ITERATOR(vmi, mm, unmap_start); + + VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK); + + res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes, + vrm->uf_unmap, drop_lock); + vrm->vma = NULL; /* Invalidated. */ + if (res) + return res; + + /* + * If we've not dropped the lock, then we should reload the VMA to + * replace the invalidated VMA with the one that may have now been + * split. + */ + if (drop_lock) { + vrm->mmap_locked = false; + } else { + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; + } + + return 0; +} + +/* + * mremap_to() - remap a vma to a new location. * Returns: The new address of the vma or an error. */ -static unsigned long mremap_to(unsigned long addr, unsigned long old_len, - unsigned long new_addr, unsigned long new_len, bool *locked, - unsigned long flags, struct vm_userfaultfd_ctx *uf, - struct list_head *uf_unmap_early, - struct list_head *uf_unmap) +static unsigned long mremap_to(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long ret; - unsigned long map_flags = 0; + unsigned long err; /* Is the new length or address silly? */ - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) + if (vrm->new_len > TASK_SIZE || + vrm->new_addr > TASK_SIZE - vrm->new_len) return -EINVAL; - /* Ensure the old/new locations do not overlap. */ - if (addr + old_len > new_addr && new_addr + new_len > addr) + if (vrm_overlaps(vrm)) return -EINVAL; - if (flags & MREMAP_FIXED) { + if (vrm->flags & MREMAP_FIXED) { /* * In mremap_to(). * VMA is moved to dst address, and munmap dst first. 
* do_munmap will check if dst is sealed. */ - ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); - if (ret) - return ret; - } + err = do_munmap(mm, vrm->new_addr, vrm->new_len, + vrm->uf_unmap_early); + vrm->vma = NULL; /* Invalidated. */ + if (err) + return err; - if (old_len > new_len) { - ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); - if (ret) - return ret; - old_len = new_len; + /* + * If we remap a portion of a VMA elsewhere in the same VMA, + * this can invalidate the old VMA. Reset. + */ + vrm->vma = vma_lookup(mm, vrm->addr); + if (!vrm->vma) + return -EFAULT; } - vma = vma_lookup(mm, addr); - if (!vma) - return -EFAULT; - - ret = resize_is_valid(vma, addr, old_len, new_len, flags); - if (ret) - return ret; + if (vrm->remap_type == MREMAP_SHRINK) { + err = shrink_vma(vrm, /* drop_lock= */false); + if (err) + return err; - /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ - if (flags & MREMAP_DONTUNMAP && - !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { - return -ENOMEM; + /* Set up for the move now shrink has been executed. */ + vrm->old_len = vrm->new_len; } - if (flags & MREMAP_FIXED) - map_flags |= MAP_FIXED; + err = resize_is_valid(vrm); + if (err) + return err; - if (vma->vm_flags & VM_MAYSHARE) - map_flags |= MAP_SHARED; + /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ + if (vrm->flags & MREMAP_DONTUNMAP) { + vm_flags_t vm_flags = vrm->vma->vm_flags; + unsigned long pages = vrm->old_len >> PAGE_SHIFT; - ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + - ((addr - vma->vm_start) >> PAGE_SHIFT), - map_flags); - if (IS_ERR_VALUE(ret)) - return ret; + if (!may_expand_vm(mm, vm_flags, pages)) + return -ENOMEM; + } - /* We got a new mapping */ - if (!(flags & MREMAP_FIXED)) - new_addr = ret; + err = vrm_set_new_addr(vrm); + if (err) + return err; - return move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, - uf, uf_unmap); + return move_vma(vrm->vma, vrm->addr, vrm->old_len, vrm->new_len, + vrm->new_addr, &vrm->mlocked, vrm->flags, + vrm->uf, vrm->uf_unmap); } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) @@ -1016,22 +1160,33 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) return 1; } -/* Do the mremap() flags require that the new_addr parameter be specified? */ -static bool implies_new_addr(unsigned long flags) +/* Determine whether we are actually able to execute an in-place expansion. */ +static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm) { - return flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); + /* Number of bytes from vrm->addr to end of VMA. */ + unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr; + + /* If end of range aligns to end of VMA, we can just expand in-place. */ + if (suffix_bytes != vrm->old_len) + return false; + + /* Check whether this is feasible. */ + if (!vma_expandable(vrm->vma, vrm->delta)) + return false; + + return true; } /* * Are the parameters passed to mremap() valid? If so return 0, otherwise return * error. */ -static unsigned long check_mremap_params(unsigned long addr, - unsigned long flags, - unsigned long old_len, - unsigned long new_len, - unsigned long new_addr) +static unsigned long check_mremap_params(struct vma_remap_struct *vrm) + { + unsigned long addr = vrm->addr; + unsigned long flags = vrm->flags; + /* Ensure no unexpected flag values. 
*/ if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) return -EINVAL; @@ -1045,15 +1200,15 @@ static unsigned long check_mremap_params(unsigned long addr, * for DOS-emu "duplicate shm area" thing. But * a zero new-len is nonsensical. */ - if (!PAGE_ALIGN(new_len)) + if (!PAGE_ALIGN(vrm->new_len)) return -EINVAL; /* Remainder of checks are for cases with specific new_addr. */ - if (!implies_new_addr(flags)) + if (!vrm_implies_new_addr(vrm)) return 0; /* The new address must be page-aligned. */ - if (offset_in_page(new_addr)) + if (offset_in_page(vrm->new_addr)) return -EINVAL; /* A fixed address implies a move. */ @@ -1061,7 +1216,7 @@ static unsigned long check_mremap_params(unsigned long addr, return -EINVAL; /* MREMAP_DONTUNMAP does not allow resizing in the process. */ - if (flags & MREMAP_DONTUNMAP && old_len != new_len) + if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len) return -EINVAL; /* @@ -1090,11 +1245,11 @@ static unsigned long check_mremap_params(unsigned long addr, * If we discover the VMA is locked, update mm_struct statistics accordingly and * indicate so to the caller. */ -static unsigned long expand_vma_inplace(struct vm_area_struct *vma, - unsigned long delta, bool *locked) +static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; - long pages = delta >> PAGE_SHIFT; + long pages = vrm->delta >> PAGE_SHIFT; + struct vm_area_struct *vma = vrm->vma; VMA_ITERATOR(vmi, mm, vma->vm_end); long charged = 0; @@ -1114,7 +1269,7 @@ static unsigned long expand_vma_inplace(struct vm_area_struct *vma, * adjacent to the expanded vma and otherwise * compatible. */ - vma = vma_merge_extend(&vmi, vma, delta); + vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta); if (!vma) { vm_unacct_memory(charged); return -ENOMEM; @@ -1123,42 +1278,34 @@ static unsigned long expand_vma_inplace(struct vm_area_struct *vma, vm_stat_account(mm, vma->vm_flags, pages); if (vma->vm_flags & VM_LOCKED) { mm->locked_vm += pages; - *locked = true; + vrm->mlocked = true; } return 0; } -static bool align_hugetlb(struct vm_area_struct *vma, - unsigned long addr, - unsigned long new_addr, - unsigned long *old_len_ptr, - unsigned long *new_len_ptr, - unsigned long *delta_ptr) +static bool align_hugetlb(struct vma_remap_struct *vrm) { - unsigned long old_len = *old_len_ptr; - unsigned long new_len = *new_len_ptr; - struct hstate *h __maybe_unused = hstate_vma(vma); + struct hstate *h __maybe_unused = hstate_vma(vrm->vma); - old_len = ALIGN(old_len, huge_page_size(h)); - new_len = ALIGN(new_len, huge_page_size(h)); + vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h)); + vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h)); /* addrs must be huge page aligned */ - if (addr & ~huge_page_mask(h)) + if (vrm->addr & ~huge_page_mask(h)) return false; - if (new_addr & ~huge_page_mask(h)) + if (vrm->new_addr & ~huge_page_mask(h)) return false; /* * Don't allow remap expansion, because the underlying hugetlb * reservation is not yet capable to handle split reservation. */ - if (new_len > old_len) + if (vrm->new_len > vrm->old_len) return false; - *old_len_ptr = old_len; - *new_len_ptr = new_len; - *delta_ptr = abs_diff(old_len, new_len); + vrm_set_delta(vrm); + return true; } @@ -1169,19 +1316,16 @@ static bool align_hugetlb(struct vm_area_struct *vma, * Try to do so in-place, if this fails, then move the VMA to a new location to * action the change. 
*/ -static unsigned long expand_vma(struct vm_area_struct *vma, - unsigned long addr, unsigned long old_len, - unsigned long new_len, unsigned long flags, - bool *locked_ptr, unsigned long *new_addr_ptr, - struct vm_userfaultfd_ctx *uf_ptr, - struct list_head *uf_unmap_ptr) +static unsigned long expand_vma(struct vma_remap_struct *vrm) { unsigned long err; - unsigned long map_flags; - unsigned long new_addr; /* We ignore any user-supplied one. */ - pgoff_t pgoff; + struct vm_area_struct *vma = vrm->vma; + unsigned long addr = vrm->addr; + unsigned long old_len = vrm->old_len; + unsigned long new_len = vrm->new_len; + unsigned long flags = vrm->flags; - err = resize_is_valid(vma, addr, old_len, new_len, flags); + err = resize_is_valid(vrm); if (err) return err; @@ -1189,10 +1333,9 @@ static unsigned long expand_vma(struct vm_area_struct *vma, * [addr, old_len) spans precisely to the end of the VMA, so try to * expand it in-place. */ - if (old_len == vma->vm_end - addr && - vma_expandable(vma, new_len - old_len)) { - err = expand_vma_inplace(vma, new_len - old_len, locked_ptr); - if (IS_ERR_VALUE(err)) + if (vrm_can_expand_in_place(vrm)) { + err = expand_vma_in_place(vrm); + if (err) return err; /* @@ -1200,8 +1343,8 @@ static unsigned long expand_vma(struct vm_area_struct *vma, * satisfy the expectation that mlock()'ing a VMA maintains all * of its pages in memory. */ - if (*locked_ptr) - *new_addr_ptr = addr; + if (vrm->mlocked) + vrm->new_addr = addr; /* OK we're done! */ return addr; @@ -1217,62 +1360,67 @@ static unsigned long expand_vma(struct vm_area_struct *vma, return -ENOMEM; /* Find a new location to move the VMA to. */ - map_flags = (vma->vm_flags & VM_MAYSHARE) ? MAP_SHARED : 0; - pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); - new_addr = get_unmapped_area(vma->vm_file, 0, new_len, pgoff, map_flags); - if (IS_ERR_VALUE(new_addr)) - return new_addr; - *new_addr_ptr = new_addr; + err = vrm_set_new_addr(vrm); + if (err) + return err; - return move_vma(vma, addr, old_len, new_len, new_addr, - locked_ptr, flags, uf_ptr, uf_unmap_ptr); + return move_vma(vma, addr, old_len, new_len, vrm->new_addr, + &vrm->mlocked, flags, vrm->uf, vrm->uf_unmap); } /* - * Expand (or shrink) an existing mapping, potentially moving it at the - * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) - * - * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise - * This option implies MREMAP_MAYMOVE. + * Attempt to resize the VMA in-place, if we cannot, then move the VMA to the + * first available address to perform the operation. */ -SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, - unsigned long, new_len, unsigned long, flags, - unsigned long, new_addr) +static unsigned long mremap_at(struct vma_remap_struct *vrm) +{ + unsigned long res; + + switch (vrm->remap_type) { + case MREMAP_INVALID: + break; + case MREMAP_NO_RESIZE: + /* NO-OP CASE - resizing to the same size. */ + return vrm->addr; + case MREMAP_SHRINK: + /* + * SHRINK CASE. Can always be done in-place. + * + * Simply unmap the shrunken portion of the VMA. This does all + * the needed commit accounting, and we indicate that the mmap + * lock should be dropped. 
+ */ + res = shrink_vma(vrm, /* drop_lock= */true); + if (res) + return res; + + return vrm->addr; + case MREMAP_EXPAND: + return expand_vma(vrm); + } + + BUG(); +} + +static unsigned long do_mremap(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret; - unsigned long delta; - bool locked = false; - struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; - LIST_HEAD(uf_unmap_early); - LIST_HEAD(uf_unmap); - - /* - * There is a deliberate asymmetry here: we strip the pointer tag - * from the old address but leave the new address alone. This is - * for consistency with mmap(), where we prevent the creation of - * aliasing mappings in userspace by leaving the tag bits of the - * mapping address intact. A non-zero tag will cause the subsequent - * range checks to reject the address as invalid. - * - * See Documentation/arch/arm64/tagged-address-abi.rst for more - * information. - */ - addr = untagged_addr(addr); - ret = check_mremap_params(addr, flags, old_len, new_len, new_addr); + ret = check_mremap_params(vrm); if (ret) return ret; - old_len = PAGE_ALIGN(old_len); - new_len = PAGE_ALIGN(new_len); - delta = abs_diff(old_len, new_len); + vrm->old_len = PAGE_ALIGN(vrm->old_len); + vrm->new_len = PAGE_ALIGN(vrm->new_len); + vrm_set_delta(vrm); if (mmap_write_lock_killable(mm)) return -EINTR; + vrm->mmap_locked = true; - vma = vma_lookup(mm, addr); + vma = vrm->vma = vma_lookup(mm, vrm->addr); if (!vma) { ret = -EFAULT; goto out; @@ -1285,62 +1433,70 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, } /* Align to hugetlb page size, if required. */ - if (is_vm_hugetlb_page(vma) && - !align_hugetlb(vma, addr, new_addr, &old_len, &new_len, &delta)) { + if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) { ret = -EINVAL; goto out; } - /* Are we RELOCATING the VMA to a SPECIFIC address? */ - if (implies_new_addr(flags)) { - ret = mremap_to(addr, old_len, new_addr, new_len, - &locked, flags, &uf, &uf_unmap_early, - &uf_unmap); - goto out; - } - - /* - * From here on in we are only RESIZING the VMA, attempting to do so - * in-place, moving the VMA if we cannot. - */ + vrm->remap_type = vrm_remap_type(vrm); - /* NO-OP CASE - resizing to the same size. */ - if (new_len == old_len) { - ret = addr; - goto out; - } + /* Actually execute mremap. */ + ret = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm); - /* SHRINK CASE. Can always be done in-place. */ - if (new_len < old_len) { - VMA_ITERATOR(vmi, mm, addr + new_len); +out: + if (vrm->mmap_locked) { + mmap_write_unlock(mm); + vrm->mmap_locked = false; - /* - * Simply unmap the shrunken portion of the VMA. This does all - * the needed commit accounting, unlocking the mmap lock. - */ - ret = do_vmi_munmap(&vmi, mm, addr + new_len, delta, - &uf_unmap, true); - if (ret) - goto out; - - /* We succeeded, mmap lock released for us. */ - ret = addr; - goto out_unlocked; + if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len) + mm_populate(vrm->new_addr + vrm->old_len, vrm->delta); } - /* EXPAND case. We try to do in-place, if we can't, then we move it. 
*/ - ret = expand_vma(vma, addr, old_len, new_len, flags, &locked, &new_addr, - &uf, &uf_unmap); + userfaultfd_unmap_complete(mm, vrm->uf_unmap_early); + mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len); + userfaultfd_unmap_complete(mm, vrm->uf_unmap); -out: - if (offset_in_page(ret)) - locked = false; - mmap_write_unlock(mm); - if (locked && new_len > old_len) - mm_populate(new_addr + old_len, delta); -out_unlocked: - userfaultfd_unmap_complete(mm, &uf_unmap_early); - mremap_userfaultfd_complete(&uf, addr, ret, old_len); - userfaultfd_unmap_complete(mm, &uf_unmap); return ret; } + +/* + * Expand (or shrink) an existing mapping, potentially moving it at the + * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) + * + * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise + * This option implies MREMAP_MAYMOVE. + */ +SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, + unsigned long, new_len, unsigned long, flags, + unsigned long, new_addr) +{ + struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; + LIST_HEAD(uf_unmap_early); + LIST_HEAD(uf_unmap); + /* + * There is a deliberate asymmetry here: we strip the pointer tag + * from the old address but leave the new address alone. This is + * for consistency with mmap(), where we prevent the creation of + * aliasing mappings in userspace by leaving the tag bits of the + * mapping address intact. A non-zero tag will cause the subsequent + * range checks to reject the address as invalid. + * + * See Documentation/arch/arm64/tagged-address-abi.rst for more + * information. + */ + struct vma_remap_struct vrm = { + .addr = untagged_addr(addr), + .old_len = old_len, + .new_len = new_len, + .flags = flags, + .new_addr = new_addr, + + .uf = &uf, + .uf_unmap_early = &uf_unmap_early, + .uf_unmap = &uf_unmap, + + .remap_type = MREMAP_INVALID, /* We set later. */ + }; + + return do_mremap(&vrm); +} -- 2.50.1 From d5c8aec0542e2d79b64de9089b88fabdebe05c1e Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:37 +0000 Subject: [PATCH 09/16] mm/mremap: initial refactor of move_vma() Update move_vma() to use the threaded VRM object, de-duplicate code and separate into smaller functions to aid readability and debug-ability. This in turn allows further simplification of expand_vma() as we can simply thread VRM through the function. We also take the opportunity to abstract the account charging page count into the VRM in order that we can correctly thread this through the operation. We additionally do the same for tracking mm statistics - exec_vm, stack_vm, data_vm, and locked_vm. As part of this change, we slightly modify when locked pages statistics are counted for in mm_struct statistics. However this should cause no issues, as there is no chance of underflow, nor will any rlimit failures occur as a result. This is an intermediate step before a further refactoring of move_vma() in order to aid review. Link: https://lkml.kernel.org/r/ab611d6efae11bddab2db2b8bb3925b1d1954c7d.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Reviewed-by: Vlastimil Babka Cc: Harry Yoo Signed-off-by: Andrew Morton --- mm/mremap.c | 186 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 122 insertions(+), 64 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index af022e3b89e2..6305cb9a86f6 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -68,6 +68,7 @@ struct vma_remap_struct { bool mlocked; /* Was the VMA mlock()'d? 
*/ enum mremap_type remap_type; /* expand, shrink, etc. */ bool mmap_locked; /* Is mm currently write-locked? */ + unsigned long charged; /* If VM_ACCOUNT, # pages to account. */ }; static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) @@ -816,35 +817,88 @@ static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm) return 0; } -static unsigned long move_vma(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long old_len, - unsigned long new_len, unsigned long new_addr, - bool *mlocked, unsigned long flags, - struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) +/* + * Keep track of pages which have been added to the memory mapping. If the VMA + * is accounted, also check to see if there is sufficient memory. + * + * Returns true on success, false if insufficient memory to charge. + */ +static bool vrm_charge(struct vma_remap_struct *vrm) { - long to_account = new_len - old_len; - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *new_vma; - unsigned long vm_flags = vma->vm_flags; - unsigned long new_pgoff; - unsigned long moved_len; - bool account_start = false; - bool account_end = false; - unsigned long hiwater_vm; - int err = 0; - bool need_rmap_locks; - struct vma_iterator vmi; + unsigned long charged; + + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return true; + + /* + * If we don't unmap the old mapping, then we account the entirety of + * the length of the new one. Otherwise it's just the delta in size. + */ + if (vrm->flags & MREMAP_DONTUNMAP) + charged = vrm->new_len >> PAGE_SHIFT; + else + charged = vrm->delta >> PAGE_SHIFT; + + + /* This accounts 'charged' pages of memory. */ + if (security_vm_enough_memory_mm(current->mm, charged)) + return false; + + vrm->charged = charged; + return true; +} + +/* + * an error has occurred so we will not be using vrm->charged memory. Unaccount + * this memory if the VMA is accounted. + */ +static void vrm_uncharge(struct vma_remap_struct *vrm) +{ + if (!(vrm->vma->vm_flags & VM_ACCOUNT)) + return; + + vm_unacct_memory(vrm->charged); + vrm->charged = 0; +} + +/* + * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to + * account for 'bytes' memory used, and if locked, indicate this in the VRM so + * we can handle this correctly later. + */ +static void vrm_stat_account(struct vma_remap_struct *vrm, + unsigned long bytes) +{ + unsigned long pages = bytes >> PAGE_SHIFT; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + + vm_stat_account(mm, vma->vm_flags, pages); + if (vma->vm_flags & VM_LOCKED) { + mm->locked_vm += pages; + vrm->mlocked = true; + } +} + +/* + * Perform checks before attempting to write a VMA prior to it being + * moved. + */ +static unsigned long prep_move_vma(struct vma_remap_struct *vrm, + unsigned long *vm_flags_ptr) +{ + unsigned long err = 0; + struct vm_area_struct *vma = vrm->vma; + unsigned long old_addr = vrm->addr; + unsigned long old_len = vrm->old_len; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ - if (mm->map_count >= sysctl_max_map_count - 3) + if (current->mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; - if (unlikely(flags & MREMAP_DONTUNMAP)) - to_account = new_len; - if (vma->vm_ops && vma->vm_ops->may_split) { if (vma->vm_start != old_addr) err = vma->vm_ops->may_split(vma, old_addr); @@ -862,22 +916,46 @@ static unsigned long move_vma(struct vm_area_struct *vma, * so KSM can come around to merge on vma and new_vma afterwards. 
*/ err = ksm_madvise(vma, old_addr, old_addr + old_len, - MADV_UNMERGEABLE, &vm_flags); + MADV_UNMERGEABLE, vm_flags_ptr); if (err) return err; - if (vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT)) - return -ENOMEM; - } + return 0; +} + +static unsigned long move_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma = vrm->vma; + struct vm_area_struct *new_vma; + unsigned long vm_flags = vma->vm_flags; + unsigned long old_addr = vrm->addr, new_addr = vrm->new_addr; + unsigned long old_len = vrm->old_len, new_len = vrm->new_len; + unsigned long new_pgoff; + unsigned long moved_len; + unsigned long account_start = false; + unsigned long account_end = false; + unsigned long hiwater_vm; + int err; + bool need_rmap_locks; + struct vma_iterator vmi; + + err = prep_move_vma(vrm, &vm_flags); + if (err) + return err; + + /* If accounted, charge the number of bytes the operation will use. */ + if (!vrm_charge(vrm)) + return -ENOMEM; vma_start_write(vma); new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, + new_vma = copy_vma(&vrm->vma, new_addr, new_len, new_pgoff, &need_rmap_locks); + /* This may have been updated. */ + vma = vrm->vma; if (!new_vma) { - if (vm_flags & VM_ACCOUNT) - vm_unacct_memory(to_account >> PAGE_SHIFT); + vrm_uncharge(vrm); return -ENOMEM; } @@ -902,7 +980,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, old_addr = new_addr; new_addr = err; } else { - mremap_userfaultfd_prep(new_vma, uf); + mremap_userfaultfd_prep(new_vma, vrm->uf); } if (is_vm_hugetlb_page(vma)) { @@ -910,7 +988,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, } /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { + if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) account_start = true; @@ -928,13 +1006,12 @@ static unsigned long move_vma(struct vm_area_struct *vma, * If this were a serious issue, we'd add a flag to do_munmap(). 
*/ hiwater_vm = mm->hiwater_vm; - vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); /* Tell pfnmap has moved from this vma */ if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn_clear(vma); - if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { + if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) { /* We always clear VM_LOCKED[ONFAULT] on the old vma */ vm_flags_clear(vma, VM_LOCKED_MASK); @@ -947,22 +1024,20 @@ static unsigned long move_vma(struct vm_area_struct *vma, unlink_anon_vmas(vma); /* Because we won't unmap we don't need to touch locked_vm */ + vrm_stat_account(vrm, new_len); return new_addr; } + vrm_stat_account(vrm, new_len); + vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { + if (do_vmi_munmap(&vmi, mm, old_addr, old_len, vrm->uf_unmap, false) < 0) { /* OOM: unable to split vma, just get accounts right */ - if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) + if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); account_start = account_end = false; } - if (vm_flags & VM_LOCKED) { - mm->locked_vm += new_len >> PAGE_SHIFT; - *mlocked = true; - } - mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ @@ -1141,9 +1216,7 @@ static unsigned long mremap_to(struct vma_remap_struct *vrm) if (err) return err; - return move_vma(vrm->vma, vrm->addr, vrm->old_len, vrm->new_len, - vrm->new_addr, &vrm->mlocked, vrm->flags, - vrm->uf, vrm->uf_unmap); + return move_vma(vrm); } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) @@ -1248,17 +1321,11 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm) static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; - long pages = vrm->delta >> PAGE_SHIFT; struct vm_area_struct *vma = vrm->vma; VMA_ITERATOR(vmi, mm, vma->vm_end); - long charged = 0; - - if (vma->vm_flags & VM_ACCOUNT) { - if (security_vm_enough_memory_mm(mm, pages)) - return -ENOMEM; - charged = pages; - } + if (!vrm_charge(vrm)) + return -ENOMEM; /* * Function vma_merge_extend() is called on the @@ -1271,15 +1338,11 @@ static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm) */ vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta); if (!vma) { - vm_unacct_memory(charged); + vrm_uncharge(vrm); return -ENOMEM; } - vm_stat_account(mm, vma->vm_flags, pages); - if (vma->vm_flags & VM_LOCKED) { - mm->locked_vm += pages; - vrm->mlocked = true; - } + vrm_stat_account(vrm, vrm->delta); return 0; } @@ -1319,11 +1382,7 @@ static bool align_hugetlb(struct vma_remap_struct *vrm) static unsigned long expand_vma(struct vma_remap_struct *vrm) { unsigned long err; - struct vm_area_struct *vma = vrm->vma; unsigned long addr = vrm->addr; - unsigned long old_len = vrm->old_len; - unsigned long new_len = vrm->new_len; - unsigned long flags = vrm->flags; err = resize_is_valid(vrm); if (err) @@ -1356,7 +1415,7 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm) */ /* We're not allowed to move the VMA, so error out. */ - if (!(flags & MREMAP_MAYMOVE)) + if (!(vrm->flags & MREMAP_MAYMOVE)) return -ENOMEM; /* Find a new location to move the VMA to. 
*/ @@ -1364,8 +1423,7 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm) if (err) return err; - return move_vma(vma, addr, old_len, new_len, vrm->new_addr, - &vrm->mlocked, flags, vrm->uf, vrm->uf_unmap); + return move_vma(vrm); } /* -- 2.50.1 From b714ccb02a76e170f3e6475749ed0812ee25f777 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:38 +0000 Subject: [PATCH 10/16] mm/mremap: complete refactor of move_vma() We invoke ksm_madvise() with an intentionally dummy flags field, so there is no need to pass it around. Additionally, the code tries to be 'clever' with account_start, account_end, using these to both check that vma->vm_start != 0 and that we ought to account the newly split portion of VMA post-move, either before or after it. We need to do this because we intentionally removed VM_ACCOUNT on the VMA prior to unmapping, so we don't erroneously unaccount memory (we have already calculated the correct amount to account and accounted it; any subsequent subtraction would be incorrect). This patch significantly expands the comment (from 2002!) about 'concealing' the flag to make it abundantly clear what's going on, as well as adding and expanding a number of other comments also. We can remove account_start, account_end by instead tracking when we account (i.e. vma->vm_flags has the VM_ACCOUNT flag set, and this is not an MREMAP_DONTUNMAP operation), and figuring out when to reinstate the VM_ACCOUNT flag on prior/subsequent VMAs separately. We additionally break the function into logical pieces and attack the very confusing error handling logic (where, for instance, new_addr is set to err). After this change the code is considerably more readable and easier to manipulate. Link: https://lkml.kernel.org/r/e7eaa307e444ba2b04d94fd985c907c8e896f893.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mremap.c | 293 ++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 204 insertions(+), 89 deletions(-) diff --git a/mm/mremap.c b/mm/mremap.c index 6305cb9a86f6..7dc058d5d5e2 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -884,13 +884,13 @@ static void vrm_stat_account(struct vma_remap_struct *vrm, * Perform checks before attempting to write a VMA prior to it being * moved. */ -static unsigned long prep_move_vma(struct vma_remap_struct *vrm, - unsigned long *vm_flags_ptr) +static unsigned long prep_move_vma(struct vma_remap_struct *vrm) { unsigned long err = 0; struct vm_area_struct *vma = vrm->vma; unsigned long old_addr = vrm->addr; unsigned long old_len = vrm->old_len; + unsigned long dummy = vma->vm_flags; /* * We'd prefer to avoid failure later on in do_munmap: @@ -916,56 +916,151 @@ static unsigned long prep_move_vma(struct vma_remap_struct *vrm, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, - MADV_UNMERGEABLE, vm_flags_ptr); + MADV_UNMERGEABLE, &dummy); if (err) return err; return 0; } -static unsigned long move_vma(struct vma_remap_struct *vrm) +/* + * Unmap source VMA for VMA move, turning it from a copy to a move, being + * careful to ensure we do not underflow the memory account while doing so if + * this is an accountable move. + * + * This is best effort; if we fail to unmap then we simply try to correct the + * accounting and exit.
+ */ +static void unmap_source_vma(struct vma_remap_struct *vrm) { struct mm_struct *mm = current->mm; + unsigned long addr = vrm->addr; + unsigned long len = vrm->old_len; struct vm_area_struct *vma = vrm->vma; - struct vm_area_struct *new_vma; - unsigned long vm_flags = vma->vm_flags; - unsigned long old_addr = vrm->addr, new_addr = vrm->new_addr; - unsigned long old_len = vrm->old_len, new_len = vrm->new_len; - unsigned long new_pgoff; - unsigned long moved_len; - unsigned long account_start = false; - unsigned long account_end = false; - unsigned long hiwater_vm; + VMA_ITERATOR(vmi, mm, addr); int err; - bool need_rmap_locks; - struct vma_iterator vmi; + unsigned long vm_start; + unsigned long vm_end; + /* + * It might seem odd that we check for MREMAP_DONTUNMAP here, given this + * function implies that we unmap the original VMA, which seems + * contradictory. + * + * However, this occurs when this operation was attempted and an error + * arose, in which case we _do_ wish to unmap the _new_ VMA, which means + * we actually _do_ want it to be unaccounted. + */ + bool accountable_move = (vma->vm_flags & VM_ACCOUNT) && + !(vrm->flags & MREMAP_DONTUNMAP); - err = prep_move_vma(vrm, &vm_flags); - if (err) - return err; + /* + * So we perform a trick here to prevent incorrect accounting. Any merge + * or new VMA allocation performed in copy_vma() does not adjust + * accounting; it is expected that callers handle this. + * + * And indeed we already have, accounting appropriately in both cases in + * vrm_charge(). + * + * However, when we unmap the existing VMA (to effect the move), this + * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount + * removed pages. + * + * To avoid this we temporarily clear this flag, reinstating on any + * portions of the original VMA that remain. + */ + if (accountable_move) { + vm_flags_clear(vma, VM_ACCOUNT); + /* We are about to split vma, so store the start/end. */ + vm_start = vma->vm_start; + vm_end = vma->vm_end; + } - /* If accounted, charge the number of bytes the operation will use. */ - if (!vrm_charge(vrm)) - return -ENOMEM; + err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false); + vrm->vma = NULL; /* Invalidated. */ + if (err) { + /* OOM: unable to split vma, just get accounts right */ + vm_acct_memory(len >> PAGE_SHIFT); + return; + } - vma_start_write(vma); - new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); - new_vma = copy_vma(&vrm->vma, new_addr, new_len, new_pgoff, + /* + * If we mremap() from a VMA like this: + * + * addr end + * | | + * v v + * |-------------| + * | | + * |-------------| + * + * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above + * we'll end up with: + * + * addr end + * | | + * v v + * |---| |---| + * | A | | B | + * |---| |---| + * + * The VMI is still pointing at addr, so vma_prev() will give us A, and + * a subsequent or lone vma_next() will give us B. + * + * do_vmi_munmap() will have restored the VMI back to addr. + */ + if (accountable_move) { + unsigned long end = addr + len; + + if (vm_start < addr) { + struct vm_area_struct *prev = vma_prev(&vmi); + + vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */ + } + + if (vm_end > end) { + struct vm_area_struct *next = vma_next(&vmi); + + vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */ + } + } +} + +/* + * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the + * process.
Additionally, handle an error occurring while moving page tables, + * where we reset vrm state to cause unmapping of the new VMA. + * + * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an + * error code. + */ +static int copy_vma_and_data(struct vma_remap_struct *vrm, + struct vm_area_struct **new_vma_ptr) +{ + unsigned long internal_offset = vrm->addr - vrm->vma->vm_start; + unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT; + unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; + unsigned long moved_len; + bool need_rmap_locks; + struct vm_area_struct *vma; + struct vm_area_struct *new_vma; + int err = 0; + + new_vma = copy_vma(&vrm->vma, vrm->new_addr, vrm->new_len, new_pgoff, &need_rmap_locks); - /* This may have been updated. */ - vma = vrm->vma; if (!new_vma) { vrm_uncharge(vrm); + *new_vma_ptr = NULL; return -ENOMEM; } + vma = vrm->vma; - moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, - need_rmap_locks, false); - if (moved_len < old_len) { + moved_len = move_page_tables(vma, vrm->addr, new_vma, + vrm->new_addr, vrm->old_len, + need_rmap_locks, /* for_stack= */false); + if (moved_len < vrm->old_len) err = -ENOMEM; - } else if (vma->vm_ops && vma->vm_ops->mremap) { + else if (vma->vm_ops && vma->vm_ops->mremap) err = vma->vm_ops->mremap(new_vma); - } if (unlikely(err)) { /* @@ -973,28 +1068,84 @@ static unsigned long move_vma(struct vma_remap_struct *vrm) * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. */ - move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, - true, false); - vma = new_vma; - old_len = new_len; - old_addr = new_addr; - new_addr = err; + move_page_tables(new_vma, vrm->new_addr, vma, vrm->addr, + moved_len, /* need_rmap_locks = */true, + /* for_stack= */false); + vrm->vma = new_vma; + vrm->old_len = vrm->new_len; + vrm->addr = vrm->new_addr; } else { mremap_userfaultfd_prep(new_vma, vrm->uf); } - if (is_vm_hugetlb_page(vma)) { + if (is_vm_hugetlb_page(vma)) clear_vma_resv_huge_pages(vma); - } - /* Conceal VM_ACCOUNT so old reservation is not undone */ - if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) { - vm_flags_clear(vma, VM_ACCOUNT); - if (vma->vm_start < old_addr) - account_start = true; - if (vma->vm_end > old_addr + old_len) - account_end = true; - } + /* Tell pfnmap has moved from this vma */ + if (unlikely(vma->vm_flags & VM_PFNMAP)) + untrack_pfn_clear(vma); + + *new_vma_ptr = new_vma; + return err; +} + +/* + * Perform final tasks for the MREMAP_DONTUNMAP operation, clearing mlock() and + * account flags on remaining VMA by convention (it cannot be mlock()'d any + * longer, as pages in range are no longer mapped), and removing anon_vma_chain + * links from it (if the entire VMA was copied over). + */ +static void dontunmap_complete(struct vma_remap_struct *vrm, + struct vm_area_struct *new_vma) +{ + unsigned long start = vrm->addr; + unsigned long end = vrm->addr + vrm->old_len; + unsigned long old_start = vrm->vma->vm_start; + unsigned long old_end = vrm->vma->vm_end; + + /* + * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old + * vma. + */ + vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT); + + /* + * anon_vma links of the old vma are no longer needed after its page + * table has been moved. + */ + if (new_vma != vrm->vma && start == old_start && end == old_end) + unlink_anon_vmas(vrm->vma); + + /* Because we won't unmap we don't need to touch locked_vm.
*/ +} + +static unsigned long move_vma(struct vma_remap_struct *vrm) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *new_vma; + unsigned long hiwater_vm; + int err; + + err = prep_move_vma(vrm); + if (err) + return err; + + /* If accounted, charge the number of bytes the operation will use. */ + if (!vrm_charge(vrm)) + return -ENOMEM; + + /* We don't want racing faults. */ + vma_start_write(vrm->vma); + + /* Perform copy step. */ + err = copy_vma_and_data(vrm, &new_vma); + /* + * If we established the copied-to VMA, we attempt to recover from the + * error by setting the destination VMA to the source VMA and unmapping + * it below. + */ + if (err && !new_vma) + return err; /* * If we failed to move page tables we still do total_vm increment @@ -1007,51 +1158,15 @@ static unsigned long move_vma(struct vma_remap_struct *vrm) */ hiwater_vm = mm->hiwater_vm; - /* Tell pfnmap has moved from this vma */ - if (unlikely(vma->vm_flags & VM_PFNMAP)) - untrack_pfn_clear(vma); - - if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) { - /* We always clear VM_LOCKED[ONFAULT] on the old vma */ - vm_flags_clear(vma, VM_LOCKED_MASK); - - /* - * anon_vma links of the old vma is no longer needed after its page - * table has been moved. - */ - if (new_vma != vma && vma->vm_start == old_addr && - vma->vm_end == (old_addr + old_len)) - unlink_anon_vmas(vma); - - /* Because we won't unmap we don't need to touch locked_vm */ - vrm_stat_account(vrm, new_len); - return new_addr; - } - - vrm_stat_account(vrm, new_len); - - vma_iter_init(&vmi, mm, old_addr); - if (do_vmi_munmap(&vmi, mm, old_addr, old_len, vrm->uf_unmap, false) < 0) { - /* OOM: unable to split vma, just get accounts right */ - if (vm_flags & VM_ACCOUNT && !(vrm->flags & MREMAP_DONTUNMAP)) - vm_acct_memory(old_len >> PAGE_SHIFT); - account_start = account_end = false; - } + vrm_stat_account(vrm, vrm->new_len); + if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) + dontunmap_complete(vrm, new_vma); + else + unmap_source_vma(vrm); mm->hiwater_vm = hiwater_vm; - /* Restore VM_ACCOUNT if one or two pieces of vma left */ - if (account_start) { - vma = vma_prev(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - if (account_end) { - vma = vma_next(&vmi); - vm_flags_set(vma, VM_ACCOUNT); - } - - return new_addr; + return err ? (unsigned long)err : vrm->new_addr; } /* -- 2.50.1 From 2a4077f49ccd6f904be6edb363714646e47292c9 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:39 +0000 Subject: [PATCH 11/16] mm/mremap: refactor move_page_tables(), abstracting state A lot of state is threaded throughout the page table moving logic within the mremap code, including boolean values which control behaviour specifically in regard to whether rmap locks need be held over the operation and whether the VMA belongs to a temporary stack being moved by move_arg_pages() (and consequently, relocate_vma_down()). As we already transmit state throughout this operation, it is neater and more readable to maintain a small state object. We do so in the form of pagetable_move_control. In addition, this allows us to update parameters within the state as we manipulate things, for instance with regard to the page table realignment logic. In future I want to add additional functionality to the page table logic, so this is an additional motivation for making it easier to do so. This patch changes move_page_tables() to accept a pointer to a pagetable_move_control struct, and performs changes at this level only. 
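As a rough userspace illustration of the initialiser-macro idea (the struct and macro below are simplified stand-ins for pagetable_move_control and PAGETABLE_MOVE(), not the kernel definitions), deriving old_end at initialisation time keeps the address/length pair consistent by construction:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's pagetable_move_control. */
    struct move_control {
            unsigned long old_addr; /* where the move begins */
            unsigned long old_end;  /* derived: old_addr + len_in */
            unsigned long new_addr; /* where page tables move to */
            unsigned long len_in;   /* user-specified length in bytes */
            bool for_stack;         /* early temp-stack move? false by default */
    };

    /* Derive old_end in one place so callers cannot get the pair wrong. */
    #define MOVE_CONTROL(name, old_addr_, new_addr_, len_)  \
            struct move_control name = {                    \
                    .old_addr = (old_addr_),                \
                    .old_end = (old_addr_) + (len_),        \
                    .new_addr = (new_addr_),                \
                    .len_in = (len_),                       \
            }

    int main(void)
    {
            MOVE_CONTROL(mc, 0x10000UL, 0x40000UL, 0x3000UL);

            /* Fields with defaults are flipped explicitly afterwards. */
            mc.for_stack = true;
            printf("move [%#lx, %#lx) -> %#lx (%lu bytes)\n",
                   mc.old_addr, mc.old_end, mc.new_addr, mc.len_in);
            return 0;
    }

Fields that default to false, such as for_stack here, are then set explicitly by the caller that needs them, mirroring how relocate_vma_down() sets pmc.for_stack in the diff below.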
Further page table logic will be updated in a subsequent patch. We additionally take the opportunity to add significant comments describing the address realignment logic to make it abundantly clear what is going on in this code. Link: https://lkml.kernel.org/r/e20180add9c8746184aa3f23a61fff69a06cdaa9.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/internal.h | 43 +++++++++++-- mm/mmap.c | 5 +- mm/mremap.c | 172 ++++++++++++++++++++++++++++++++++++-------------- 3 files changed, 168 insertions(+), 52 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index aa30282a774a..06f816cdb43b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -24,6 +24,44 @@ struct folio_batch; +/* + * Maintains state across a page table move. The operation assumes both source + * and destination VMAs already exist and are specified by the user. + * + * Partial moves are permitted, but the old and new ranges must both reside + * within a VMA. + * + * The mmap lock must be held in write mode, and VMA write locks must be held + * on any VMA that is visible. + * + * Use the PAGETABLE_MOVE() macro to initialise this struct. + * + * NOTE: The page table move is effected by reading from [old_addr, old_end), + * and old_addr may be updated for better page table alignment, so len_in + * represents the length of the range being copied as specified by the user. + */ +struct pagetable_move_control { + struct vm_area_struct *old; /* Source VMA. */ + struct vm_area_struct *new; /* Destination VMA. */ + unsigned long old_addr; /* Address from which the move begins. */ + unsigned long old_end; /* Exclusive address at which old range ends. */ + unsigned long new_addr; /* Address to move page tables to. */ + unsigned long len_in; /* Bytes to remap specified by user. */ + + bool need_rmap_locks; /* Do rmap locks need to be taken? */ + bool for_stack; /* Is this an early temp stack being moved? */ +}; + +#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_) \ + struct pagetable_move_control name = { \ + .old = old_, \ + .new = new_, \ + .old_addr = old_addr_, \ + .old_end = (old_addr_) + (len_), \ + .new_addr = new_addr_, \ + .len_in = len_, \ + } + /* * The set of flags that only affect watermark checking and reclaim * behaviour. This is used by the MM to obey the caller constraints @@ -1527,10 +1565,7 @@ extern struct list_lru shadow_nodes; } while (0) /* mremap.c */ -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack); +unsigned long move_page_tables(struct pagetable_move_control *pmc); #ifdef CONFIG_UNACCEPTED_MEMORY void accept_page(struct page *page); diff --git a/mm/mmap.c b/mm/mmap.c index 15d6cd7cc845..efcc4ca7500d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1694,6 +1694,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); struct vm_area_struct *next; struct mmu_gather tlb; + PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length); BUG_ON(new_start > new_end); @@ -1716,8 +1717,8 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made.
*/ - if (length != move_page_tables(vma, old_start, - vma, new_start, length, false, true)) + pmc.for_stack = true; + if (length != move_page_tables(&pmc)) return -ENOMEM; tlb_gather_mmu(&tlb, mm); diff --git a/mm/mremap.c b/mm/mremap.c index 7dc058d5d5e2..3a2ac167e876 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -580,8 +580,9 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, * the VMA that is created to span the source and destination of the move, * so we make an exception for it. */ -static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align, - unsigned long mask, bool for_stack) +static bool can_align_down(struct pagetable_move_control *pmc, + struct vm_area_struct *vma, unsigned long addr_to_align, + unsigned long mask) { unsigned long addr_masked = addr_to_align & mask; @@ -590,11 +591,11 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali * of the corresponding VMA, we can't align down or we will destroy part * of the current mapping. */ - if (!for_stack && vma->vm_start != addr_to_align) + if (!pmc->for_stack && vma->vm_start != addr_to_align) return false; /* In the stack case we explicitly permit in-VMA alignment. */ - if (for_stack && addr_masked >= vma->vm_start) + if (pmc->for_stack && addr_masked >= vma->vm_start) return true; /* @@ -604,54 +605,131 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; } -/* Opportunistically realign to specified boundary for faster copy. */ -static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma, - unsigned long *new_addr, struct vm_area_struct *new_vma, - unsigned long mask, bool for_stack) +/* + * Determine if we are in fact able to realign for efficiency to a higher page + * table boundary. + */ +static bool can_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) { + unsigned long align_mask = ~pagetable_mask; + unsigned long old_align = pmc->old_addr & align_mask; + unsigned long new_align = pmc->new_addr & align_mask; + unsigned long pagetable_size = align_mask + 1; + unsigned long old_align_next = pagetable_size - old_align; + + /* + * We don't want to have to go hunting for VMAs from the end of the old + * VMA to the next page table boundary, also we want to make sure the + * operation is worthwhile. + * + * So ensure that we only perform this realignment if the end of the + * range being copied reaches or crosses the page table boundary. + * + * boundary boundary + * .<- old_align -> . + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . <----------------.-----------> + * . len_in + * <-------------------------------> + * . pagetable_size . + * . <----------------> + * . old_align_next . + */ + if (pmc->len_in < old_align_next) + return false; + /* Skip if the addresses are already aligned. */ - if ((*old_addr & ~mask) == 0) - return; + if (old_align == 0) + return false; /* Only realign if the new and old addresses are mutually aligned. */ - if ((*old_addr & ~mask) != (*new_addr & ~mask)) - return; + if (old_align != new_align) + return false; /* Ensure realignment doesn't cause overlap with existing mappings.
*/ - if (!can_align_down(old_vma, *old_addr, mask, for_stack) || - !can_align_down(new_vma, *new_addr, mask, for_stack)) + if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) || + !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask)) + return false; + + return true; +} + +/* + * Opportunistically realign to specified boundary for faster copy. + * + * Consider an mremap() of a VMA with page table boundaries as below, and no + * preceding VMAs from the lower page table boundary to the start of the VMA, + * with the end of the range reaching or crossing the page table boundary. + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * . pmc->old_addr . pmc->old_end + * . <----------------------------> + * . move these page tables + * + * If we proceed with moving page tables in this scenario, we will have a lot of + * work to do traversing old page tables and establishing new ones in the + * destination across multiple lower level page tables. + * + * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the + * page table boundary, so we can simply copy a single page table entry for the + * aligned portion of the VMA instead: + * + * boundary boundary + * . |----------------.-----------| + * . | vma . | + * . |----------------.-----------| + * pmc->old_addr . pmc->old_end + * <-------------------------------------------> + * . move these page tables + */ +static void try_realign_addr(struct pagetable_move_control *pmc, + unsigned long pagetable_mask) +{ + + if (!can_realign_addr(pmc, pagetable_mask)) return; - *old_addr = *old_addr & mask; - *new_addr = *new_addr & mask; + /* + * Simply align to page table boundaries. Note that we do NOT update the + * pmc->old_end value, and since the move_page_tables() operation spans + * from [old_addr, old_end) (offsetting new_addr as it is performed), + * this simply changes the start of the copy, not the end. + */ + pmc->old_addr &= pagetable_mask; + pmc->new_addr &= pagetable_mask; } -unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack) +unsigned long move_page_tables(struct pagetable_move_control *pmc) { unsigned long extent, old_end; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; + unsigned long old_addr, new_addr; + struct vm_area_struct *vma = pmc->old; - if (!len) + if (!pmc->len_in) return 0; - old_end = old_addr + len; - if (is_vm_hugetlb_page(vma)) - return move_hugetlb_page_tables(vma, new_vma, old_addr, - new_addr, len); + return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, + pmc->new_addr, pmc->len_in); + old_end = pmc->old_end; /* * If possible, realign addresses to PMD boundary for faster copy. * Only realign if the mremap copying hits a PMD boundary. */ - if (len >= PMD_SIZE - (old_addr & ~PMD_MASK)) - try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK, - for_stack); + try_realign_addr(pmc, PMD_MASK); + /* These may have been changed. 
*/ + old_addr = pmc->old_addr; + new_addr = pmc->new_addr; flush_cache_range(vma, old_addr, old_end); mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, @@ -675,12 +753,11 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, - old_pud, new_pud, need_rmap_locks); + old_pud, new_pud, pmc->need_rmap_locks); /* We ignore and continue on error? */ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, old_pud, new_pud, true)) continue; @@ -698,7 +775,7 @@ again: pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, need_rmap_locks)) + old_pmd, new_pmd, pmc->need_rmap_locks)) continue; split_huge_pmd(vma, old_pmd, old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && @@ -713,10 +790,10 @@ again: } if (pmd_none(*old_pmd)) continue; - if (pte_alloc(new_vma->vm_mm, new_pmd)) + if (pte_alloc(pmc->new->vm_mm, new_pmd)) break; if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, - new_vma, new_pmd, new_addr, need_rmap_locks) < 0) + pmc->new, new_pmd, new_addr, pmc->need_rmap_locks) < 0) goto again; } @@ -726,10 +803,10 @@ again: * Prevent negative return values when {old,new}_addr was realigned * but we broke out of the above loop for the first PMD itself. */ - if (old_addr < old_end - len) + if (old_addr < old_end - pmc->len_in) return 0; - return len + old_addr - old_end; /* how much done */ + return pmc->len_in + old_addr - old_end; /* how much done */ } /* Set vrm->delta to the difference in VMA size specified by user. */ @@ -1040,37 +1117,40 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm, unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT; unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; unsigned long moved_len; - bool need_rmap_locks; - struct vm_area_struct *vma; + struct vm_area_struct *vma = vrm->vma; struct vm_area_struct *new_vma; int err = 0; + PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len); - new_vma = copy_vma(&vrm->vma, vrm->new_addr, vrm->new_len, new_pgoff, - &need_rmap_locks); + new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff, + &pmc.need_rmap_locks); if (!new_vma) { vrm_uncharge(vrm); *new_vma_ptr = NULL; return -ENOMEM; } - vma = vrm->vma; + vrm->vma = vma; + pmc.old = vma; + pmc.new = new_vma; - moved_len = move_page_tables(vma, vrm->addr, new_vma, - vrm->new_addr, vrm->old_len, - need_rmap_locks, /* for_stack= */false); + moved_len = move_page_tables(&pmc); if (moved_len < vrm->old_len) err = -ENOMEM; else if (vma->vm_ops && vma->vm_ops->mremap) err = vma->vm_ops->mremap(new_vma); if (unlikely(err)) { + PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr, + vrm->addr, moved_len); + /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. 
*/ - move_page_tables(new_vma, vrm->new_addr, vma, vrm->addr, - moved_len, /* need_rmap_locks = */true, - /* for_stack= */false); + pmc_revert.need_rmap_locks = true; + move_page_tables(&pmc_revert); + vrm->vma = new_vma; vrm->old_len = vrm->new_len; vrm->addr = vrm->new_addr; -- 2.50.1 From 664dc4da2694fa56e49dc3e4a041bcf7608a060b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 10 Mar 2025 20:50:40 +0000 Subject: [PATCH 12/16] mm/mremap: thread state through move page table operation Finish refactoring the page table logic by threading the PMC state throughout the operation, allowing us to control the operation as we go. Additionally, update the old_addr, new_addr fields in move_page_tables() as we progress through the process making use of the fact we have this state object now to track this. With these changes made, not only is the code far more readable, but we can finally transmit state throughout the entire operation, which lays the groundwork for sensibly making changes in future to how the mremap() operation is performed. Additionally take the opportunity to refactor the means of determining the progress of the operation, abstracting this to pmc_progress() and simplifying the logic to make it clearer what's going on. Link: https://lkml.kernel.org/r/230dd7a2b7b01a6eef442678f284d575e800356e.1741639347.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Cc: Harry Yoo Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/internal.h | 3 + mm/mremap.c | 196 +++++++++++++++++++++++++++++--------------------- 2 files changed, 116 insertions(+), 83 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 06f816cdb43b..1ac433d2092b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,6 +36,9 @@ struct folio_batch; * * Use the PAGETABLE_MOVE() macro to initialise this struct. * + * The old_addr and new_addr fields are updated as the page table move is + * executed. + * * NOTE: The page table move is affected by reading from [old_addr, old_end), * and old_addr may be updated for better page table alignment, so len_in * represents the length of the range being copied as specified by the user. 
diff --git a/mm/mremap.c b/mm/mremap.c index 3a2ac167e876..0865387531ed 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -108,8 +108,7 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) return pmd; } -static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; @@ -122,13 +121,12 @@ static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, return pud_alloc(mm, p4d, addr); } -static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr) +static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr) { pud_t *pud; pmd_t *pmd; - pud = alloc_new_pud(mm, vma, addr); + pud = alloc_new_pud(mm, addr); if (!pud) return NULL; @@ -172,17 +170,19 @@ static pte_t move_soft_dirty_pte(pte_t pte) return pte; } -static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, - unsigned long old_addr, unsigned long old_end, - struct vm_area_struct *new_vma, pmd_t *new_pmd, - unsigned long new_addr, bool need_rmap_locks) +static int move_ptes(struct pagetable_move_control *pmc, + unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd) { + struct vm_area_struct *vma = pmc->old; bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma); struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; pmd_t dummy_pmdval; spinlock_t *old_ptl, *new_ptl; bool force_flush = false; + unsigned long old_addr = pmc->old_addr; + unsigned long new_addr = pmc->new_addr; + unsigned long old_end = old_addr + extent; unsigned long len = old_end - old_addr; int err = 0; @@ -204,7 +204,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, * serialize access to individual ptes, but only rmap traversal * order guarantees that we won't miss both the old and new ptes). */ - if (need_rmap_locks) + if (pmc->need_rmap_locks) take_rmap_locks(vma); /* @@ -278,7 +278,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); out: - if (need_rmap_locks) + if (pmc->need_rmap_locks) drop_rmap_locks(vma); return err; } @@ -293,10 +293,11 @@ static inline bool arch_supports_page_table_move(void) #endif #ifdef CONFIG_HAVE_MOVE_PMD -static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) +static bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; bool res = false; pmd_t pmd; @@ -342,7 +343,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ - old_ptl = pmd_lock(vma->vm_mm, old_pmd); + old_ptl = pmd_lock(mm, old_pmd); new_ptl = pmd_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -359,7 +360,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pmd_none(*new_pmd)); pmd_populate(mm, new_pmd, pmd_pgtable(pmd)); - flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE); out_unlock: if (new_ptl != old_ptl) spin_unlock(new_ptl); @@ -368,19 +369,19 @@ out_unlock: return res; } #else -static inline bool move_normal_pmd(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, - pmd_t *new_pmd) +static inline bool move_normal_pmd(struct pagetable_move_control *pmc, + pmd_t *old_pmd, pmd_t *new_pmd) { return false; } #endif #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD) -static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -406,7 +407,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. */ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -418,7 +419,7 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, VM_BUG_ON(!pud_none(*new_pud)); pud_populate(mm, new_pud, pud_pgtable(pud)); - flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); + flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -426,19 +427,19 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static inline bool move_normal_pud(struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, - pud_t *new_pud) +static inline bool move_normal_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { return false; } #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; + struct vm_area_struct *vma = pmc->old; struct mm_struct *mm = vma->vm_mm; pud_t pud; @@ -453,7 +454,7 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ - old_ptl = pud_lock(vma->vm_mm, old_pud); + old_ptl = pud_lock(mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); @@ -466,8 +467,8 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, /* Set the new pud */ /* mark soft_ditry when we add pud level soft dirty support */ - set_pud_at(mm, new_addr, new_pud, pud); - flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); + set_pud_at(mm, pmc->new_addr, new_pud, pud); + flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); @@ -475,8 +476,9 @@ static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, return true; } #else -static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) +static bool move_huge_pud(struct pagetable_move_control *pmc, + pud_t *old_pud, pud_t *new_pud) + { WARN_ON_ONCE(1); return false; @@ -497,10 +499,12 @@ enum pgt_entry { * destination pgt_entry. */ static __always_inline unsigned long get_extent(enum pgt_entry entry, - unsigned long old_addr, unsigned long old_end, - unsigned long new_addr) + struct pagetable_move_control *pmc) { unsigned long next, extent, mask, size; + unsigned long old_addr = pmc->old_addr; + unsigned long old_end = pmc->old_end; + unsigned long new_addr = pmc->new_addr; switch (entry) { case HPAGE_PMD: @@ -529,38 +533,51 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry, return extent; } +/* + * Should move_pgt_entry() acquire the rmap locks? This is either expressed in + * the PMC, or overridden in the case of normal, larger page tables. + */ +static bool should_take_rmap_locks(struct pagetable_move_control *pmc, + enum pgt_entry entry) +{ + switch (entry) { + case NORMAL_PMD: + case NORMAL_PUD: + return true; + default: + return pmc->need_rmap_locks; + } +} + /* * Attempts to speedup the move by moving entry at the level corresponding to * pgt_entry. Returns true if the move was successful, else false. 
*/ -static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, - unsigned long old_addr, unsigned long new_addr, - void *old_entry, void *new_entry, bool need_rmap_locks) +static bool move_pgt_entry(struct pagetable_move_control *pmc, + enum pgt_entry entry, void *old_entry, void *new_entry) { bool moved = false; + bool need_rmap_locks = should_take_rmap_locks(pmc, entry); /* See comment in move_ptes() */ if (need_rmap_locks) - take_rmap_locks(vma); + take_rmap_locks(pmc->old); switch (entry) { case NORMAL_PMD: - moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pmd(pmc, old_entry, new_entry); break; case NORMAL_PUD: - moved = move_normal_pud(vma, old_addr, new_addr, old_entry, - new_entry); + moved = move_normal_pud(pmc, old_entry, new_entry); break; case HPAGE_PMD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pmd(vma, old_addr, new_addr, old_entry, + move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry, new_entry); break; case HPAGE_PUD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - move_huge_pud(vma, old_addr, new_addr, old_entry, - new_entry); + move_huge_pud(pmc, old_entry, new_entry); break; default: @@ -569,7 +586,7 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, } if (need_rmap_locks) - drop_rmap_locks(vma); + drop_rmap_locks(pmc->old); return moved; } @@ -705,108 +722,121 @@ static void try_realign_addr(struct pagetable_move_control *pmc, pmc->new_addr &= pagetable_mask; } +/* Is the page table move operation done? */ +static bool pmc_done(struct pagetable_move_control *pmc) +{ + return pmc->old_addr >= pmc->old_end; +} + +/* Advance to the next page table, offset by extent bytes. */ +static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent) +{ + pmc->old_addr += extent; + pmc->new_addr += extent; +} + +/* + * Determine how many bytes in the specified input range have had their page + * tables moved so far. + */ +static unsigned long pmc_progress(struct pagetable_move_control *pmc) +{ + unsigned long orig_old_addr = pmc->old_end - pmc->len_in; + unsigned long old_addr = pmc->old_addr; + + /* + * Prevent negative return values when {old,new}_addr was realigned but + * we broke out of the loop in move_page_tables() for the first PMD + * itself. + */ + return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr; +} + unsigned long move_page_tables(struct pagetable_move_control *pmc) { - unsigned long extent, old_end; + unsigned long extent; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; - unsigned long old_addr, new_addr; - struct vm_area_struct *vma = pmc->old; + struct mm_struct *mm = pmc->old->vm_mm; if (!pmc->len_in) return 0; - if (is_vm_hugetlb_page(vma)) + if (is_vm_hugetlb_page(pmc->old)) return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, pmc->new_addr, pmc->len_in); - old_end = pmc->old_end; /* * If possible, realign addresses to PMD boundary for faster copy. * Only realign if the mremap copying hits a PMD boundary. */ try_realign_addr(pmc, PMD_MASK); - /* These may have been changed. 
*/ - old_addr = pmc->old_addr; - new_addr = pmc->new_addr; - flush_cache_range(vma, old_addr, old_end); - mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, - old_addr, old_end); + flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end); + mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, + pmc->old_addr, pmc->old_end); mmu_notifier_invalidate_range_start(&range); - for (; old_addr < old_end; old_addr += extent, new_addr += extent) { + for (; !pmc_done(pmc); pmc_next(pmc, extent)) { cond_resched(); /* * If extent is PUD-sized try to speed up the move by moving at the * PUD level if possible. */ - extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); + extent = get_extent(NORMAL_PUD, pmc); - old_pud = get_old_pud(vma->vm_mm, old_addr); + old_pud = get_old_pud(mm, pmc->old_addr); if (!old_pud) continue; - new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); + new_pud = alloc_new_pud(mm, pmc->new_addr); if (!new_pud) break; if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { - move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, - old_pud, new_pud, pmc->need_rmap_locks); + move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud); /* We ignore and continue on error? */ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { - if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, - old_pud, new_pud, true)) + if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud)) continue; } - extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr); - old_pmd = get_old_pmd(vma->vm_mm, old_addr); + extent = get_extent(NORMAL_PMD, pmc); + old_pmd = get_old_pmd(mm, pmc->old_addr); if (!old_pmd) continue; - new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); + new_pmd = alloc_new_pmd(mm, pmc->new_addr); if (!new_pmd) break; again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && - move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, pmc->need_rmap_locks)) + move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd)) continue; - split_huge_pmd(vma, old_pmd, old_addr); + split_huge_pmd(pmc->old, old_pmd, pmc->old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* * If the extent is PMD-sized, try to speed the move by * moving at the PMD level if possible. */ - if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, - old_pmd, new_pmd, true)) + if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd)) continue; } if (pmd_none(*old_pmd)) continue; if (pte_alloc(pmc->new->vm_mm, new_pmd)) break; - if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, - pmc->new, new_pmd, new_addr, pmc->need_rmap_locks) < 0) + if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0) goto again; } mmu_notifier_invalidate_range_end(&range); - /* - * Prevent negative return values when {old,new}_addr was realigned - * but we broke out of the above loop for the first PMD itself. - */ - if (old_addr < old_end - pmc->len_in) - return 0; - - return pmc->len_in + old_addr - old_end; /* how much done */ + return pmc_progress(pmc); } /* Set vrm->delta to the difference in VMA size specified by user. 
*/ -- 2.50.1 From 6220ea5583e977e1eeea06c237cc440e5ac3de46 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:54 +0100 Subject: [PATCH 13/16] mm: factor out large folio handling from folio_order() into folio_large_order() MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Patch series "mm: MM owner tracking for large folios (!hugetlb) + CONFIG_NO_PAGE_MAPCOUNT", v3. Let's add an "easy" way to decide -- without false positives, without page-mapcounts and without page table/rmap scanning -- whether a large folio is "certainly mapped exclusively" into a single MM, or whether it "maybe mapped shared" into multiple MMs. Use that information to implement Copy-on-Write reuse, to convert folio_likely_mapped_shared() to folio_maybe_mapped_shared(), and to introduce a kernel config option that lets us not use+maintain per-page mapcounts in large folios anymore. The bigger picture was presented at LSF/MM [1]. This series is effectively a follow-up on my early work [2], which implemented a more precise, but also more complicated, way to identify whether a large folio is "mapped shared" into multiple MMs or "mapped exclusively" into a single MM. 1 Patch Organization ==================== Patch #1 -> #6: make more room in order-1 folios, so we have two "unsigned long" available for our purposes Patch #7 -> #11: preparations Patch #12: MM owner tracking for large folios Patch #13: COW reuse for PTE-mapped anon THP Patch #14: folio_maybe_mapped_shared() Patch #15 -> #20: introduce and implement CONFIG_NO_PAGE_MAPCOUNT 2 MM owner tracking =================== We assign each MM a unique ID ("MM ID"), to be able to squeeze more information in our folios. On 32bit we use 15-bit IDs, on 64bit we use 31-bit IDs. For each large folio, we now store two MM-ID+mapcount ("slot") combinations: * mm0_id + mm0_mapcount * mm1_id + mm1_mapcount On 32bit, we use a 16-bit per-MM mapcount, on 64bit an ordinary 32bit mapcount. This way, we require 2x "unsigned long" on 32bit and 64bit for both slots. Paired with the large mapcount, we can reliably identify whether one of these MMs is the current owner (-> owns all mappings) or even holds all folio references (-> owns all mappings, and all references are from mappings). As long as only two MMs map folio pages at a time, we can reliably and precisely identify whether a large folio is "mapped shared" or "mapped exclusively". Any additional MM that starts mapping the folio while there are no free slots becomes an "untracked MM". If one such "untracked MM" is the last one mapping a folio exclusively, we will not detect the folio as "mapped exclusively" but instead as "maybe mapped shared". (exception: only a single mapping remains) So that's where the approach gets imprecise. For now, we use a bit-spinlock to sync the large mapcount + slots, and make sure we do keep the machinery fast, to not degrade (un)map performance drastically: for example, we make sure to only use a single atomic (when grabbing the bit-spinlock), like we would already perform when updating the large mapcount. 3 CONFIG_NO_PAGE_MAPCOUNT ========================= Patch #15 -> #20 spell out and document what exactly is affected when not maintaining the per-page mapcounts in large folios anymore. Most importantly, as we cannot maintain folio->_nr_pages_mapped anymore when (un)mapping pages, we'll account a complete folio as mapped if a single page is mapped. In addition, we'll not detect partially mapped anonymous folios as such in all cases yet.
Likely less relevant changes include that we might now under-estimate the USS (Unique Set Size) of a process, but never over-estimate it. The goal is to make CONFIG_NO_PAGE_MAPCOUNT the default at some point, to then slowly make it the only option, as we learn about real-life impacts and possible ways to mitigate them. 4 Performance ============= Detailed performance numbers were included in v1 [3], and not that much changed between v1 and v2. I did plenty of measurements on different systems in the meantime, which all revealed slightly different results. The pte-mapped-folio micro-benchmarks [4] are fairly sensitive to code layout changes on some systems. Especially the fork() benchmark started being more-shaky-than-before on recent kernels for some reason. In summary, with my micro-benchmarks: * Small folios are not impacted. * CoW performance seems to be mostly unchanged across all folio sizes. * CoW reuse performance of large folios now matches CoW reuse performance of small folios, because we now actually implement the CoW reuse optimization. On an Intel Xeon Silver 4210R I measured a ~65% reduction in runtime, on an arm64 system I measured ~54% reduction. * munmap() performance improves with CONFIG_NO_PAGE_MAPCOUNT. I saw double-digit % reduction (up to ~30% on an Intel Xeon Silver 4210R and up to ~70% on an AmpereOne A192-32X) with larger folios. The larger the folios, the larger the performance improvement. * munmap() performance degrades very slightly (a couple percent) without CONFIG_NO_PAGE_MAPCOUNT for smaller folios. For larger folios, there seems to be no change at all. * fork() performance improves with CONFIG_NO_PAGE_MAPCOUNT. I saw double-digit % reduction (up to ~20% on an Intel Xeon Silver 4210R and up to ~10% on an AmpereOne A192-32X) with larger folios. The larger the folios, the larger the performance improvement. * While fork() performance without CONFIG_NO_PAGE_MAPCOUNT seems to be almost unchanged on some systems, I saw some degradation for smaller folios on the AmpereOne A192-32X. I did not investigate the details yet, but I suspect code layout changes or suboptimal code placement / inlining. I'm not too worried about the fork() micro-benchmarks for smaller folios given how shaky the results are lately and by how much we improved fork() performance recently. I also ran case-anon-cow-rand and case-anon-cow-seq, part of vm-scalability, to assess the scalability and the impact of the bit-spinlock. My measurements on a 2-socket 10-core Intel Xeon Silver 4210R CPU revealed no significant changes. Similarly, running these benchmarks with 2 MiB THPs enabled on the AmpereOne A192-32X with 192 cores, I got < 1% difference with < 1% stdev, which is nice. So far, I did not get my hands on a similarly large system with multiple sockets. I found no other fitting scalability benchmarks that seem to really hammer on concurrent mapping/unmapping of large folio pages like case-anon-cow-seq does. 5 Concerns ========== 5.1 Bit spinlock ---------------- I'm not quite happy about the bit-spinlock, but so far it does not seem to affect scalability in my measurements. If it ever becomes a problem we could either investigate improving the locking, or simply stop the MM tracking once there are "too many mappings" and simply assume that the folio is "mapped shared" until it was freed. This would be similar (but slightly different) to the "0,1,2,stopped" counting idea Willy had at some point. Adding that logic to "stop tracking" adds more code to the hot path, so I avoided that for now.
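To make the two-slot ownership scheme from section 2 concrete, here is a deliberately simplified, self-contained C model of the "certainly mapped exclusively" vs. "maybe mapped shared" decision. All struct, field and function names below are invented for illustration and do not match the kernel's implementation; updates to the slots and the large mapcount are assumed to be serialised externally (the series uses a bit-spinlock for that), so this is a sketch of the idea, not the real code:

#include <stdbool.h>

/* Toy model only: two (MM ID, mapcount) slots plus the large mapcount. */
struct owner_slots {
	int mm0_id;
	int mm0_mapcount;
	int mm1_id;
	int mm1_mapcount;
	int large_mapcount;	/* total page-table mappings of the folio */
};

/*
 * A folio is certainly mapped exclusively if one tracked MM holds every
 * mapping. Anything else -- including mappings held by "untracked" MMs
 * that arrived while both slots were taken -- must conservatively be
 * reported as "maybe mapped shared", which is where the approach is
 * imprecise.
 */
static bool maybe_mapped_shared(const struct owner_slots *s)
{
	if (s->mm0_mapcount > 0 && s->mm0_mapcount == s->large_mapcount)
		return false;	/* slot 0's MM owns all mappings */
	if (s->mm1_mapcount > 0 && s->mm1_mapcount == s->large_mapcount)
		return false;	/* slot 1's MM owns all mappings */
	return true;
}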
5.2 folio_maybe_mapped_shared() ------------------------------- I documented the change from folio_likely_mapped_shared() to folio_maybe_mapped_shared() quite extensively. If we run into surprises, I have some ideas on how to resolve them. For now, I think we should be fine. 5.3 Added code to map/unmap hot path ------------------------------------ So far, it looks like the added code on the rmap hot path does not really seem to matter much in the bigger picture. I'd like to further reduce it (and possibly improve fork() performance further), but I don't easily see how right now. Well, and I am out of puff 🙂 Having that said, alternatives I considered (e.g., per-MM per-folio mapcount) would add a lot more overhead to these hot paths. 6 Future Work ============= 6.1 Large mapcount ------------------ It would be very handy if the large mapcount would count how often folio pages are actually mapped into page tables: a PMD on x86-64 would count 512 times. Calculating the average per-page mapcount will be easy, and remapping (PMD->PTE) folios would get even faster. That would also remove the need for the entire mapcount (except for PMD-sized folios for memory statistics reasons ...), and allow for mapping folios larger than PMDs (e.g., 4 MiB) easily. We likely would also have to take the same number of folio references to make our folio_mapcount() == folio_ref_count() work, and we'd want to be able to avoid mapcount+refcount overflows: this could already become an issue with pte-mapped PUD-sized folios (fsdax). One approach we discussed in the THP cabal meeting is (1) extending the mapcount for large folios to 64bit (at least on 64bit systems) and (2) keeping the refcount at 32bit, but (3) having exactly one reference if the mapcount != 0. It should be doable, but there are some corner cases to consider on the unmap path; it is something that I will be looking into next. 6.2 hugetlb ----------- I'd love to make use of the same tracking also for hugetlb. The real problem is PMD table sharing: getting a page mapped by MM X and unmapped by MM Y will not work. With mshare, that problem should not exist (all mapping/unmapping will be routed through the mshare MM). [1] https://lwn.net/Articles/974223/ [2] https://lore.kernel.org/linux-mm/a9922f58-8129-4f15-b160-e0ace581bcbe@redhat.com/T/ [3] https://lkml.kernel.org/r/20240829165627.2256514-1-david@redhat.com [4] https://gitlab.com/davidhildenbrand/scratchspace/-/raw/main/pte-mapped-folio-benchmarks.c This patch (of 20): Let's factor it out into a simple helper function. This helper will also come in handy when working with code where we know that our folio is large. Maybe in the future we'll have the order readily available for small and large folios; in that case, folio_large_order() would simply translate to folio_order(). Link: https://lkml.kernel.org/r/20250303163014.1128035-1-david@redhat.com Link: https://lkml.kernel.org/r/20250303163014.1128035-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Lance Yang Reviewed-by: Kirill A.
Shutemov Cc: Thomas Gleixner Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: David Hildenbrand Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index de008efd96aa..e5cdbb94ef81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1194,6 +1194,11 @@ struct inode; extern void prep_compound_page(struct page *page, unsigned int order); +static inline unsigned int folio_large_order(const struct folio *folio) +{ + return folio->_flags_1 & 0xff; +} + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -1207,7 +1212,7 @@ static inline unsigned int compound_order(struct page *page) if (!test_bit(PG_head, &folio->flags)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } /** @@ -1223,7 +1228,7 @@ static inline unsigned int folio_order(const struct folio *folio) { if (!folio_test_large(folio)) return 0; - return folio->_flags_1 & 0xff; + return folio_large_order(folio); } #include @@ -2145,7 +2150,7 @@ static inline long folio_nr_pages(const struct folio *folio) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << (folio->_flags_1 & 0xff); + return 1L << folio_large_order(folio); #endif } @@ -2170,7 +2175,7 @@ static inline unsigned long compound_nr(struct page *page) #ifdef CONFIG_64BIT return folio->_folio_nr_pages; #else - return 1L << (folio->_flags_1 & 0xff); + return 1L << folio_large_order(folio); #endif } -- 2.50.1 From 1ea5212aed0682ae1249cba9df7ad7ef539d0a7d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:55 +0100 Subject: [PATCH 14/16] mm: factor out large folio handling from folio_nr_pages() into folio_large_nr_pages() Let's factor it out into a simple helper function. This helper will also come in handy when working with code where we know that our folio is large. While at it, let's consistently return a "long" value from all these similar functions. Note that we cannot use "unsigned int" (even though _folio_nr_pages is of that type), because it would break some callers that do stuff like "-folio_nr_pages()". Both "int" or "unsigned long" would work as well. Maybe in the future we'll have the nr_pages readily available for all large folios, maybe even for small folios, or maybe for none. Link: https://lkml.kernel.org/r/20250303163014.1128035-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Kirill A. 
Shutemov Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index e5cdbb94ef81..cf892c08a88c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1199,6 +1199,18 @@ static inline unsigned int folio_large_order(const struct folio *folio) return folio->_flags_1 & 0xff; } +#ifdef CONFIG_64BIT +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return folio->_folio_nr_pages; +} +#else +static inline long folio_large_nr_pages(const struct folio *folio) +{ + return 1L << folio_large_order(folio); +} +#endif + /* * compound_order() can be called without holding a reference, which means * that niceties like page_folio() don't work. These callers should be @@ -2147,11 +2159,7 @@ static inline long folio_nr_pages(const struct folio *folio) { if (!folio_test_large(folio)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << folio_large_order(folio); -#endif + return folio_large_nr_pages(folio); } /* Only hugetlbfs can allocate folios larger than MAX_ORDER */ @@ -2166,24 +2174,20 @@ static inline long folio_nr_pages(const struct folio *folio) * page. compound_nr() can be called on a tail page, and is defined to * return 1 in that case. */ -static inline unsigned long compound_nr(struct page *page) +static inline long compound_nr(struct page *page) { struct folio *folio = (struct folio *)page; if (!test_bit(PG_head, &folio->flags)) return 1; -#ifdef CONFIG_64BIT - return folio->_folio_nr_pages; -#else - return 1L << folio_large_order(folio); -#endif + return folio_large_nr_pages(folio); } /** * thp_nr_pages - The number of regular pages in this huge page. * @page: The head page of a huge page. */ -static inline int thp_nr_pages(struct page *page) +static inline long thp_nr_pages(struct page *page) { return folio_nr_pages((struct folio *)page); } -- 2.50.1 From 4996fc547f5b49f4a43c261dfadb02cf165cdb51 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:56 +0100 Subject: [PATCH 15/16] mm: let _folio_nr_pages overlay memcg_data in first tail page Let's free up some more of the "unconditionally available on 64BIT" space in order-1 folios by letting _folio_nr_pages overlay memcg_data in the first tail page (second folio page). Consequently, we have the optimization now whenever we have CONFIG_MEMCG, independent of 64BIT. We have to make sure that page->memcg on tail pages does not return "surprises". page_memcg_check() already properly refuses PageTail(). Let's do that earlier in print_page_owner_memcg() to avoid printing wrong "Slab cache page" information. No other code should touch that field on tail pages of compound pages. Reset the "_nr_pages" to 0 when splitting folios, or when freeing them back to the buddy (to avoid false page->memcg_data "bad page" reports). Note that in __split_huge_page(), folio_nr_pages() would stop working already as soon as we start messing with the subpages. Most kernel configs should have at least CONFIG_MEMCG enabled, even if disabled at runtime. 64byte "struct memmap" is what we usually have on 64BIT. 
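As a rough illustration of the overlay described above, the following self-contained C model (invented names; not the kernel's actual layout) shows why a stale nr_pages-style value must be reset before the same storage is reinterpreted, mirroring the reset this patch performs on split and on free:

#include <stdio.h>

/* Toy model: the two fields share the same word in the first tail page. */
union tail_field {
	unsigned long memcg_data;	/* how the word is read after split/free */
	unsigned int nr_pages;		/* how it is read while the folio is large */
};

int main(void)
{
	union tail_field f = { .memcg_data = 0 };

	f.nr_pages = 512;	/* analogue of setting the folio order */
	/* Without this reset, freeing would see bogus memcg_data. */
	f.nr_pages = 0;
	printf("memcg_data now reads back as %lu\n", f.memcg_data);
	return 0;
}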
While at it, rename "_folio_nr_pages" to "_nr_pages". Hopefully memdescs / dynamically allocating "struct folio" in the future will further clean this up, e.g., making _nr_pages available in all configs and maybe even in small folios. Doing that should be fairly easy on top of this change. [david@redhat.com: make "make htmldoc" happy] Link: https://lkml.kernel.org/r/a97f8a91-ec41-4796-81e3-7c9e0e491ba4@redhat.com Link: https://lkml.kernel.org/r/20250303163014.1128035-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Kirill A. Shutemov Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm.h | 4 ++-- include/linux/mm_types.h | 32 ++++++++++++++++++++++++-------- mm/huge_memory.c | 8 ++++++++ mm/internal.h | 4 ++-- mm/page_alloc.c | 6 +++++- mm/page_owner.c | 2 +- 6 files changed, 42 insertions(+), 14 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index cf892c08a88c..c9c2ca345350 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1199,10 +1199,10 @@ static inline unsigned int folio_large_order(const struct folio *folio) return folio->_flags_1 & 0xff; } -#ifdef CONFIG_64BIT +#ifdef NR_PAGES_IN_LARGE_FOLIO static inline long folio_large_nr_pages(const struct folio *folio) { - return folio->_folio_nr_pages; + return folio->_nr_pages; } #else static inline long folio_large_nr_pages(const struct folio *folio) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b1827d78ff89..f3b519fbeca1 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -287,6 +287,11 @@ typedef struct { unsigned long val; } swp_entry_t; +#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT) +/* We have some extra room after the refcount in tail pages. */ +#define NR_PAGES_IN_LARGE_FOLIO +#endif + /** * struct folio - Represents a contiguous set of bytes. * @flags: Identical to the page flags. @@ -312,7 +317,7 @@ typedef struct { * @_large_mapcount: Do not use directly, call folio_mapcount(). * @_nr_pages_mapped: Do not use outside of rmap and debug code. * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). - * @_folio_nr_pages: Do not use directly, call folio_nr_pages(). + * @_nr_pages: Do not use directly, call folio_nr_pages(). * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h. * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h. * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
@@ -376,14 +381,23 @@ struct folio { struct { unsigned long _flags_1; unsigned long _head_1; + union { + struct { /* public: */ - atomic_t _large_mapcount; - atomic_t _entire_mapcount; - atomic_t _nr_pages_mapped; - atomic_t _pincount; -#ifdef CONFIG_64BIT - unsigned int _folio_nr_pages; -#endif + atomic_t _large_mapcount; + atomic_t _entire_mapcount; + atomic_t _nr_pages_mapped; + atomic_t _pincount; + /* private: the union with struct page is transitional */ + }; + unsigned long _usable_1[4]; + }; + atomic_t _mapcount_1; + atomic_t _refcount_1; + /* public: */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + unsigned int _nr_pages; +#endif /* NR_PAGES_IN_LARGE_FOLIO */ /* private: the union with struct page is transitional */ }; struct page __page_1; @@ -435,6 +449,8 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid); offsetof(struct page, pg) + sizeof(struct page)) FOLIO_MATCH(flags, _flags_1); FOLIO_MATCH(compound_head, _head_1); +FOLIO_MATCH(_mapcount, _mapcount_1); +FOLIO_MATCH(_refcount, _refcount_1); #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9a15fd3453ff..2364990974ba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3399,6 +3399,14 @@ static void __split_huge_page(struct page *page, struct list_head *list, int order = folio_order(folio); unsigned int nr = 1 << order; + /* + * Reset any memcg data overlay in the tail pages. folio_nr_pages() + * is unreliable after this point. + */ +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + /* complete memcg works before add pages to LRU */ split_page_memcg(head, order, new_order); diff --git a/mm/internal.h b/mm/internal.h index 1ac433d2092b..1cd977413859 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -725,8 +725,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) return; folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 1U << order; #endif } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 57f959af79c5..e1135dff9a86 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1174,8 +1174,12 @@ __always_inline bool free_pages_prepare(struct page *page, if (unlikely(order)) { int i; - if (compound) + if (compound) { page[1].flags &= ~PAGE_FLAGS_SECOND; +#ifdef NR_PAGES_IN_LARGE_FOLIO + folio->_nr_pages = 0; +#endif + } for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_page_prepare(page, page + i); diff --git a/mm/page_owner.c b/mm/page_owner.c index 2d6360eaccbb..a409e2561a8f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -507,7 +507,7 @@ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret, rcu_read_lock(); memcg_data = READ_ONCE(page->memcg_data); - if (!memcg_data) + if (!memcg_data || PageTail(page)) goto out_unlock; if (memcg_data & MEMCG_DATA_OBJEXTS) -- 2.50.1 From 4eeec8c89a0c4a8c20fb13a4e7093cc8efce383d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 3 Mar 2025 17:29:57 +0100 Subject: [PATCH 16/16] mm: move hugetlb specific things in folio to page[3] Let's just move the hugetlb specific stuff to a separate page, and stop letting it overlay other fields for now. This frees up some space in page[2], which we will use on 32bit to free up some space in page[1]. 
While we could move these things to page[3] instead, it's cleaner to just move the hugetlb specific things out of the way and pack the core-folio stuff as tight as possible. ... and we can minimize the work required in dump_folio. We can now avoid re-initializing &folio->_deferred_list in hugetlb code. Hopefully dynamically allocating "struct folio" in the future will further clean this up. Link: https://lkml.kernel.org/r/20250303163014.1128035-5-david@redhat.com Signed-off-by: David Hildenbrand Cc: Andy Lutomirks^H^Hski Cc: Borislav Betkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Kirill A. Shutemov Cc: Lance Yang Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcow (Oracle) Cc: Michal Koutn Cc: Muchun Song Cc: tejun heo Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Zefan Li Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 27 +++++++++++++++++---------- mm/hugetlb.c | 1 - mm/page_alloc.c | 5 +++++ 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index f3b519fbeca1..727322ecbfdd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -407,20 +407,23 @@ struct folio { unsigned long _flags_2; unsigned long _head_2; /* public: */ - void *_hugetlb_subpool; - void *_hugetlb_cgroup; - void *_hugetlb_cgroup_rsvd; - void *_hugetlb_hwpoison; + struct list_head _deferred_list; /* private: the union with struct page is transitional */ }; + struct page __page_2; + }; + union { struct { - unsigned long _flags_2a; - unsigned long _head_2a; + unsigned long _flags_3; + unsigned long _head_3; /* public: */ - struct list_head _deferred_list; + void *_hugetlb_subpool; + void *_hugetlb_cgroup; + void *_hugetlb_cgroup_rsvd; + void *_hugetlb_hwpoison; /* private: the union with struct page is transitional */ }; - struct page __page_2; + struct page __page_3; }; }; @@ -457,8 +460,12 @@ FOLIO_MATCH(_refcount, _refcount_1); offsetof(struct page, pg) + 2 * sizeof(struct page)) FOLIO_MATCH(flags, _flags_2); FOLIO_MATCH(compound_head, _head_2); -FOLIO_MATCH(flags, _flags_2a); -FOLIO_MATCH(compound_head, _head_2a); +#undef FOLIO_MATCH +#define FOLIO_MATCH(pg, fl) \ + static_assert(offsetof(struct folio, fl) == \ + offsetof(struct page, pg) + 3 * sizeof(struct page)) +FOLIO_MATCH(flags, _flags_3); +FOLIO_MATCH(compound_head, _head_3); #undef FOLIO_MATCH /** diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7a47a08e8526..438de55dd38d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1649,7 +1649,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, folio_ref_unfreeze(folio, 1); - INIT_LIST_HEAD(&folio->_deferred_list); hugetlb_free_folio(folio); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e1135dff9a86..735192222c36 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -971,6 +971,11 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) goto out; } break; + case 3: + /* the third tail page: hugetlb specifics overlap ->mappings */ + if (IS_ENABLED(CONFIG_HUGETLB_PAGE)) + break; + fallthrough; default: if (page->mapping != TAIL_MAPPING) { bad_page(page, "corrupted mapping in tail page"); -- 2.50.1
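To close, here is a minimal, self-contained C model of the FOLIO_MATCH pattern that this last patch extends to page[3]: static assertions pin each field of the overlaying struct to the offset it must have within the corresponding struct page. The struct and macro names below are invented stand-ins for illustration, not the kernel's definitions:

#include <assert.h>
#include <stddef.h>

/* Toy stand-ins for struct page and struct folio. */
struct page_model { unsigned long flags; unsigned long compound_head; };
struct folio_model {
	unsigned long flags;   unsigned long head;	/* overlays page[0] */
	unsigned long flags_1; unsigned long head_1;	/* overlays page[1] */
};

/* Same idea as FOLIO_MATCH: the field must sit at page[1]'s offset. */
#define MODEL_MATCH(pg, fl) \
	static_assert(offsetof(struct folio_model, fl) == \
		      offsetof(struct page_model, pg) + sizeof(struct page_model), \
		      "folio_model field does not overlay page[1] correctly")

MODEL_MATCH(flags, flags_1);
MODEL_MATCH(compound_head, head_1);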