remove_inode_hugepages(inode, offset, LLONG_MAX);
 }
 
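+/*
+ * Zero the file bytes in [start, end); the range never spans more than
+ * one huge page.  Both call sites hold i_mmap_rwsem in write mode.
+ */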
+static void hugetlbfs_zero_partial_page(struct hstate *h,
+                                       struct address_space *mapping,
+                                       loff_t start,
+                                       loff_t end)
+{
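+       /* hugetlbfs page cache entries are indexed in huge page units. */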
+       pgoff_t idx = start >> huge_page_shift(h);
+       struct folio *folio;
+
+       folio = filemap_lock_folio(mapping, idx);
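+       /* Nothing to zero if the page is not present in the page cache. */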
+       if (!folio)
+               return;
+
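+       /*
+        * Convert the file offsets to byte offsets within the huge page.
+        * An end offset of zero means end falls on a huge page boundary,
+        * so zero through the end of the page.
+        */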
+       start = start & ~huge_page_mask(h);
+       end = end & ~huge_page_mask(h);
+       if (!end)
+               end = huge_page_size(h);
+
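+       /* Zero the half-open byte range [start, end) within the folio. */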
+       folio_zero_segment(folio, (size_t)start, (size_t)end);
+
+       folio_unlock(folio);
+       folio_put(folio);
+}
+
 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
+       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+       struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;
 
        /*
-        * For hole punch round up the beginning offset of the hole and
-        * round down the end.
+        * hole_start and hole_end indicate the full huge pages within the
+        * hole; partial pages at either edge are zeroed rather than removed.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);
 
-       if (hole_end > hole_start) {
-               struct address_space *mapping = inode->i_mapping;
-               struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
+       inode_lock(inode);
 
-               inode_lock(inode);
+       /* protected by i_rwsem */
+       if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+               inode_unlock(inode);
+               return -EPERM;
+       }
 
-               /* protected by i_rwsem */
-               if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
-                       inode_unlock(inode);
-                       return -EPERM;
-               }
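+       /*
+        * Hold i_mmap_rwsem in write mode across the partial page zeroing
+        * and the unmap below so page faults cannot race with the hole
+        * punch.
+        */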
+       i_mmap_lock_write(mapping);
+
+       /* If range starts before first full page, zero partial page. */
+       if (offset < hole_start)
+               hugetlbfs_zero_partial_page(h, mapping,
+                               offset, min(offset + len, hole_start));
 
-               i_mmap_lock_write(mapping);
+       /* Unmap users of full pages in the hole. */
+       if (hole_end > hole_start) {
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                              hole_start >> PAGE_SHIFT,
                                              hole_end >> PAGE_SHIFT, 0);
-               i_mmap_unlock_write(mapping);
-               remove_inode_hugepages(inode, hole_start, hole_end);
-               inode_unlock(inode);
        }
 
+       /*
+        * If range extends beyond last full page, zero partial page.
+        * The hole_start check skips the case where the hole lies entirely
+        * within a single huge page; that range was already zeroed above.
+        */
+       if ((offset + len) > hole_end && (offset + len) > hole_start)
+               hugetlbfs_zero_partial_page(h, mapping,
+                               hole_end, offset + len);
+
+       i_mmap_unlock_write(mapping);
+
+       /* Remove full pages from the file. */
+       if (hole_end > hole_start)
+               remove_inode_hugepages(inode, hole_start, hole_end);
+
+       inode_unlock(inode);
+
        return 0;
 }