drm/xe/userptr: fix notifier vs folio deadlock
author Matthew Auld <matthew.auld@intel.com>
Mon, 14 Apr 2025 13:25:40 +0000 (14:25 +0100)
committer Lucas De Marchi <lucas.demarchi@intel.com>
Fri, 18 Apr 2025 01:53:38 +0000 (18:53 -0700)
A user is reporting what looks like a notifier vs folio deadlock:
migrate_pages_batch() on the core kernel side holds the folio lock(s)
and then walks the mappings of the folio, however those mappings are
tied to some userptr, which means calling into the notifier callback
and grabbing the notifier lock. With unlucky timing the pages we
pulled from the hmm fault can be snatched by migrate_pages_batch() at
the same time that we are holding the notifier lock to mark the pages
as accessed/dirty. At that point we also want to grab the folio
lock(s) to mark them as dirty, but if they are contended from the
notifier/migrate_pages_batch side then we deadlock, since the folio
lock won't be dropped until we drop the notifier lock.
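
This is the classic ABBA pattern and can be reproduced with a toy
userspace model. The sketch below (pthread mutexes standing in for the
folio lock and the userptr notifier lock, thread and function names
purely illustrative, nothing here is driver code) wedges both threads
when built with gcc -pthread and run:

/*
 * Toy model of the inversion above. The "migrate" thread mimics
 * migrate_pages_batch(): folio lock first, then the notifier lock via
 * the notifier callback. The "userptr" thread mimics the xe side:
 * notifier lock held, then the folio lock via set_page_dirty_lock().
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t folio_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t notifier_lock = PTHREAD_MUTEX_INITIALIZER;

static void *migrate_side(void *arg)
{
	pthread_mutex_lock(&folio_lock);
	sleep(1);				/* let the other thread run */
	pthread_mutex_lock(&notifier_lock);	/* blocks forever */
	pthread_mutex_unlock(&notifier_lock);
	pthread_mutex_unlock(&folio_lock);
	return NULL;
}

static void *userptr_side(void *arg)
{
	pthread_mutex_lock(&notifier_lock);
	sleep(1);
	pthread_mutex_lock(&folio_lock);	/* blocks forever */
	pthread_mutex_unlock(&folio_lock);
	pthread_mutex_unlock(&notifier_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, migrate_side, NULL);
	pthread_create(&b, NULL, userptr_side, NULL);
	pthread_join(a, NULL);	/* never returns: ABBA deadlock */
	pthread_join(b, NULL);
	return 0;
}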

Fortunately the mark accessed/dirty step does not appear to be needed
in the first place: the hmm fault should already have taken care of
it, so just remove it.
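
For context, a rough sketch of how the userptr pages are faulted in,
assuming typical hmm_range_fault() usage (the helper name
fault_userptr_pages() is made up for illustration and is not the
xe_hmm.c code): requesting a write fault leaves the CPU page tables
populated with writable PTEs, which is why a separate accessed/dirty
pass on the returned pages is redundant.

/*
 * Hedged sketch, not a copy of xe_hmm.c: the caller is assumed to hold
 * mmap_read_lock(), as hmm_range_fault() requires. With
 * HMM_PFN_REQ_WRITE the fault installs writable PTEs, so the pages are
 * already referenced (and dirty for the write case) on return.
 */
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

static int fault_userptr_pages(struct mmu_interval_notifier *notifier,
			       unsigned long start, unsigned long end,
			       unsigned long *pfns, bool write)
{
	struct hmm_range range = {
		.notifier = notifier,
		.notifier_seq = mmu_interval_read_begin(notifier),
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT |
				 (write ? HMM_PFN_REQ_WRITE : 0),
	};

	return hmm_range_fault(&range);
}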

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4765
Fixes: 0a98219bcc96 ("drm/xe/hmm: Don't dereference struct page pointers without notifier lock")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250414132539.26654-2-matthew.auld@intel.com
(cherry picked from commit bd7c0cb695e87c0e43247be8196b4919edbe0e85)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
drivers/gpu/drm/xe/xe_hmm.c

index c3cc0fa105e84add4f8767415c4f961b490a668a..57b71956ddf42a9c5b7c712457d938cf042b891a 100644
@@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
        return (end - start) >> PAGE_SHIFT;
 }
 
-/**
- * xe_mark_range_accessed() - mark a range is accessed, so core mm
- * have such information for memory eviction or write back to
- * hard disk
- * @range: the range to mark
- * @write: if write to this range, we mark pages in this range
- * as dirty
- */
-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
-{
-       struct page *page;
-       u64 i, npages;
-
-       npages = xe_npages_in_range(range->start, range->end);
-       for (i = 0; i < npages; i++) {
-               page = hmm_pfn_to_page(range->hmm_pfns[i]);
-               if (write)
-                       set_page_dirty_lock(page);
-
-               mark_page_accessed(page);
-       }
-}
-
 static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
                       struct hmm_range *range, struct rw_semaphore *notifier_sem)
 {
@@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
        if (ret)
                goto out_unlock;
 
-       xe_mark_range_accessed(&hmm_range, write);
        userptr->sg = &userptr->sgt;
        xe_hmm_userptr_set_mapped(uvma);
        userptr->notifier_seq = hmm_range.notifier_seq;