*/
#ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_pm_runtime_lockdep_map = {
- .name = "xe_pm_runtime_lockdep_map"
+static struct lockdep_map xe_pm_runtime_d3cold_map = {
+ .name = "xe_rpm_d3cold_map"
+};
+
+static struct lockdep_map xe_pm_runtime_nod3cold_map = {
+ .name = "xe_rpm_nod3cold_map"
};
#endif
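
+/*
+ * Devices that are neither d3cold capable nor SR-IOV enabled are considered
+ * reclaim safe: their runtime PM callbacks may run under memory reclaim, so
+ * they use a separate lockdep map, primed against fs_reclaim in
+ * xe_pm_runtime_lockdep_prime() below.
+ */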
+static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+{
+ return !xe->d3cold.capable && !xe->info.has_sriov;
+}
+
+static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
+{
+ lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
+static void xe_rpm_lockmap_release(const struct xe_device *xe)
+{
+ lock_map_release(xe_rpm_reclaim_safe(xe) ?
+ &xe_pm_runtime_nod3cold_map :
+ &xe_pm_runtime_d3cold_map);
+}
+
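/*
 * A minimal sketch (illustrative only, not part of this patch) of the
 * pattern the helpers above wrap: a static lockdep_map acts as a virtual
 * lock class, and surrounding a code path with lock_map_acquire()/release()
 * records that path's ordering against real locks without taking anything
 * at runtime.  The helpers simply pick which of the two classes a given
 * device's runtime PM paths belong to.
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map example_map = {
	.name = "example_virtual_lock"
};
#endif

static void __maybe_unused example_annotated_path(void)
{
	lock_map_acquire(&example_map);
	/* code whose lock dependencies lockdep should attribute to example_map */
	lock_map_release(&example_map);
}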
/**
* xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
* @xe: xe device instance
* annotation here and in xe_pm_runtime_get() lockdep will see
* the potential lock inversion and give us a nice splat.
*/
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
out:
if (err)
xe_display_pm_resume(xe, true);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
/* Disable access_ongoing asserts and prevent recursive pm calls */
xe_pm_write_callback_task(xe, current);
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
}
out:
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
* stuff that can happen inside the runtime_resume callback by acquiring
* a dummy lock (it doesn't protect anything and gets compiled out on
* non-debug builds). Lockdep then only needs to see the
- * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
* For example if the (callers_locks) are ever grabbed in the
* runtime_resume callback, lockdep should give us a nice splat.
*/
-static void pm_runtime_lockdep_prime(void)
+static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
- lock_map_acquire(&xe_pm_runtime_lockdep_map);
- lock_map_release(&xe_pm_runtime_lockdep_map);
+ xe_rpm_lockmap_acquire(xe);
+ xe_rpm_lockmap_release(xe);
+}
+
+/*
+ * Prime the lockdep maps for known locking orders that need to
+ * be supported but that may not always occur on all systems.
+ */
+static void xe_pm_runtime_lockdep_prime(void)
+{
+ struct dma_resv lockdep_resv;
+
+ dma_resv_init(&lockdep_resv);
+ lock_map_acquire(&xe_pm_runtime_d3cold_map);
+ /* D3Cold takes the dma_resv locks to evict bos */
+ dma_resv_lock(&lockdep_resv, NULL);
+ dma_resv_unlock(&lockdep_resv);
+ lock_map_release(&xe_pm_runtime_d3cold_map);
+
+ /* Shrinkers might like to wake up the device under reclaim. */
+ fs_reclaim_acquire(GFP_KERNEL);
+ lock_map_acquire(&xe_pm_runtime_nod3cold_map);
+ lock_map_release(&xe_pm_runtime_nod3cold_map);
+ fs_reclaim_release(GFP_KERNEL);
}
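
/*
 * Illustrative sketch, not part of this patch: the kind of bug the
 * fs_reclaim priming above is meant to catch.  For a reclaim-safe device
 * the runtime PM callbacks run under the nod3cold map, so a resume path
 * that blocks on direct reclaim would record nod3cold_map -> fs_reclaim,
 * the reverse of the primed fs_reclaim -> nod3cold_map order, and lockdep
 * would report the inversion.
 */
static int example_bad_runtime_resume(struct xe_device *xe)
{
	/* GFP_KERNEL may enter fs_reclaim; a reclaim-safe resume path must not. */
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	kfree(buf);
	return 0;
}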
/**
if (xe_pm_read_callback_task(xe) == current)
return;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
pm_runtime_resume(xe->drm.dev);
}
if (WARN_ON(xe_pm_read_callback_task(xe) == current))
return -ELOOP;
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_get_sync(xe->drm.dev);
}
return true;
}
- pm_runtime_lockdep_prime();
+ xe_rpm_might_enter_cb(xe);
return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
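
/*
 * A minimal sketch (illustrative only, not from this patch) of what the
 * xe_rpm_might_enter_cb() calls above buy the callers: any lock held
 * across xe_pm_runtime_get() is recorded as (callers_locks) -> rpm map,
 * even when the device is already awake and the resume callback never
 * runs, so an inversion against the callback is reported the first time
 * the path is exercised.  'example_lock' is a hypothetical caller lock.
 */
static void example_get_with_lock_held(struct xe_device *xe,
				       struct mutex *example_lock)
{
	mutex_lock(example_lock);	/* one of the (callers_locks) */
	xe_pm_runtime_get(xe);		/* annotates (callers_locks) -> rpm map */
	/* ... touch hardware ... */
	xe_pm_runtime_put(xe);
	mutex_unlock(example_lock);
}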
drm_dbg(&xe->drm,
"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
+
+/**
+ * xe_pm_module_init() - Perform xe_pm specific module initialization.
+ *
+ * Return: 0 on success. Currently doesn't fail.
+ */
+int __init xe_pm_module_init(void)
+{
+ xe_pm_runtime_lockdep_prime();
+ return 0;
+}
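
/*
 * Hypothetical wiring sketch (the real hook lives in the driver's module
 * init code, outside this hunk): xe_pm_module_init() is meant to run once
 * at module load, before any device is probed, so both lock orders are
 * primed before a runtime PM transition can happen.
 */
static int __init example_xe_init(void)
{
	int err = xe_pm_module_init();

	if (err)
		return err;

	/* ... register the PCI driver, etc. ... */
	return 0;
}
module_init(example_xe_init);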