#include <asm/cacheflush.h>
 
-#define KFENCE_SKIP_ARCH_FAULT_HANDLER "el1_sync"
-
 static inline bool arch_kfence_init_pool(void) { return true; }
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
        } else if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
        } else {
-               if (kfence_handle_page_fault(addr))
+               if (kfence_handle_page_fault(addr, regs))
                        return;
 
                msg = "paging request";
 
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
 
-/*
- * The page fault handler entry function, up to which the stack trace is
- * truncated in reports.
- */
-#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"
-
 /* Force 4K pages for __kfence_pool. */
 static inline bool arch_kfence_init_pool(void)
 {
 
                efi_crash_gracefully_on_page_fault(address);
 
        /* Only not-present faults should be handled by KFENCE. */
-       if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
+       if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs))
                return;
 
 oops:
 
 /**
  * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
  * @addr: faulting address
+ * @regs: current struct pt_regs (may be NULL; if NULL, the full stack trace is shown)
  *
  * Return:
  * * false - address outside KFENCE pool,
  * cases KFENCE prints an error message and marks the offending page as
  * present, so that the kernel can proceed.
  */
-bool __must_check kfence_handle_page_fault(unsigned long addr);
+bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs);
 
 #else /* CONFIG_KFENCE */
 
 static inline void *kfence_object_start(const void *addr) { return NULL; }
 static inline void __kfence_free(void *addr) { }
 static inline bool __must_check kfence_free(void *addr) { return false; }
-static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; }
 
 #endif
 
 
                return true;
 
        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-       kfence_report_error((unsigned long)addr, addr_to_metadata((unsigned long)addr),
+       kfence_report_error((unsigned long)addr, NULL, addr_to_metadata((unsigned long)addr),
                            KFENCE_ERROR_CORRUPTION);
        return false;
 }
        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-               kfence_report_error((unsigned long)addr, meta, KFENCE_ERROR_INVALID_FREE);
+               kfence_report_error((unsigned long)addr, NULL, meta, KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }
                kfence_guarded_free(addr, meta, false);
 }
 
-bool kfence_handle_page_fault(unsigned long addr)
+bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs)
 {
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
 
 out:
        if (to_report) {
-               kfence_report_error(addr, to_report, error_type);
+               kfence_report_error(addr, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
-               kfence_report_error(addr, NULL, KFENCE_ERROR_INVALID);
+               kfence_report_error(addr, regs, NULL, KFENCE_ERROR_INVALID);
        }
 
        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
 
        KFENCE_ERROR_INVALID_FREE,      /* Invalid free. */
 };
 
-void kfence_report_error(unsigned long address, const struct kfence_metadata *meta,
-                        enum kfence_error_type type);
+void kfence_report_error(unsigned long address, struct pt_regs *regs,
+                        const struct kfence_metadata *meta, enum kfence_error_type type);
 
 void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
 
 
 #include <linux/kernel.h>
 #include <linux/lockdep.h>
 #include <linux/printk.h>
+#include <linux/sched/debug.h>
 #include <linux/seq_file.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 {
        char buf[64];
        int skipnr, fallback = 0;
-       bool is_access_fault = false;
 
        if (type) {
                /* Depending on error type, find different stack entries. */
                case KFENCE_ERROR_UAF:
                case KFENCE_ERROR_OOB:
                case KFENCE_ERROR_INVALID:
-                       is_access_fault = true;
-                       break;
+                       /*
+                        * kfence_handle_page_fault() may be called with pt_regs
+                        * set to NULL; in that case we'll simply show the full
+                        * stack trace.
+                        */
+                       return 0;
                case KFENCE_ERROR_CORRUPTION:
                case KFENCE_ERROR_INVALID_FREE:
                        break;
        for (skipnr = 0; skipnr < num_entries; skipnr++) {
                int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);
 
-               if (is_access_fault) {
-                       if (!strncmp(buf, KFENCE_SKIP_ARCH_FAULT_HANDLER, len))
-                               goto found;
-               } else {
-                       if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") ||
-                           !strncmp(buf, "__slab_free", len)) {
-                               /*
-                                * In case of tail calls from any of the below
-                                * to any of the above.
-                                */
-                               fallback = skipnr + 1;
-                       }
-
-                       /* Also the *_bulk() variants by only checking prefixes. */
-                       if (str_has_prefix(buf, "kfree") ||
-                           str_has_prefix(buf, "kmem_cache_free") ||
-                           str_has_prefix(buf, "__kmalloc") ||
-                           str_has_prefix(buf, "kmem_cache_alloc"))
-                               goto found;
+               if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") ||
+                   !strncmp(buf, "__slab_free", len)) {
+                       /*
+                        * In case of tail calls from any of the below
+                        * to any of the above.
+                        */
+                       fallback = skipnr + 1;
                }
+
+               /* Also the *_bulk() variants by only checking prefixes. */
+               if (str_has_prefix(buf, "kfree") ||
+                   str_has_prefix(buf, "kmem_cache_free") ||
+                   str_has_prefix(buf, "__kmalloc") ||
+                   str_has_prefix(buf, "kmem_cache_alloc"))
+                       goto found;
        }
        if (fallback < num_entries)
                return fallback;
        pr_cont(" ]");
 }
 
-void kfence_report_error(unsigned long address, const struct kfence_metadata *meta,
-                        enum kfence_error_type type)
+void kfence_report_error(unsigned long address, struct pt_regs *regs,
+                        const struct kfence_metadata *meta, enum kfence_error_type type)
 {
        unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
-       int num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
-       int skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
        const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
+       int num_stack_entries;
+       int skipnr = 0;
+
+       if (regs) {
+               num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
+       } else {
+               num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
+               skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
+       }
 
        /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
        if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
 
        /* Print report footer. */
        pr_err("\n");
-       dump_stack_print_info(KERN_ERR);
+       if (IS_ENABLED(CONFIG_DEBUG_KERNEL) && regs)
+               show_regs(regs);
+       else
+               dump_stack_print_info(KERN_ERR);
        pr_err("==================================================================\n");
 
        lockdep_on();