 /* set if tracing memory operations is enabled */
 static int kmemleak_enabled;
+/* same as above but only for the kmemleak_free() callback */
+static int kmemleak_free_enabled;
 /* set in the late_initcall if there were no errors */
 static int kmemleak_initialized;
 /* enables or disables early logging of the memory operations */
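
The new kmemleak_free_enabled flag mirrors kmemleak_enabled, but the two are
cleared at different points during teardown: allocation tracking must stop as
soon as kmemleak is disabled, while free tracking has to keep working until
the scan thread is gone, otherwise the scanner can step on freed objects. A
minimal userspace model of that asymmetric teardown (names mirror kmemleak.c
for illustration only; this is not kernel code):

#include <stdio.h>

static int kmemleak_enabled = 1;       /* gates allocation/scan tracking */
static int kmemleak_free_enabled = 1;  /* gates only the free callbacks */

static void kmemleak_disable_model(void)
{
	kmemleak_enabled = 0;        /* stop creating new tracked objects */
	/*
	 * The scan thread may still be walking the object list here, so
	 * frees must still remove objects from it. A stop_scan_thread()
	 * equivalent would run at this point.
	 */
	kmemleak_free_enabled = 0;   /* safe only once the scanner stopped */
}

int main(void)
{
	kmemleak_disable_model();
	printf("enabled=%d free_enabled=%d\n",
	       kmemleak_enabled, kmemleak_free_enabled);
	return 0;
}

The next hunk, in kmemleak_free(), switches the free path over to this flag.
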
 {
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
-       if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+       if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
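
Gating the free callback on kmemleak_enabled was the bug being fixed here:
the disable path clears that flag before the scan thread stops, so a kfree()
landing in that window becomes a no-op and its object stays on the list the
scanner is still walking, i.e. the scanner can read freed memory. A
sequential sketch of the window (userspace, illustrative only):

#include <stdio.h>

struct object { unsigned long addr; int live; };

static struct object list[] = { { 0x1000, 1 } };
static int kmemleak_enabled = 1;

/* old behaviour: free tracking gated on the global enable flag */
static void free_hook(unsigned long addr)
{
	if (kmemleak_enabled && list[0].addr == addr)
		list[0].live = 0;            /* delete_object_full() */
}

int main(void)
{
	kmemleak_enabled = 0;    /* disable ran; scanner not yet stopped */
	free_hook(0x1000);       /* ignored: object stays tracked */
	if (list[0].live)
		printf("scanner would still read freed object 0x%lx\n",
		       list[0].addr);
	return 0;
}

The same substitution is applied to kmemleak_free_percpu() below.
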
 
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
-       if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+       if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
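
The per-CPU variant needs the same gating because a per-CPU allocation
registers one object per possible CPU, so the free path must untrack every
copy. A stand-in for the loop, with per_cpu_ptr() modeled as plain array
indexing (illustrative only):

#include <stdio.h>

#define NR_CPUS 4

static void delete_object_full(unsigned long addr)
{
	printf("untrack 0x%lx\n", addr);
}

int main(void)
{
	long pcpu[NR_CPUS];     /* one copy of the variable per CPU */
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)   /* for_each_possible_cpu() */
		delete_object_full((unsigned long)&pcpu[cpu]);
	return 0;
}
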
        mutex_lock(&scan_mutex);
        stop_scan_thread();
 
+       /*
+        * Once the scan thread has stopped, it is safe to no longer track
+        * object freeing. Ordering of the scan thread stopping and the memory
+        * accesses below is guaranteed by the kthread_stop() function.
+        */
+       kmemleak_free_enabled = 0;
+
        if (!kmemleak_found_leaks)
                __kmemleak_do_cleanup();
        else
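
The comment in this hunk leans on kthread_stop() for ordering: it only
returns once the scan thread has exited, so the plain store to
kmemleak_free_enabled afterwards cannot race with the scanner and needs no
extra barriers. pthread_join() provides the same happens-before edge in
userspace (an analogy, not the kernel mechanism):

#include <pthread.h>
#include <stdio.h>

static int free_enabled = 1;

static void *scan_thread(void *arg)
{
	(void)arg;
	/* the scanner may legitimately read the flag while running */
	printf("scanning, free_enabled=%d\n", free_enabled);
	return NULL;
}

int main(void)
{
	pthread_t scanner;

	pthread_create(&scanner, NULL, scan_thread, NULL);
	pthread_join(&scanner, NULL);  /* stop_scan_thread()/kthread_stop() */
	free_enabled = 0;              /* safe: no concurrent reader left */
	return 0;
}
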
        /* check whether it is too early for a kernel thread */
        if (kmemleak_initialized)
                schedule_work(&cleanup_work);
+       else
+               kmemleak_free_enabled = 0;
 
        pr_info("Kernel memory leak detector disabled\n");
 }
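
The new else branch covers disabling before the late_initcall has run:
kmemleak_initialized is still zero, the cleanup work is never scheduled, and
no scan thread exists yet, so the flag can be cleared inline. The two
teardown paths in miniature (a simplified sketch):

#include <stdio.h>

static int initialized;         /* set by the late_initcall in the kernel */
static int free_enabled = 1;

static void schedule_cleanup(void)
{
	/* would stop the scanner, then clear free_enabled (see above) */
	free_enabled = 0;
}

static void disable(void)
{
	if (initialized)
		schedule_cleanup();   /* normal path: deferred teardown */
	else
		free_enabled = 0;     /* too early for a kernel thread */
}

int main(void)
{
	disable();
	printf("free_enabled=%d\n", free_enabled);
	return 0;
}

Finally, the init path below sets both flags together.
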
        if (kmemleak_error) {
                local_irq_restore(flags);
                return;
-       } else
+       } else {
                kmemleak_enabled = 1;
+               kmemleak_free_enabled = 1;
+       }
        local_irq_restore(flags);
 
        /*