        LOCK_USED = 0,
        LOCK_USED_IN_HARDIRQ,
        LOCK_USED_IN_SOFTIRQ,
+       LOCK_USED_IN_RECLAIM_FS,
        LOCK_ENABLED_SOFTIRQS,
        LOCK_ENABLED_HARDIRQS,
+       LOCK_HELD_OVER_RECLAIM_FS,
        LOCK_USED_IN_HARDIRQ_READ,
        LOCK_USED_IN_SOFTIRQ_READ,
+       LOCK_USED_IN_RECLAIM_FS_READ,
        LOCK_ENABLED_SOFTIRQS_READ,
        LOCK_ENABLED_HARDIRQS_READ,
+       LOCK_HELD_OVER_RECLAIM_FS_READ,
        LOCK_USAGE_STATES
 };
 
 #define LOCKF_USED                     (1 << LOCK_USED)
 #define LOCKF_USED_IN_HARDIRQ          (1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ          (1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_USED_IN_RECLAIM_FS       (1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQS         (1 << LOCK_ENABLED_HARDIRQS)
 #define LOCKF_ENABLED_SOFTIRQS         (1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_HELD_OVER_RECLAIM_FS     (1 << LOCK_HELD_OVER_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ     (1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ     (1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_USED_IN_RECLAIM_FS_READ  (1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQS_READ    (1 << LOCK_ENABLED_HARDIRQS_READ)
 #define LOCKF_ENABLED_SOFTIRQS_READ    (1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_HELD_OVER_RECLAIM_FS_READ        (1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQS_READ \
                (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP                          .lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP                          .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)     (debug_locks ? (tsk)->lockdep_depth : 0)
 
 # define lock_release(l, n, i)                 do { } while (0)
 # define lock_set_class(l, n, k, s, i)         do { } while (0)
 # define lock_set_subclass(l, s, i)            do { } while (0)
+# define lockdep_set_current_reclaim_state(g)  do { } while (0)
+# define lockdep_clear_current_reclaim_state() do { } while (0)
+# define lockdep_trace_alloc(g)                        do { } while (0)
 # define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
 
 #if VERBOSE
 # define HARDIRQ_VERBOSE       1
 # define SOFTIRQ_VERBOSE       1
+# define RECLAIM_VERBOSE       1
 #else
 # define HARDIRQ_VERBOSE       0
 # define SOFTIRQ_VERBOSE       0
+# define RECLAIM_VERBOSE       0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
        [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
        [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
        [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
+       [LOCK_USED_IN_RECLAIM_FS] =     "in-reclaim-W",
+       [LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R",
+       [LOCK_HELD_OVER_RECLAIM_FS] =   "ov-reclaim-W",
+       [LOCK_HELD_OVER_RECLAIM_FS_READ] = "ov-reclaim-R",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 }
 
 void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+                                       char *c4, char *c5, char *c6)
 {
-       *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
+       *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.', *c5 = '.', *c6 = '.';
 
        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                *c1 = '+';
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        *c4 = '?';
        }
+
+       if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS)
+               *c5 = '+';
+       else
+               if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS)
+                       *c5 = '-';
+
+       if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+               *c6 = '-';
+       if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) {
+               *c6 = '+';
+               if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+                       *c6 = '?';
+       }
 }
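
With the two extra columns, the usage string printed by print_lock_name() below now has six characters: the four existing irq columns followed by the new reclaim-fs write and read columns. The new columns follow the existing convention: '+' means the class was acquired in that context (here: during __GFP_FS reclaim), '-' means it was held over that context (here: across a __GFP_FS allocation), '?' marks a read acquisition where both apply, and '.' means neither. A class whose last two characters read "+." , for instance, has been taken from the reclaim path but never held over an FS allocation.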
 
 static void print_lock_name(struct lock_class *class)
 {
-       char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+       char str[KSYM_NAME_LEN], c1, c2, c3, c4, c5, c6;
        const char *name;
 
-       get_usage_chars(class, &c1, &c2, &c3, &c4);
+       get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
 
        name = class->name;
        if (!name) {
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
-       printk("){%c%c%c%c}", c1, c2, c3, c4);
+       printk("){%c%c%c%c%c%c}", c1, c2, c3, c4, c5, c6);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
                                        LOCK_ENABLED_SOFTIRQS, "soft"))
                return 0;
 
+       /*
+        * Prove that the new dependency does not connect a reclaim-fs-safe
+        * lock with a reclaim-fs-unsafe lock - to achieve this we search
+        * the backwards-subgraph starting at <prev>, and the
+        * forwards-subgraph starting at <next>:
+        */
+       if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS,
+                                       LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+               return 0;
+
+       /*
+        * Prove that the new dependency does not connect a reclaim-fs-safe-read
+        * lock with a reclaim-fs-unsafe lock - to achieve this we search
+        * the backwards-subgraph starting at <prev>, and the
+        * forwards-subgraph starting at <next>:
+        */
+       if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ,
+                                       LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs-read"))
+               return 0;
+
        return 1;
 }
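
The two checks above encode the classic reclaim inversion: a lock is reclaim-fs-safe once it has been acquired from the reclaim path, and reclaim-fs-unsafe once it has been held across a __GFP_FS allocation; a dependency from the latter to the former can deadlock. A minimal sketch of the pattern, using a made-up filesystem lock and helpers purely for illustration:

/* Illustration only: struct foo, foo_write(), foo_writepage() and
 * page_to_foo() are made up and not part of this patch. */

/*
 * Path 1: foo->lock becomes reclaim-fs-unsafe (HELD_OVER_RECLAIM_FS),
 * because it is held across an allocation that may enter FS reclaim.
 */
static void foo_write(struct foo *f)
{
	void *buf;

	mutex_lock(&f->lock);
	buf = kmalloc(4096, GFP_KERNEL);	/* GFP_KERNEL includes __GFP_FS */
	/* ... */
	kfree(buf);
	mutex_unlock(&f->lock);
}

/*
 * Path 2: foo->lock becomes reclaim-fs-safe (USED_IN_RECLAIM_FS),
 * because reclaim's writeback path takes it while cleaning dirty pages.
 */
static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	struct foo *f = page_to_foo(page);	/* made-up helper */

	mutex_lock(&f->lock);	/* deadlocks if this reclaim was entered from
				 * foo_write() while f->lock was already held */
	/* ... */
	mutex_unlock(&f->lock);
	return 0;
}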
 
        return 0;
 }
 
+static int reclaim_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+       return class_filter(class);
+#endif
+       return 0;
+}
+
 #define STRICT_READ_CHECKS     1
 
 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
+       case LOCK_USED_IN_RECLAIM_FS:
+               if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+                       return 0;
+               if (!valid_state(curr, this, new_bit,
+                                LOCK_HELD_OVER_RECLAIM_FS_READ))
+                       return 0;
+               /*
+                * just marked it reclaim-fs-safe, check that this lock
+                * took no reclaim-fs-unsafe lock in the past:
+                */
+               if (!check_usage_forwards(curr, this,
+                                         LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+                       return 0;
+#if STRICT_READ_CHECKS
+               /*
+                * just marked it reclaim-fs-safe, check that this lock
+                * took no reclaim-fs-unsafe-read lock in the past:
+                */
+               if (!check_usage_forwards(curr, this,
+                               LOCK_HELD_OVER_RECLAIM_FS_READ, "reclaim-fs-read"))
+                       return 0;
+#endif
+               if (reclaim_verbose(hlock_class(this)))
+                       ret = 2;
+               break;
        case LOCK_USED_IN_HARDIRQ_READ:
                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
                        return 0;
                if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
+       case LOCK_USED_IN_RECLAIM_FS_READ:
+               if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+                       return 0;
+               /*
+                * just marked it reclaim-fs-read-safe, check that this lock
+                * took no reclaim-fs-unsafe lock in the past:
+                */
+               if (!check_usage_forwards(curr, this,
+                                         LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+                       return 0;
+               if (reclaim_verbose(hlock_class(this)))
+                       ret = 2;
+               break;
        case LOCK_ENABLED_HARDIRQS:
                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
                        return 0;
                if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
+       case LOCK_HELD_OVER_RECLAIM_FS:
+               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+                       return 0;
+               if (!valid_state(curr, this, new_bit,
+                                LOCK_USED_IN_RECLAIM_FS_READ))
+                       return 0;
+               /*
+                * just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe
+                * lock in the system ever took it in the past:
+                */
+               if (!check_usage_backwards(curr, this,
+                                          LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+                       return 0;
+#if STRICT_READ_CHECKS
+               /*
+                * just marked it reclaim-fs-unsafe, check that no
+                * reclaim-fs-safe-read lock in the system ever took
+                * it in the past:
+                */
+               if (!check_usage_backwards(curr, this,
+                                  LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read"))
+                       return 0;
+#endif
+               if (reclaim_verbose(hlock_class(this)))
+                       ret = 2;
+               break;
        case LOCK_ENABLED_HARDIRQS_READ:
                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
                        return 0;
                if (softirq_verbose(hlock_class(this)))
                        ret = 2;
                break;
+       case LOCK_HELD_OVER_RECLAIM_FS_READ:
+               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+                       return 0;
+#if STRICT_READ_CHECKS
+               /*
+                * just marked it reclaim-fs-read-unsafe, check that no
+                * reclaim-fs-safe lock in the system ever took it in the past:
+                */
+               if (!check_usage_backwards(curr, this,
+                                          LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+                       return 0;
+#endif
+               if (reclaim_verbose(hlock_class(this)))
+                       ret = 2;
+               break;
        default:
                WARN_ON(1);
                break;
        return ret;
 }
 
+enum mark_type {
+       HARDIRQ,
+       SOFTIRQ,
+       RECLAIM_FS,
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
        enum lock_usage_bit usage_bit;
        struct held_lock *hlock;
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
-               if (hardirq) {
+               switch (mark) {
+               case HARDIRQ:
                        if (hlock->read)
                                usage_bit = LOCK_ENABLED_HARDIRQS_READ;
                        else
                                usage_bit = LOCK_ENABLED_HARDIRQS;
-               } else {
+                       break;
+
+               case SOFTIRQ:
                        if (hlock->read)
                                usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
                        else
                                usage_bit = LOCK_ENABLED_SOFTIRQS;
+                       break;
+
+               case RECLAIM_FS:
+                       if (hlock->read)
+                               usage_bit = LOCK_HELD_OVER_RECLAIM_FS_READ;
+                       else
+                               usage_bit = LOCK_HELD_OVER_RECLAIM_FS;
+                       break;
+
+               default:
+                       BUG();
                }
+
                if (!mark_lock(curr, hlock, usage_bit))
                        return 0;
        }
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         */
-       if (!mark_held_locks(curr, 1))
+       if (!mark_held_locks(curr, HARDIRQ))
                return;
        /*
         * If we have softirqs enabled, then set the usage
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, 0))
+               if (!mark_held_locks(curr, SOFTIRQ))
                        return;
 
        curr->hardirq_enable_ip = ip;
         * enabled too:
         */
        if (curr->hardirqs_enabled)
-               mark_held_locks(curr, 0);
+               mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
                debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks))
+               return;
+
+       /* no reclaim without waiting on it */
+       if (!(gfp_mask & __GFP_WAIT))
+               return;
+
+       /* this guy won't enter reclaim */
+       if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+               return;
+
+       /* We're only interested in __GFP_FS allocations for now */
+       if (!(gfp_mask & __GFP_FS))
+               return;
+
+       if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+               return;
+
+       mark_held_locks(curr, RECLAIM_FS);
+}
+
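
lockdep_trace_alloc() is intended to be called from the allocator entry points, so that every lock currently held when a __GFP_FS allocation is made picks up the HELD_OVER_RECLAIM_FS bit. A minimal sketch of a call site (my_alloc() is a made-up wrapper; the real hook placement in the allocators is not part of this excerpt):

static inline void *my_alloc(size_t size, gfp_t gfp_mask)
{
	/* mark all currently held locks as held over this allocation */
	lockdep_trace_alloc(gfp_mask);
	return kmalloc(size, gfp_mask);
}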
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
        /*
                }
        }
 
+       /*
+        * We reuse the irq context infrastructure more broadly as general
+        * context-checking code. Here it detects GFP_FS recursion: a lock
+        * that is taken during reclaim triggered by a GFP_FS allocation must
+        * never itself be held over a GFP_FS allocation.
+        */
+       if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+               if (hlock->read) {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+                               return 0;
+               } else {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
+                               return 0;
+               }
+       }
+
        return 1;
 }
 
        case LOCK_ENABLED_SOFTIRQS:
        case LOCK_ENABLED_HARDIRQS_READ:
        case LOCK_ENABLED_SOFTIRQS_READ:
+       case LOCK_USED_IN_RECLAIM_FS:
+       case LOCK_USED_IN_RECLAIM_FS_READ:
+       case LOCK_HELD_OVER_RECLAIM_FS:
+       case LOCK_HELD_OVER_RECLAIM_FS_READ:
                ret = mark_lock_irq(curr, this, new_bit);
                if (!ret)
                        return 0;
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+       current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+       current->lockdep_reclaim_gfp = 0;
+}
+
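
The set/clear pair brackets the direct-reclaim path: while current->lockdep_reclaim_gfp carries __GFP_FS, every lock acquired is marked USED_IN_RECLAIM_FS by the hunk in mark_irqflags() above. A sketch of how a reclaim entry point would use it (function names and signature are illustrative assumptions; the actual call sites live in mm/ and are not part of this excerpt):

unsigned long my_try_to_free_pages(struct zonelist *zonelist, int order,
				   gfp_t gfp_mask)
{
	unsigned long nr_reclaimed;

	/* locks taken from here on count as "used in reclaim" for this mask */
	lockdep_set_current_reclaim_state(gfp_mask);
	nr_reclaimed = my_do_reclaim(zonelist, order, gfp_mask);	/* made up */
	lockdep_clear_current_reclaim_state();

	return nr_reclaimed;
}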
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,