struct rw_semaphore     rw_sem; /* slowpath */
        struct rcuwait          writer; /* blocked writer */
        int                     readers_block;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)  .dep_map = { .name = #lockname },
+#else
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __DEFINE_PERCPU_RWSEM(name, is_static)                         \
 static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);         \
 is_static struct percpu_rw_semaphore name = {                          \
        .read_count = &__percpu_rwsem_rc_##name,                        \
        .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                     \
        .writer = __RCUWAIT_INITIALIZER(name.writer),                   \
+       __PERCPU_RWSEM_DEP_MAP_INIT(name)                               \
 }
+
 #define DEFINE_PERCPU_RWSEM(name)              \
        __DEFINE_PERCPU_RWSEM(name, /* not static */)
 #define DEFINE_STATIC_PERCPU_RWSEM(name)       \
 {
        might_sleep();
 
-       rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+       rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
        preempt_disable();
        /*
         */
 
        if (ret)
-               rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+               rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
 
        return ret;
 }
 
 static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
+       rwsem_release(&sem->dep_map, _RET_IP_);
+
        preempt_disable();
        /*
         * Same as in percpu_down_read().
        else
                __percpu_up_read(sem); /* Unconditional memory barrier */
        preempt_enable();
-
-       rwsem_release(&sem->rw_sem.dep_map, _RET_IP_);
 }
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
        __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
 })
 
-#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
-
-#define percpu_rwsem_assert_held(sem)                          \
-       lockdep_assert_held(&(sem)->rw_sem)
+#define percpu_rwsem_is_held(sem)      lockdep_is_held(sem)
+#define percpu_rwsem_assert_held(sem)  lockdep_assert_held(sem)
 
 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
 {
-       lock_release(&sem->rw_sem.dep_map, ip);
+       lock_release(&sem->dep_map, ip);
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        if (!read)
                atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
 static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
 {
-       lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
+       lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        if (!read)
                atomic_long_set(&sem->rw_sem.owner, (long)current);
 
 #include "rwsem.h"
 
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
-                       const char *name, struct lock_class_key *rwsem_key)
+                       const char *name, struct lock_class_key *key)
 {
        sem->read_count = alloc_percpu(int);
        if (unlikely(!sem->read_count))
 
-       /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
+       /* ->dep_map represents the whole percpu_rw_semaphore for lockdep */
        rcu_sync_init(&sem->rss);
-       __init_rwsem(&sem->rw_sem, name, rwsem_key);
+       init_rwsem(&sem->rw_sem);
        rcuwait_init(&sem->writer);
        sem->readers_block = 0;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+       lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
        return 0;
 }
 EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
 
 void percpu_down_write(struct percpu_rw_semaphore *sem)
 {
+       rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
        /* Notify readers to take the slow path. */
        rcu_sync_enter(&sem->rss);
 
-       down_write(&sem->rw_sem);
+       __down_write(&sem->rw_sem);
 
        /*
         * Notify new readers to block; up until now, and thus throughout the
 
 void percpu_up_write(struct percpu_rw_semaphore *sem)
 {
+       rwsem_release(&sem->dep_map, _RET_IP_);
+
        /*
         * Signal the writer is done, no fast path yet.
         *
        /*
         * Release the write lock, this will allow readers back in the game.
         */
-       up_write(&sem->rw_sem);
+       __up_write(&sem->rw_sem);
 
        /*
         * Once this completes (at least one RCU-sched grace period hence) the