 #define unix_state_rlock(s)    spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s)  spin_unlock(&unix_sk(s)->lock)
 #define unix_state_wlock(s)    spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+                               spin_lock_nested(&unix_sk(s)->lock, \
+                               SINGLE_DEPTH_NESTING)
 #define unix_state_wunlock(s)  spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
 
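For context: spin_lock_nested() tells lockdep that taking a second lock
belonging to the same lock class is intentional, so it does not report a
false recursive-locking deadlock. A minimal sketch of the pattern the new
macro enables (struct foo and foo_lock_pair are illustrative names, not
part of this patch):

	/*
	 * Illustrative sketch only: locking two objects whose locks
	 * share one lock class. Without the _nested annotation,
	 * lockdep would flag the second spin_lock() as recursion.
	 */
	#include <linux/spinlock.h>

	struct foo {
		spinlock_t lock;
	};

	static void foo_lock_pair(struct foo *a, struct foo *b)
	{
		spin_lock(&a->lock);		/* nesting depth 0 */
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
						/* same class, depth 1 */
		/* ... operate on both objects ... */
		spin_unlock(&b->lock);
		spin_unlock(&a->lock);
	}

SINGLE_DEPTH_NESTING is the stock lockdep constant for one level of
nesting; a fixed lock order (here a before b) is still required for
correctness, since the annotation only silences the class-level check.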
        .obj_size = sizeof(struct unix_sock),
 };
 
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * don't trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
 static struct sock * unix_create1(struct socket *sock)
 {
        struct sock *sk = NULL;
        atomic_inc(&unix_nr_socks);
 
        sock_init_data(sock, sk);
+       lockdep_set_class(&sk->sk_receive_queue.lock,
+                               &af_unix_sk_receive_queue_lock_key);
 
        sk->sk_write_space      = unix_write_space;
        sk->sk_max_ack_backlog  = sysctl_unix_max_dgram_qlen;
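lockdep_set_class() re-keys an already initialized lock into its own
lockdep class via a static struct lock_class_key, exactly as the hunk
above does for sk_receive_queue.lock right after sock_init_data() gives
it the default class. A hedged sketch of the general pattern (the key
and function names here are hypothetical):

	/* Illustrative sketch: give one lock instance its own class. */
	#include <linux/lockdep.h>
	#include <linux/skbuff.h>

	static struct lock_class_key my_queue_lock_key;	/* hypothetical */

	static void my_queue_init(struct sk_buff_head *q)
	{
		skb_queue_head_init(q);	/* lock gets the default class */
		lockdep_set_class(&q->lock, &my_queue_lock_key);
	}

Splitting the class this way means dependency chains involving AF_UNIX
receive-queue locks are tracked separately from those of hardware-driven
sockets, whose queue locks must stay bh-safe.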
                goto out_unlock;
        }
 
-       unix_state_wlock(sk);
+       unix_state_wlock_nested(sk);
 
        if (sk->sk_state != st) {
                unix_state_wunlock(sk);
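The nested variant is needed in this hunk because unix_stream_connect()
already holds the peer's unix_state lock when it latches sk's state, and
both locks live in the same lock class. A condensed, illustrative view of
that ordering (simplified from the surrounding function, not a literal
quote of it):

	/* Simplified: the peer's lock is taken first and is still held */
	unix_state_rlock(other);
	...
	/* sk's lock shares other's lock class: annotate the nesting */
	unix_state_wlock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_wunlock(sk);
		...
	}

Self-connect and simultaneous-connect cases are ruled out by the socket
state checks around this code, so the two locks are always distinct
objects and the SINGLE_DEPTH_NESTING annotation is safe.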