read_unlock(&hci_sk_list.lock);
 }
 
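+/* Queue frame to every bound monitor channel socket */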
+static void queue_monitor_skb(struct sk_buff *skb)
+{
+       struct sock *sk;
+
+       BT_DBG("len %d", skb->len);
+
+       read_lock(&hci_sk_list.lock);
+
+       sk_for_each(sk, &hci_sk_list.head) {
+               struct sk_buff *nskb;
+
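+               /* Only deliver to sockets bound to the monitor channel */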
+               if (sk->sk_state != BT_BOUND)
+                       continue;
+
+               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
+                       continue;
+
+               nskb = skb_clone(skb, GFP_ATOMIC);
+               if (!nskb)
+                       continue;
+
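+               /* Drop the clone if the socket cannot queue it */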
+               if (sock_queue_rcv_skb(sk, nskb))
+                       kfree_skb(nskb);
+       }
+
+       read_unlock(&hci_sk_list.lock);
+}
+
 /* Send frame to monitor socket */
 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;
@@ ... @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);
 
-       read_lock(&hci_sk_list.lock);
-
-       sk_for_each(sk, &hci_sk_list.head) {
-               struct sk_buff *nskb;
-
-               if (sk->sk_state != BT_BOUND)
-                       continue;
-
-               if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
-                       continue;
-
-               nskb = skb_clone(skb_copy, GFP_ATOMIC);
-               if (!nskb)
-                       continue;
-
-               if (sock_queue_rcv_skb(sk, nskb))
-                       kfree_skb(nskb);
-       }
-
-       read_unlock(&hci_sk_list.lock);
-
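+       /* queue_monitor_skb() clones the skb for each receiver */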
+       queue_monitor_skb(skb_copy);
        kfree_skb(skb_copy);
 }