bdaddr_t bdaddr;
 } __packed;
 
+#define HCI_OP_READ_DATA_BLOCK_SIZE    0x100a
+struct hci_rp_read_data_block_size {
+       __u8     status;
+       __le16   max_acl_len;
+       __le16   block_len;
+       __le16   num_blocks;
+} __packed;
+
 #define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY        0x0c1c
 struct hci_cp_write_page_scan_activity {
        __le16   interval;
 
        hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
 }
 
+static void hci_cc_read_data_block_size(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
+       hdev->block_len = __le16_to_cpu(rp->block_len);
+       hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
+
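+       /* All of the controller's data blocks are free to the host initially */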
+       hdev->block_cnt = hdev->num_blocks;
+
+       BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
+                                       hdev->block_cnt, hdev->block_len);
+
+       hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
+}
+
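The counters cached above (block_mtu, block_len, block_cnt) feed block-based flow control, where the controller accounts for its buffers in fixed-size data blocks rather than in packets. The following is a rough standalone sketch, not part of this patch and using hypothetical names, of how a free-block count like block_cnt might be consumed when queuing ACL data and replenished when the controller reports completed blocks:

	/* Illustration only: block-based flow control bookkeeping. */
	#include <stdio.h>

	struct blk_ctrl {
		unsigned int block_len;	/* bytes per data block, from the controller */
		unsigned int block_cnt;	/* data blocks currently free */
	};

	/* Number of whole blocks a payload of 'len' bytes occupies. */
	static unsigned int blocks_for_len(const struct blk_ctrl *bc,
					   unsigned int len)
	{
		return (len + bc->block_len - 1) / bc->block_len;
	}

	/* Reserve blocks before queuing a packet; returns 0 on success. */
	static int reserve_blocks(struct blk_ctrl *bc, unsigned int len)
	{
		unsigned int need = blocks_for_len(bc, len);

		if (need > bc->block_cnt)
			return -1;	/* controller buffers are full */

		bc->block_cnt -= need;
		return 0;
	}

	/* Return blocks, e.g. on a Number Of Completed Data Blocks event. */
	static void release_blocks(struct blk_ctrl *bc, unsigned int completed)
	{
		bc->block_cnt += completed;
	}

	int main(void)
	{
		struct blk_ctrl bc = { .block_len = 1021, .block_cnt = 8 };

		if (!reserve_blocks(&bc, 1500))
			printf("queued 1500 bytes, %u blocks left\n", bc.block_cnt);

		release_blocks(&bc, 2);
		printf("after completion event: %u blocks free\n", bc.block_cnt);
		return 0;
	}

Rounding up to whole blocks matters because the controller charges buffer usage in block_len units, not bytes, so a packet shorter than one block still consumes a full block.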
 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
                hci_cc_read_bd_addr(hdev, skb);
                break;
 
+       case HCI_OP_READ_DATA_BLOCK_SIZE:
+               hci_cc_read_data_block_size(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_CA_TIMEOUT:
                hci_cc_write_ca_timeout(hdev, skb);
                break;