@@ -41,6 +41,15 @@ config BT_HCIBTUSB_AUTOSUSPEND
This can be overridden by passing btusb.enable_autosuspend=[y|n]
on the kernel commandline.
+config BT_HCIBTUSB_INTERVAL
+ bool "Enable notification of USB polling interval"
+ depends on BT_HCIBTUSB
+ help
+ Say Y here to enable notification of USB polling interval for
+ Bluetooth USB devices by default.
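+ This can be overridden by passing btusb.enable_interval=[y|n]
+ on the kernel commandline.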
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
@@ -30,7 +30,7 @@
static bool disable_scofix;
static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
-
+static bool enable_interval = IS_ENABLED(CONFIG_BT_HCIBTUSB_INTERVAL);
static bool reset = true;
static struct usb_driver btusb_driver;
@@ -711,7 +711,15 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
if (!hci_skb_expect(skb)) {
/* Complete frame */
- hci_recv_frame(data->hdev, skb);
+ if (enable_interval) {
+ /* TODO: Calculate polling interval based on
+ * endpoint bInterval?
+ */
+ hci_recv_acl(data->hdev, skb,
+ msecs_to_jiffies(1));
+ } else {
+ hci_recv_frame(data->hdev, skb);
+ }
skb = NULL;
}
}
@@ -3866,7 +3873,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_bulk = btusb_recv_bulk_intel;
set_bit(BTUSB_BOOTLOADER, &data->flags);
} else {
- data->recv_event = hci_recv_frame;
+ data->recv_event = hci_recv_evt;
data->recv_bulk = btusb_recv_bulk;
}
@@ -4335,6 +4342,9 @@ MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
module_param(enable_autosuspend, bool, 0644);
MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+module_param(enable_interval, bool, 0644);
+MODULE_PARM_DESC(enable_interval, "Enable notification of USB polling interval by default");
+
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
@@ -444,7 +444,7 @@ struct hci_dev {
struct delayed_work cmd_timer;
- struct work_struct rx_work;
+ struct delayed_work rx_work;
struct work_struct cmd_work;
struct work_struct tx_work;
@@ -457,6 +457,7 @@ struct hci_dev {
struct delayed_work le_scan_restart;
struct sk_buff_head rx_q;
+ struct sk_buff_head ev_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
@@ -1190,6 +1191,9 @@ int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_recv_acl(struct hci_dev *hdev, struct sk_buff *skb,
+ unsigned int interval);
+int hci_recv_evt(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
@@ -1591,10 +1591,11 @@ static int hci_dev_do_open(struct hci_dev *hdev)
/* Init failed, cleanup */
flush_work(&hdev->tx_work);
flush_work(&hdev->cmd_work);
- flush_work(&hdev->rx_work);
+ cancel_delayed_work_sync(&hdev->rx_work);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->ev_q);
if (hdev->flush)
hdev->flush(hdev);
@@ -1719,7 +1720,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
/* Flush RX and TX works */
flush_work(&hdev->tx_work);
- flush_work(&hdev->rx_work);
+ cancel_delayed_work_sync(&hdev->rx_work);
if (hdev->discov_timeout > 0) {
hdev->discov_timeout = 0;
@@ -1784,6 +1785,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->ev_q);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->raw_q);
@@ -1855,6 +1857,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->ev_q);
skb_queue_purge(&hdev->cmd_q);
/* Avoid potential lockdep warnings from the *_flush() calls by
@@ -3603,16 +3606,17 @@ struct hci_dev *hci_alloc_dev(void)
INIT_LIST_HEAD(&hdev->adv_instances);
INIT_LIST_HEAD(&hdev->blocked_keys);
- INIT_WORK(&hdev->rx_work, hci_rx_work);
INIT_WORK(&hdev->cmd_work, hci_cmd_work);
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->error_reset, hci_error_reset);
INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
+ INIT_DELAYED_WORK(&hdev->rx_work, hci_rx_work);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
skb_queue_head_init(&hdev->rx_q);
+ skb_queue_head_init(&hdev->ev_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
@@ -3856,8 +3860,9 @@ int hci_reset_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_reset_dev);
-/* Receive frame from HCI drivers */
-int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
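+/* Queue the skb and schedule the rx work after @interval jiffies */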
+static int hci_rx_queue(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sk_buff_head *queue, unsigned int interval)
{
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
@@ -3879,13 +3883,36 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Time stamp */
__net_timestamp(skb);
- skb_queue_tail(&hdev->rx_q, skb);
- queue_work(hdev->workqueue, &hdev->rx_work);
+ skb_queue_tail(queue, skb);
+
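+ /* If rx_work is already pending the existing delay is not shortened */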
+ queue_delayed_work(hdev->workqueue, &hdev->rx_work, interval);
return 0;
}
+
+/* Receive frame from HCI drivers */
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return hci_rx_queue(hdev, skb, &hdev->rx_q, 0);
+}
EXPORT_SYMBOL(hci_recv_frame);
+/* Receive ACL frame from HCI drivers */
+int hci_recv_acl(struct hci_dev *hdev, struct sk_buff *skb,
+ unsigned int interval)
+{
+ return hci_rx_queue(hdev, skb, &hdev->rx_q, interval);
+}
+EXPORT_SYMBOL(hci_recv_acl);
+
+/* Receive Event frame from HCI drivers */
+int hci_recv_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return hci_rx_queue(hdev, skb, &hdev->ev_q, 0);
+}
+EXPORT_SYMBOL(hci_recv_evt);
+
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
@@ -3896,7 +3922,7 @@ int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
__net_timestamp(skb);
skb_queue_tail(&hdev->rx_q, skb);
- queue_work(hdev->workqueue, &hdev->rx_work);
+ queue_delayed_work(hdev->workqueue, &hdev->rx_work, 0);
return 0;
}
@@ -4825,14 +4851,11 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
-static void hci_rx_work(struct work_struct *work)
+static void hci_rx_dequeue(struct hci_dev *hdev, struct sk_buff_head *queue)
{
- struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
struct sk_buff *skb;
- BT_DBG("%s", hdev->name);
-
- while ((skb = skb_dequeue(&hdev->rx_q))) {
+ while ((skb = skb_dequeue(queue))) {
/* Send copy to monitor */
hci_send_to_monitor(hdev, skb);
@@ -4888,6 +4911,17 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_rx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work.work);
+
+ BT_DBG("%s", hdev->name);
+
+ /* Process HCI event packets so state changes are synchronized first */
+ hci_rx_dequeue(hdev, &hdev->ev_q);
+ hci_rx_dequeue(hdev, &hdev->rx_q);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);