@@ -302,6 +302,13 @@ struct amp_assoc {
#define HCI_MAX_PAGES 3
+typedef void (*cmd_sync_work_func_t)(struct hci_dev *hdev);
+
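+/* An entry on hdev->cmd_sync_work_list; func is invoked from the
+ * cmd_sync work context, one entry at a time.
+ */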
+struct cmd_sync_work_entry {
+ struct list_head list;
+ cmd_sync_work_func_t func;
+};
+
struct hci_dev {
struct list_head list;
struct mutex lock;
@@ -463,6 +470,9 @@ struct hci_dev {
struct work_struct power_on;
struct delayed_work power_off;
struct work_struct error_reset;
+ struct work_struct cmd_sync_work;
+ struct list_head cmd_sync_work_list;
+ struct mutex cmd_sync_work_lock;
__u16 discov_timeout;
struct delayed_work discov_off;
@@ -1687,6 +1697,8 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
+void hci_cmd_sync_queue(struct hci_dev *hdev, cmd_sync_work_func_t func);
+
u32 hci_conn_get_phy(struct hci_conn *conn);
/* ----- HCI Sockets ----- */
@@ -2325,6 +2325,50 @@ static void hci_error_reset(struct work_struct *work)
hci_dev_do_open(hdev);
}
+static void hci_cmd_sync_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+ struct cmd_sync_work_entry *entry;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Dequeue and run every pending entry; entries queued while this
+ * work item was still pending would otherwise be left on the list.
+ */
+ while (1) {
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
+ struct cmd_sync_work_entry, list);
+ if (entry)
+ list_del(&entry->list);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ if (!entry)
+ break;
+
+ hci_req_sync_lock(hdev);
+ entry->func(hdev);
+ hci_req_sync_unlock(hdev);
+
+ kfree(entry);
+ }
+}
+
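+/* Queue @func for execution from the cmd_sync work context. Queued
+ * callbacks run one at a time with the request lock held, so each
+ * callback can issue __hci_cmd_sync() commands and act on the results.
+ */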
+void hci_cmd_sync_queue(struct hci_dev *hdev, cmd_sync_work_func_t func)
+{
+ struct cmd_sync_work_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->func = func;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+}
+
void hci_uuids_clear(struct hci_dev *hdev)
{
struct bt_uuid *uuid, *tmp;
@@ -3821,6 +3865,10 @@ struct hci_dev *hci_alloc_dev(void)
INIT_WORK(&hdev->error_reset, hci_error_reset);
INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
+ INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+ INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+ mutex_init(&hdev->cmd_sync_work_lock);
+
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
skb_queue_head_init(&hdev->rx_q);
The usage of __hci_cmd_sync() within the hdev->setup() callback allows for a nice and simple serialized execution of HCI commands. More importantly, it allows for result processing before issuing the next command. With the current usage of hci_req_run() it is possible to batch up commands and execute them, but it is impossible to react to their results or errors.

This is an attempt to generalize the hdev->setup() handling and provide a simple way of running multiple HCI commands from a single function context.

There are multiple dedicated struct work items for certain tasks already in use right now. This adds a lot of bloat to the hci_dev struct and extra handling code. It might be possible to put all of these behind a common HCI command infrastructure and just execute the HCI commands from the same work context in a serialized fashion.

For example, updating the white list and the resolving list can now be done without having to know the list size ahead of time. Also, preparing for suspend or resume shouldn't require a state machine anymore. There are other tasks that could be simplified as well.

Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
---
 include/net/bluetooth/hci_core.h | 12 ++++++++
 net/bluetooth/hci_core.c         | 48 ++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
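As an illustration of the intended usage (not part of the patch itself), here is a minimal sketch of a caller. The names example_read_version_sync() and example_trigger() and the choice of HCI_OP_READ_LOCAL_VERSION are made up for this example; it assumes the simplified hci_cmd_sync_queue() signature introduced above, while __hci_cmd_sync(), HCI_CMD_TIMEOUT and struct hci_rp_read_local_version are existing kernel symbols.

#include <linux/err.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical callback: it runs from the cmd_sync work context with the
 * request lock held, so each __hci_cmd_sync() call completes before the
 * next command is issued and its result can be inspected in between.
 */
static void example_read_version_sync(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading local version failed (%ld)",
                           PTR_ERR(skb));
                return;
        }

        if (skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return;
        }

        rp = (struct hci_rp_read_local_version *)skb->data;

        /* React to the result before deciding on any follow-up command */
        if (!rp->status)
                bt_dev_dbg(hdev, "HCI version %u", rp->hci_ver);

        kfree_skb(skb);
}

/* Hypothetical trigger: queue the callback; it will run serialized with
 * any other queued callbacks on hdev->req_workqueue.
 */
static void example_trigger(struct hci_dev *hdev)
{
        hci_cmd_sync_queue(hdev, example_read_version_sync);
}

Compared to hci_req_run(), the callback can branch on rp->status or on the error returned by __hci_cmd_sync() before deciding whether to issue further commands.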