1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 /* Get HCI device by index.
66 * Device is held on return. */
__hci_dev_get(int index,int * srcu_index)67 static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
68 {
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 if (srcu_index)
81 *srcu_index = srcu_read_lock(&d->srcu);
82 break;
83 }
84 }
85 read_unlock(&hci_dev_list_lock);
86 return hdev;
87 }
88
/* Get HCI device by index; the device's reference count is incremented
 * on success, so the caller must balance with hci_dev_put().
 */
struct hci_dev *hci_dev_get(int index)
{
	return __hci_dev_get(index, NULL);
}
93
/* Like hci_dev_get(), but also enters the device's SRCU read section;
 * the returned cookie in *srcu_index must be passed to
 * hci_dev_put_srcu() to drop both the SRCU section and the reference.
 */
static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
{
	return __hci_dev_get(index, srcu_index);
}
98
/* Counterpart of hci_dev_get_srcu(): leave the SRCU read-side critical
 * section and drop the device reference.
 */
static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
{
	srcu_read_unlock(&hdev->srcu, srcu_index);
	hci_dev_put(hdev);
}
104
105 /* ---- Inquiry support ---- */
106
hci_discovery_active(struct hci_dev * hdev)107 bool hci_discovery_active(struct hci_dev *hdev)
108 {
109 struct discovery_state *discov = &hdev->discovery;
110
111 switch (discov->state) {
112 case DISCOVERY_FINDING:
113 case DISCOVERY_RESOLVING:
114 return true;
115
116 default:
117 return false;
118 }
119 }
120 EXPORT_SYMBOL(hci_discovery_active);
121
hci_discovery_set_state(struct hci_dev * hdev,int state)122 void hci_discovery_set_state(struct hci_dev *hdev, int state)
123 {
124 int old_state = hdev->discovery.state;
125
126 if (old_state == state)
127 return;
128
129 hdev->discovery.state = state;
130
131 switch (state) {
132 case DISCOVERY_STOPPED:
133 hci_update_passive_scan(hdev);
134
135 if (old_state != DISCOVERY_STARTING)
136 mgmt_discovering(hdev, 0);
137 break;
138 case DISCOVERY_STARTING:
139 break;
140 case DISCOVERY_FINDING:
141 mgmt_discovering(hdev, 1);
142 break;
143 case DISCOVERY_RESOLVING:
144 break;
145 case DISCOVERY_STOPPING:
146 break;
147 }
148
149 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
150 }
151
hci_inquiry_cache_flush(struct hci_dev * hdev)152 void hci_inquiry_cache_flush(struct hci_dev *hdev)
153 {
154 struct discovery_state *cache = &hdev->discovery;
155 struct inquiry_entry *p, *n;
156
157 list_for_each_entry_safe(p, n, &cache->all, all) {
158 list_del(&p->all);
159 kfree(p);
160 }
161
162 INIT_LIST_HEAD(&cache->unknown);
163 INIT_LIST_HEAD(&cache->resolve);
164 }
165
/* Find a cached inquiry entry by Bluetooth address, searching the
 * complete cache. Returns NULL when the address is not cached.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *entry;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(entry, &cache->all, all) {
		if (bacmp(&entry->data.bdaddr, bdaddr) == 0)
			return entry;
	}

	return NULL;
}
181
/* Find a cached inquiry entry whose remote name is still unknown.
 * Only the "unknown" sub-list is searched.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *entry;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(entry, &cache->unknown, list) {
		if (bacmp(&entry->data.bdaddr, bdaddr) == 0)
			return entry;
	}

	return NULL;
}
197
/* Find an entry on the name-resolve list. With BDADDR_ANY the first
 * entry in the given name_state is returned (used to pick the next
 * device to resolve); otherwise the entry matching @bdaddr.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *entry;
	bool any = !bacmp(bdaddr, BDADDR_ANY);

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(entry, &cache->resolve, list) {
		if (any && entry->name_state == state)
			return entry;
		if (bacmp(&entry->data.bdaddr, bdaddr) == 0)
			return entry;
	}

	return NULL;
}
216
hci_inquiry_cache_update_resolve(struct hci_dev * hdev,struct inquiry_entry * ie)217 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
218 struct inquiry_entry *ie)
219 {
220 struct discovery_state *cache = &hdev->discovery;
221 struct list_head *pos = &cache->resolve;
222 struct inquiry_entry *p;
223
224 list_del(&ie->list);
225
226 list_for_each_entry(p, &cache->resolve, list) {
227 if (p->name_state != NAME_PENDING &&
228 abs(p->data.rssi) >= abs(ie->data.rssi))
229 break;
230 pos = &p->list;
231 }
232
233 list_add(&ie->list, pos);
234 }
235
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result data to store
 * @name_known: true if the remote name is already known
 *
 * Returns MGMT_DEV_FOUND_* flags describing how userspace should treat
 * the device (legacy pairing, name confirmation needed).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed: re-sort the entry within the resolve
		 * list so stronger devices get their names resolved
		 * first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc_obj(*ie);
	if (!ie) {
		/* Allocation failed: fall back to asking userspace to
		 * confirm the name instead of caching anything.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * sub-list (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
297
/* Serialize up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info. Returns the number of entries written. Must not
 * sleep (runs under hci_dev_lock).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *entry;
	int n = 0;

	list_for_each_entry(entry, &cache->all, all) {
		struct inquiry_data *data = &entry->data;

		if (n >= num)
			break;

		bacpy(&info[n].bdaddr, &data->bdaddr);
		info[n].pscan_rep_mode = data->pscan_rep_mode;
		info[n].pscan_period_mode = data->pscan_period_mode;
		info[n].pscan_mode = data->pscan_mode;
		memcpy(info[n].dev_class, data->dev_class, 3);
		info[n].clock_offset = data->clock_offset;

		n++;
	}

	BT_DBG("cache %p, copied %d", cache, n);
	return n;
}
325
/* Handle the HCIINQUIRY ioctl: optionally run a new inquiry, then copy
 * the cached inquiry results back to user space.
 *
 * @arg: user pointer to a struct hci_inquiry_req followed by space for
 *	 the requested number of struct inquiry_info entries.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Raw ioctl access is rejected while a user channel owns the
	 * device.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Inquiry is a BR/EDR-only procedure */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	/* Only start a fresh inquiry when the cache is stale, empty, or
	 * a flush was explicitly requested.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	if (do_inquiry) {
		hci_req_sync_lock(hdev);
		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
		hci_req_sync_unlock(hdev);

		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the (updated) request header back first, then the dumped
	 * inquiry_info entries right after it.
	 */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
422
/* Power up the controller under the request-sync lock. Thin wrapper
 * around hci_dev_open_sync(); returns its status.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
436
437 /* ---- HCI ioctl helpers ---- */
438
/* Bring up HCI device @dev (HCIDEVUP ioctl path). Returns 0 on success
 * or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
493
/* Shut down the controller under the request-sync lock. Thin wrapper
 * around hci_dev_close_sync(); returns its status.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}
508
/* Bring down HCI device @dev (HCIDEVDOWN ioctl path). Refused with
 * -EBUSY while a user channel owns the device.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
	} else {
		/* Make sure no power on/off work races with the close */
		cancel_work_sync(&hdev->power_on);
		if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
			cancel_delayed_work(&hdev->power_off);

		err = hci_dev_do_close(hdev);
	}

	hci_dev_put(hdev);
	return err;
}
533
/* Perform a full reset of the controller state: purge queued packets
 * and commands, quiesce the command machinery, flush caches and
 * connections, and finally issue HCI_Reset to the hardware.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *	queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Reset flow-control bookkeeping back to its initial state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
586
/* HCIDEVRESET ioctl handler. The device is held under SRCU for the
 * duration of the reset so a concurrent unregister cannot free it.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err, srcu_index;

	hdev = hci_dev_get_srcu(dev, &srcu_index);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* Not permitted while a user channel owns the device */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put_srcu(hdev, srcu_index);
	return err;
}
617
/* HCIDEVRESTAT ioctl handler: zero the device's byte/packet/error
 * statistics counters.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		ret = -EBUSY;
	else if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		ret = -EOPNOTSUPP;
	else
		memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
643
/* Sync the CONNECTABLE/DISCOVERABLE flags with a scan-enable value set
 * through the legacy HCISETSCAN ioctl, and notify mgmt if anything
 * actually changed.
 */
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan <-> connectable */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	/* Inquiry scan <-> discoverable (limited mode is dropped when
	 * discoverability is turned off).
	 */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	/* Only mgmt-controlled devices need the notifications below */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
679
/* Dispatcher for the legacy HCISET* ioctls that tweak a single device
 * setting (auth, encryption, scan enable, link policy/mode, packet
 * type, ACL/SCO MTU). @arg points to a user struct hci_dev_req.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	__le16 policy;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These ioctls configure BR/EDR-specific settings only */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_cmd_sync_status(hdev,
						  HCI_OP_WRITE_AUTH_ENABLE,
						  1, &dr.dev_opt,
						  HCI_CMD_TIMEOUT);
			if (err)
				break;
		}

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		policy = cpu_to_le16(dr.dev_opt);

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					  2, &policy, HCI_CMD_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Host-side setting only; no command goes to the
		 * controller.
		 */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* dev_opt packs packet count in the low 16 bits and MTU
		 * in the high 16 bits.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
785
/* HCIGETDEVLIST ioctl handler: copy the id and flags of up to the
 * requested number of registered devices to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, err;
	__u16 dev_num;

	/* dev_num is the first field of struct hci_dev_list_req */
	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request to bound the kernel allocation size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	dl = kzalloc_flex(*dl, dev_req, dev_num);
	if (!dl)
		return -ENOMEM;

	dl->dev_num = dev_num;
	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		dr[n].dev_id  = hdev->id;
		dr[n].dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report back how many entries were actually filled in */
	dl->dev_num = n;
	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
	kfree(dl);

	return err ? -EFAULT : 0;
}
832
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for the
 * device id supplied by user space and copy it back.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields; SCO does not apply to them.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
885
886 /* ---- Interface to HCI drivers ---- */
887
/* Power the controller off under the request-sync lock. Thin wrapper
 * around hci_set_powered_sync(hdev, false); returns its status.
 */
static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

	return err;
}
902
/* rfkill set_block callback. Tracks the RFKILLED flag and powers the
 * device off when it becomes blocked (unless still in setup/config,
 * where the setup code handles it). @data is the struct hci_dev.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	/* A user channel owns the device; refuse to interfere */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	/* Nothing to do when the state already matches */
	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
939
/* rfkill operations: only the block/unblock callback is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
943
/* Deferred power-on work item. Opens the device, re-checks conditions
 * that were ignored during setup (rfkill, missing address), schedules
 * auto power-off when applicable, and announces the index to mgmt once
 * setup or configuration completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Device already up via auto-off: just refresh the powered
	 * state and report the result to mgmt.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
1012
/* Deferred power-off work item (also used by the auto-off timeout):
 * simply closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1022
/* Work item run after a controller hardware error: let the driver
 * react first (or just log the error code), then power-cycle the
 * device. A reference is held across the close/open cycle.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* Only re-open if the close succeeded */
	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}
1040
hci_uuids_clear(struct hci_dev * hdev)1041 void hci_uuids_clear(struct hci_dev *hdev)
1042 {
1043 struct bt_uuid *uuid, *tmp;
1044
1045 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1046 list_del(&uuid->list);
1047 kfree(uuid);
1048 }
1049 }
1050
/* Remove all stored BR/EDR link keys. The list is read under RCU
 * elsewhere, so entries are unlinked with list_del_rcu() and freed
 * only after a grace period via kfree_rcu().
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
1060
/* Remove all stored LE long term keys (RCU-safe removal, deferred
 * free after a grace period).
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1070
/* Remove all stored identity resolving keys (RCU-safe removal,
 * deferred free after a grace period).
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1080
/* Remove all entries from the blocked-keys list (RCU-safe removal,
 * deferred free after a grace period).
 */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
1090
hci_is_blocked_key(struct hci_dev * hdev,u8 type,u8 val[16])1091 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1092 {
1093 bool blocked = false;
1094 struct blocked_key *b;
1095
1096 rcu_read_lock();
1097 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1098 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1099 blocked = true;
1100 break;
1101 }
1102 }
1103
1104 rcu_read_unlock();
1105 return blocked;
1106 }
1107
/* Look up the stored BR/EDR link key for @bdaddr. Keys present on the
 * blocked-keys list are treated as absent (NULL is returned with a
 * rate-limited warning).
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			/* Drop the RCU lock before the (potentially
			 * logging) blocked-key check and return.
			 */
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1133
/* Decide whether a newly received link key should be stored
 * persistently, based on its type and the bonding requirements both
 * sides negotiated on @conn (may be NULL for security mode 3).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1173
/* Map an LTK type to the connection role it is valid for. */
static u8 ltk_role(u8 type)
{
	return (type == SMP_LTK) ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
}
1181
/* Look up a long term key by address, address type and role. Secure
 * Connections keys match either role. Keys on the blocked-keys list
 * are treated as absent (NULL with a rate-limited warning).
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			/* Drop the RCU lock before the blocked-key
			 * check and return.
			 */
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1210
/* Resolve a Resolvable Private Address to its IRK.
 *
 * First pass: cheap comparison against each IRK's cached RPA.  Second
 * pass: cryptographic resolution via smp_irk_matches(); on success the
 * RPA is cached in the IRK so future lookups take the fast path.  A
 * matching IRK that is on the blocked-key list yields NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for faster future lookups */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1244
/* Look up an IRK by its identity address and address type.  Returns
 * NULL if the address cannot be an identity address, if no IRK is
 * stored for it, or if the stored IRK is on the blocked-key list.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random (the two most
	 * significant bits of a static random address are both set).
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			break;
		}
	}

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1275
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * If a key already exists for the address it is updated in place,
 * otherwise a new entry is allocated and added to hdev->link_keys.
 * When @persistent is non-NULL it is set to whether the key should be
 * stored persistently (see hci_persistent_key()).  Returns the stored
 * key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff signals "no previous key" to hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc_obj(*key);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
1322
/* Store (or refresh) a Long Term Key for @bdaddr/@addr_type.  An
 * existing entry for the same address and role is updated in place;
 * otherwise a new entry is allocated and linked into
 * hdev->long_term_keys.  Returns the entry, or NULL on allocation
 * failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	u8 role = ltk_role(type);
	struct smp_ltk *key;

	key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (!key) {
		key = kzalloc_obj(*key);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->enc_size = enc_size;
	key->authenticated = authenticated;
	key->type = type;
	key->ediv = ediv;
	key->rand = rand;

	return key;
}
1351
/* Store (or refresh) an Identity Resolving Key for @bdaddr/@addr_type.
 * The key value and last known RPA are always updated; a new list
 * entry is only allocated when no IRK exists for the identity address.
 * Returns the entry, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);

	if (!irk) {
		irk = kzalloc_obj(*irk);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;
		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Always refresh the key material and the cached RPA */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
1374
/* Remove the BR/EDR link key stored for @bdaddr.  The entry is freed
 * via RCU.  Returns 0 on success or -ENOENT when no key exists.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key = hci_find_link_key(hdev, bdaddr);

	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
1390
/* Remove all Long Term Keys stored for @bdaddr/@bdaddr_type (entries
 * are freed via RCU).  Returns 0 if at least one key was removed,
 * -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	bool found = false;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (k->bdaddr_type != bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		found = true;
	}

	return found ? 0 : -ENOENT;
}
1409
/* Remove any Identity Resolving Key stored for @bdaddr/@addr_type;
 * entries are freed via RCU.  Missing entries are silently ignored.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *irk, *tmp;

	list_for_each_entry_safe(irk, tmp, &hdev->identity_resolving_keys,
				 list) {
		if (irk->addr_type != addr_type ||
		    bacmp(bdaddr, &irk->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&irk->list);
		kfree_rcu(irk, rcu);
	}
}
1424
/* Check whether any pairing material exists for @bdaddr of the given
 * mgmt address @type: a link key for BR/EDR, or an LTK for LE.  For LE
 * addresses an IRK lookup first maps the address to the stored
 * identity address before searching the LTK list.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK is known, search by the identity address instead */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
1460
/* HCI command timer function.  Runs when a sent HCI command was not
 * acknowledged within the command timeout: logs the stuck opcode (if
 * the request skb is still around), cancels the pending sync command
 * with ETIMEDOUT, optionally triggers the driver's reset hook and
 * re-arms command processing by restoring the command credit.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Let the driver attempt a hardware reset if it provides one */
	if (hdev->reset)
		hdev->reset(hdev);

	/* Restore one command credit and kick the command work queue */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1483
/* HCI ncmd timer function.  Fires when the controller has reported
 * zero free command slots (ncmd == 0) for too long, i.e. it stopped
 * accepting commands entirely.
 */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}
1501
/* Find remote Out-Of-Band pairing data stored for @bdaddr with the
 * given address type.  Returns NULL when no entry matches.
 */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (data->bdaddr_type == bdaddr_type &&
		    !bacmp(bdaddr, &data->bdaddr))
			return data;
	}

	return NULL;
}
1517
/* Remove the remote OOB data entry for @bdaddr/@bdaddr_type.  Returns
 * 0 on success, -ENOENT when no such entry exists.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data = hci_find_remote_oob_data(hdev, bdaddr,
							 bdaddr_type);

	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}
1534
hci_remote_oob_data_clear(struct hci_dev * hdev)1535 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1536 {
1537 struct oob_data *data, *n;
1538
1539 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1540 list_del(&data->list);
1541 kfree(data);
1542 }
1543 }
1544
/* Store remote Out-Of-Band pairing data for @bdaddr.  P-192 values
 * (hash192/rand192) and P-256 values (hash256/rand256) may each be
 * present or absent; the resulting ->present bitmask encodes which:
 * 0x00 = none, 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 * Absent values are zeroed out.  Returns 0, or -ENOMEM if a new entry
 * could not be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc_obj(*data);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* P-192 only: the first branch above left ->present unset */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1590
1591 /* This function requires the caller holds hdev->lock */
hci_find_adv_instance(struct hci_dev * hdev,u8 instance)1592 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1593 {
1594 struct adv_info *adv_instance;
1595
1596 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1597 if (adv_instance->instance == instance)
1598 return adv_instance;
1599 }
1600
1601 return NULL;
1602 }
1603
1604 /* This function requires the caller holds hdev->lock */
hci_find_adv_sid(struct hci_dev * hdev,u8 sid)1605 struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid)
1606 {
1607 struct adv_info *adv;
1608
1609 list_for_each_entry(adv, &hdev->adv_instances, list) {
1610 if (adv->sid == sid)
1611 return adv;
1612 }
1613
1614 return NULL;
1615 }
1616
1617 /* This function requires the caller holds hdev->lock */
hci_get_next_instance(struct hci_dev * hdev,u8 instance)1618 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1619 {
1620 struct adv_info *cur_instance;
1621
1622 cur_instance = hci_find_adv_instance(hdev, instance);
1623 if (!cur_instance)
1624 return NULL;
1625
1626 if (cur_instance == list_last_entry(&hdev->adv_instances,
1627 struct adv_info, list))
1628 return list_first_entry(&hdev->adv_instances,
1629 struct adv_info, list);
1630 else
1631 return list_next_entry(cur_instance, list);
1632 }
1633
1634 /* This function requires the caller holds hdev->lock */
hci_remove_adv_instance(struct hci_dev * hdev,u8 instance)1635 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1636 {
1637 struct adv_info *adv_instance;
1638
1639 adv_instance = hci_find_adv_instance(hdev, instance);
1640 if (!adv_instance)
1641 return -ENOENT;
1642
1643 BT_DBG("%s removing %dMR", hdev->name, instance);
1644
1645 if (hdev->cur_adv_instance == instance) {
1646 if (hdev->adv_instance_timeout) {
1647 cancel_delayed_work(&hdev->adv_instance_expire);
1648 hdev->adv_instance_timeout = 0;
1649 }
1650 hdev->cur_adv_instance = 0x00;
1651 }
1652
1653 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1654
1655 list_del(&adv_instance->list);
1656 kfree(adv_instance);
1657
1658 hdev->adv_instance_cnt--;
1659
1660 return 0;
1661 }
1662
/* Set the rpa_expired flag on every advertising instance at once. */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv, *next;

	list_for_each_entry_safe(adv, next, &hdev->adv_instances, list)
		adv->rpa_expired = rpa_expired;
}
1670
/* Tear down all advertising instances: disables the global expiry
 * work, then for each instance synchronously disables its RPA-expiry
 * work before unlinking and freeing it, and finally resets the
 * instance count and current instance.
 * This function requires the caller holds hdev->lock.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		disable_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		/* Must not run concurrently with the free below, hence
		 * the synchronous disable.
		 */
		disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
1690
adv_instance_rpa_expired(struct work_struct * work)1691 static void adv_instance_rpa_expired(struct work_struct *work)
1692 {
1693 struct adv_info *adv_instance = container_of(work, struct adv_info,
1694 rpa_expired_cb.work);
1695
1696 BT_DBG("");
1697
1698 adv_instance->rpa_expired = true;
1699 }
1700
1701 /* This function requires the caller holds hdev->lock */
hci_add_adv_instance(struct hci_dev * hdev,u8 instance,u32 flags,u16 adv_data_len,u8 * adv_data,u16 scan_rsp_len,u8 * scan_rsp_data,u16 timeout,u16 duration,s8 tx_power,u32 min_interval,u32 max_interval,u8 mesh_handle)1702 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1703 u32 flags, u16 adv_data_len, u8 *adv_data,
1704 u16 scan_rsp_len, u8 *scan_rsp_data,
1705 u16 timeout, u16 duration, s8 tx_power,
1706 u32 min_interval, u32 max_interval,
1707 u8 mesh_handle)
1708 {
1709 struct adv_info *adv;
1710
1711 adv = hci_find_adv_instance(hdev, instance);
1712 if (adv) {
1713 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1714 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1715 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1716 } else {
1717 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1718 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1719 return ERR_PTR(-EOVERFLOW);
1720
1721 adv = kzalloc_obj(*adv);
1722 if (!adv)
1723 return ERR_PTR(-ENOMEM);
1724
1725 adv->pending = true;
1726 adv->instance = instance;
1727
1728 /* If controller support only one set and the instance is set to
1729 * 1 then there is no option other than using handle 0x00.
1730 */
1731 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1732 adv->handle = 0x00;
1733 else
1734 adv->handle = instance;
1735
1736 list_add(&adv->list, &hdev->adv_instances);
1737 hdev->adv_instance_cnt++;
1738 }
1739
1740 adv->flags = flags;
1741 adv->min_interval = min_interval;
1742 adv->max_interval = max_interval;
1743 adv->tx_power = tx_power;
1744 /* Defining a mesh_handle changes the timing units to ms,
1745 * rather than seconds, and ties the instance to the requested
1746 * mesh_tx queue.
1747 */
1748 adv->mesh = mesh_handle;
1749
1750 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1751 scan_rsp_len, scan_rsp_data);
1752
1753 adv->timeout = timeout;
1754 adv->remaining_time = timeout;
1755
1756 if (duration == 0)
1757 adv->duration = hdev->def_multi_adv_rotation_duration;
1758 else
1759 adv->duration = duration;
1760
1761 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1762
1763 BT_DBG("%s for %dMR", hdev->name, instance);
1764
1765 return adv;
1766 }
1767
1768 /* This function requires the caller holds hdev->lock */
hci_add_per_instance(struct hci_dev * hdev,u8 instance,u8 sid,u32 flags,u8 data_len,u8 * data,u32 min_interval,u32 max_interval)1769 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
1770 u32 flags, u8 data_len, u8 *data,
1771 u32 min_interval, u32 max_interval)
1772 {
1773 struct adv_info *adv;
1774
1775 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1776 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1777 min_interval, max_interval, 0);
1778 if (IS_ERR(adv))
1779 return adv;
1780
1781 adv->sid = sid;
1782 adv->periodic = true;
1783 adv->per_adv_data_len = data_len;
1784
1785 if (data)
1786 memcpy(adv->per_adv_data, data, data_len);
1787
1788 return adv;
1789 }
1790
/* Update the advertising and/or scan response payload of an existing
 * instance.  Each payload is only touched when a non-empty buffer is
 * given and it differs from the current data; the *_changed flags let
 * the sync machinery know what has to be re-uploaded.  Returns 0, or
 * -ENOENT if the instance does not exist.
 * This function requires the caller holds hdev->lock.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
1825
/* Return the mgmt advertising flags for an instance.  Instance 0x00 is
 * synthesized from the device state; any other instance returns the
 * flags it was registered with, or 0 when it does not exist.
 * This function requires the caller holds hdev->lock.
 */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		/* Limited discoverable takes precedence over general */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}
1860
hci_adv_instance_is_scannable(struct hci_dev * hdev,u8 instance)1861 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1862 {
1863 struct adv_info *adv;
1864
1865 /* Instance 0x00 always set local name */
1866 if (instance == 0x00)
1867 return true;
1868
1869 adv = hci_find_adv_instance(hdev, instance);
1870 if (!adv)
1871 return false;
1872
1873 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1874 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1875 return true;
1876
1877 return adv->scan_rsp_len ? true : false;
1878 }
1879
1880 /* This function requires the caller holds hdev->lock */
hci_adv_monitors_clear(struct hci_dev * hdev)1881 void hci_adv_monitors_clear(struct hci_dev *hdev)
1882 {
1883 struct adv_monitor *monitor;
1884 int handle;
1885
1886 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1887 hci_free_adv_monitor(hdev, monitor);
1888
1889 idr_destroy(&hdev->adv_monitors_idr);
1890 }
1891
/* Frees the monitor structure and do some bookkeepings: releases the
 * pattern list, removes the handle from the IDR (handle 0 means it was
 * never assigned) and decrements the registered-monitor count unless
 * the monitor never reached a registered state.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	/* NULL is tolerated so callers need not check first */
	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
		hdev->adv_monitors_cnt--;

	kfree(monitor);
}
1916
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * Returns a negative errno on IDR allocation failure, otherwise the
 * status of the (optional) offload operation.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	/* hdev->lock only protects the IDR allocation, not the offload */
	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Powered-off devices keep the monitor registered locally only */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}
1962
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * A -ENOENT from the offload path is treated as "no matching handle":
 * the monitor is still freed locally and the status propagated.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		/* Capture the handle before msft_remove_monitor() for the
		 * debug message below.
		 */
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
2001
2002 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_single_adv_monitor(struct hci_dev * hdev,u16 handle)2003 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2004 {
2005 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2006
2007 if (!monitor)
2008 return -EINVAL;
2009
2010 return hci_remove_adv_monitor(hdev, monitor);
2011 }
2012
2013 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_all_adv_monitor(struct hci_dev * hdev)2014 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2015 {
2016 struct adv_monitor *monitor;
2017 int idr_next_id = 0;
2018 int status = 0;
2019
2020 while (1) {
2021 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2022 if (!monitor)
2023 break;
2024
2025 status = hci_remove_adv_monitor(hdev, monitor);
2026 if (status)
2027 return status;
2028
2029 idr_next_id++;
2030 }
2031
2032 return status;
2033 }
2034
2035 /* This function requires the caller holds hdev->lock */
hci_is_adv_monitoring(struct hci_dev * hdev)2036 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2037 {
2038 return !idr_is_empty(&hdev->adv_monitors_idr);
2039 }
2040
hci_get_adv_monitor_offload_ext(struct hci_dev * hdev)2041 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2042 {
2043 if (msft_monitor_supported(hdev))
2044 return HCI_ADV_MONITOR_EXT_MSFT;
2045
2046 return HCI_ADV_MONITOR_EXT_NONE;
2047 }
2048
/* Find the entry for @bdaddr/@type in a plain bdaddr list, or NULL. */
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, bdaddr_list, list) {
		if (entry->bdaddr_type == type &&
		    !bacmp(&entry->bdaddr, bdaddr))
			return entry;
	}

	return NULL;
}
2061
/* Find the entry for @bdaddr/@type in an IRK-carrying bdaddr list,
 * or NULL.
 */
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *entry;

	list_for_each_entry(entry, bdaddr_list, list) {
		if (entry->bdaddr_type == type &&
		    !bacmp(&entry->bdaddr, bdaddr))
			return entry;
	}

	return NULL;
}
2075
2076 struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2077 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2078 bdaddr_t *bdaddr, u8 type)
2079 {
2080 struct bdaddr_list_with_flags *b;
2081
2082 list_for_each_entry(b, bdaddr_list, list) {
2083 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2084 return b;
2085 }
2086
2087 return NULL;
2088 }
2089
hci_bdaddr_list_clear(struct list_head * bdaddr_list)2090 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2091 {
2092 struct bdaddr_list *b, *n;
2093
2094 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2095 list_del(&b->list);
2096 kfree(b);
2097 }
2098 }
2099
/* Add @bdaddr/@type to a bdaddr list.  Returns 0 on success, -EBADF
 * for the wildcard address BDADDR_ANY, -EEXIST for duplicates and
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address may not be stored */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc_obj(*entry);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	list_add(&entry->list, list);

	return 0;
}
2121
/* Add @bdaddr/@type plus optional peer/local IRKs (16 bytes each) to a
 * bdaddr list.  Returns 0 on success, -EBADF for BDADDR_ANY, -EEXIST
 * for duplicates and -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	/* The wildcard address may not be stored */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc_obj(*entry);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);
	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}
2150
/* Add @bdaddr/@type together with @flags to a bdaddr list.  Returns 0
 * on success, -EBADF for BDADDR_ANY, -EEXIST for duplicates and
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	/* The wildcard address may not be stored */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc_obj(*entry);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;
	list_add(&entry->list, list);

	return 0;
}
2174
/* Remove @bdaddr/@type from a bdaddr list; BDADDR_ANY clears the whole
 * list.  Returns 0 on success, -ENOENT when no entry matched.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address means "delete everything" */
	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2193
/* Remove @bdaddr/@type from an IRK-carrying bdaddr list; BDADDR_ANY
 * clears the whole list.  Returns 0 on success, -ENOENT when no entry
 * matched.
 */
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	/* The wildcard address means "delete everything" */
	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2213
2214 /* This function requires the caller holds hdev->lock */
hci_conn_params_lookup(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2215 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2216 bdaddr_t *addr, u8 addr_type)
2217 {
2218 struct hci_conn_params *params;
2219
2220 list_for_each_entry(params, &hdev->le_conn_params, list) {
2221 if (bacmp(¶ms->addr, addr) == 0 &&
2222 params->addr_type == addr_type) {
2223 return params;
2224 }
2225 }
2226
2227 return NULL;
2228 }
2229
/* Find connection parameters for @addr/@addr_type on a pending-action
 * list (the list links entries through params->action, not
 * params->list).  Returns NULL when no entry matches.
 * This function requires the caller holds hdev->lock or rcu_read_lock.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}
2250
/* Remove @param from whatever pending-action list it is on and
 * reinitialize its action link.  synchronize_rcu() makes sure no RCU
 * reader still traverses the entry before the link is reinitialized.
 * This function requires the caller holds hdev->lock.
 */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	/* Not linked to any pending-action list: nothing to do */
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}
2261
2262 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_add(struct hci_conn_params * param,struct list_head * list)2263 void hci_pend_le_list_add(struct hci_conn_params *param,
2264 struct list_head *list)
2265 {
2266 list_add_rcu(¶m->action, list);
2267 }
2268
2269 /* This function requires the caller holds hdev->lock */
hci_conn_params_add(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2270 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2271 bdaddr_t *addr, u8 addr_type)
2272 {
2273 struct hci_conn_params *params;
2274
2275 params = hci_conn_params_lookup(hdev, addr, addr_type);
2276 if (params)
2277 return params;
2278
2279 params = kzalloc_obj(*params);
2280 if (!params) {
2281 bt_dev_err(hdev, "out of memory");
2282 return NULL;
2283 }
2284
2285 bacpy(¶ms->addr, addr);
2286 params->addr_type = addr_type;
2287
2288 list_add(¶ms->list, &hdev->le_conn_params);
2289 INIT_LIST_HEAD(¶ms->action);
2290
2291 params->conn_min_interval = hdev->le_conn_min_interval;
2292 params->conn_max_interval = hdev->le_conn_max_interval;
2293 params->conn_latency = hdev->le_conn_latency;
2294 params->supervision_timeout = hdev->le_supv_timeout;
2295 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2296
2297 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2298
2299 return params;
2300 }
2301
hci_conn_params_free(struct hci_conn_params * params)2302 void hci_conn_params_free(struct hci_conn_params *params)
2303 {
2304 hci_pend_le_list_del_init(params);
2305
2306 if (params->conn) {
2307 hci_conn_drop(params->conn);
2308 hci_conn_put(params->conn);
2309 }
2310
2311 list_del(¶ms->list);
2312 kfree(params);
2313 }
2314
2315 /* This function requires the caller holds hdev->lock */
hci_conn_params_del(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2316 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2317 {
2318 struct hci_conn_params *params;
2319
2320 params = hci_conn_params_lookup(hdev, addr, addr_type);
2321 if (!params)
2322 return;
2323
2324 hci_conn_params_free(params);
2325
2326 hci_update_passive_scan(hdev);
2327
2328 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2329 }
2330
2331 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_disabled(struct hci_dev * hdev)2332 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2333 {
2334 struct hci_conn_params *params, *tmp;
2335
2336 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2337 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2338 continue;
2339
2340 /* If trying to establish one time connection to disabled
2341 * device, leave the params, but mark them as just once.
2342 */
2343 if (params->explicit_connect) {
2344 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2345 continue;
2346 }
2347
2348 hci_conn_params_free(params);
2349 }
2350
2351 BT_DBG("All LE disabled connection parameters were removed");
2352 }
2353
2354 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_all(struct hci_dev * hdev)2355 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2356 {
2357 struct hci_conn_params *params, *tmp;
2358
2359 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2360 hci_conn_params_free(params);
2361
2362 BT_DBG("All LE connection parameters were removed");
2363 }
2364
2365 /* Copy the Identity Address of the controller.
2366 *
2367 * If the controller has a public BD_ADDR, then by default use that one.
2368 * If this is a LE only controller without a public address, default to
2369 * the static random address.
2370 *
2371 * For debugging purposes it is possible to force controllers with a
2372 * public address to use the static random address instead.
2373 *
2374 * In case BR/EDR has been disabled on a dual-mode controller and
2375 * userspace has configured a static address, then that address
2376 * becomes the identity address instead of the public BR/EDR address.
2377 */
hci_copy_identity_address(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 * bdaddr_type)2378 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2379 u8 *bdaddr_type)
2380 {
2381 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2382 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2383 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2384 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2385 bacpy(bdaddr, &hdev->static_addr);
2386 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2387 } else {
2388 bacpy(bdaddr, &hdev->bdaddr);
2389 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2390 }
2391 }
2392
/* Reset the recorded wake-up cause (reason, address and address type)
 * under hdev->lock so that a stale value is not reported on the next
 * resume.
 */
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}
2403
hci_suspend_notifier(struct notifier_block * nb,unsigned long action,void * data)2404 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2405 void *data)
2406 {
2407 struct hci_dev *hdev =
2408 container_of(nb, struct hci_dev, suspend_notifier);
2409 int ret = 0;
2410
2411 /* Userspace has full control of this device. Do nothing. */
2412 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2413 return NOTIFY_DONE;
2414
2415 /* To avoid a potential race with hci_unregister_dev. */
2416 hci_dev_hold(hdev);
2417
2418 switch (action) {
2419 case PM_HIBERNATION_PREPARE:
2420 case PM_SUSPEND_PREPARE:
2421 ret = hci_suspend_dev(hdev);
2422 break;
2423 case PM_POST_HIBERNATION:
2424 case PM_POST_SUSPEND:
2425 ret = hci_resume_dev(hdev);
2426 break;
2427 }
2428
2429 if (ret)
2430 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2431 action, ret);
2432
2433 hci_dev_put(hdev);
2434 return NOTIFY_DONE;
2435 }
2436
/* Alloc HCI device.
 *
 * Allocates and zero-fills a struct hci_dev plus @sizeof_priv extra bytes
 * of driver-private storage, and initializes all defaults, locks, lists,
 * work items and queues.  Returns NULL on allocation/SRCU-init failure.
 * The caller releases the device with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	/* Reserve room for the driver's private data after the struct */
	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	if (init_srcu_struct(&hdev->srcu)) {
		kfree(hdev);
		return NULL;
	}

	/* Baseline BR/EDR capabilities and identity defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection parameter defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	/* Misc timeouts and ages */
	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);
	mutex_init(&hdev->mgmt_pending_lock);

	ida_init(&hdev->unset_handle_ida);

	/* All bookkeeping lists start empty */
	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	/* RX/TX/command processing plus power and error handling work */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
2575
/* Free HCI device.
 *
 * Drops the final device reference; the actual memory is released by the
 * device-core release callback (see hci_release_dev()).
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2583
2584 /* Register HCI device */
hci_register_dev(struct hci_dev * hdev)2585 int hci_register_dev(struct hci_dev *hdev)
2586 {
2587 int id, error;
2588
2589 if (!hdev->open || !hdev->close || !hdev->send)
2590 return -EINVAL;
2591
2592 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2593 if (id < 0)
2594 return id;
2595
2596 error = dev_set_name(&hdev->dev, "hci%u", id);
2597 if (error)
2598 return error;
2599
2600 hdev->name = dev_name(&hdev->dev);
2601 hdev->id = id;
2602
2603 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2604
2605 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2606 if (!hdev->workqueue) {
2607 error = -ENOMEM;
2608 goto err;
2609 }
2610
2611 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2612 hdev->name);
2613 if (!hdev->req_workqueue) {
2614 destroy_workqueue(hdev->workqueue);
2615 error = -ENOMEM;
2616 goto err;
2617 }
2618
2619 if (!IS_ERR_OR_NULL(bt_debugfs))
2620 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2621
2622 error = device_add(&hdev->dev);
2623 if (error < 0)
2624 goto err_wqueue;
2625
2626 hci_leds_init(hdev);
2627
2628 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2629 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2630 hdev);
2631 if (hdev->rfkill) {
2632 if (rfkill_register(hdev->rfkill) < 0) {
2633 rfkill_destroy(hdev->rfkill);
2634 hdev->rfkill = NULL;
2635 }
2636 }
2637
2638 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2639 hci_dev_set_flag(hdev, HCI_RFKILLED);
2640
2641 hci_dev_set_flag(hdev, HCI_SETUP);
2642 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2643
2644 /* Assume BR/EDR support until proven otherwise (such as
2645 * through reading supported features during init.
2646 */
2647 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2648
2649 write_lock(&hci_dev_list_lock);
2650 list_add(&hdev->list, &hci_dev_list);
2651 write_unlock(&hci_dev_list_lock);
2652
2653 /* Devices that are marked for raw-only usage are unconfigured
2654 * and should not be included in normal operation.
2655 */
2656 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
2657 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2658
2659 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2660 * callback.
2661 */
2662 if (hdev->wakeup)
2663 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2664
2665 hci_sock_dev_event(hdev, HCI_DEV_REG);
2666 hci_dev_hold(hdev);
2667
2668 error = hci_register_suspend_notifier(hdev);
2669 if (error)
2670 BT_WARN("register suspend notifier failed error:%d\n", error);
2671
2672 queue_work(hdev->req_workqueue, &hdev->power_on);
2673
2674 idr_init(&hdev->adv_monitors_idr);
2675 msft_register(hdev);
2676
2677 return id;
2678
2679 err_wqueue:
2680 debugfs_remove_recursive(hdev->debugfs);
2681 destroy_workqueue(hdev->workqueue);
2682 destroy_workqueue(hdev->req_workqueue);
2683 err:
2684 ida_free(&hci_index_ida, hdev->id);
2685
2686 return error;
2687 }
2688 EXPORT_SYMBOL(hci_register_dev);
2689
/* Unregister HCI device.
 *
 * Tears the device down in a strict order: flag it as unregistering,
 * remove it from the global list, quiesce SRCU readers and work items,
 * close the device, notify mgmt/sockets, unregister rfkill and finally
 * drop the reference taken at registration.  Memory is freed later in
 * hci_release_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Set HCI_UNREGISTER under unregister_lock so concurrent users
	 * checking the flag stop starting new activity.
	 */
	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	/* Wait out SRCU readers that may still see the device, then
	 * release the SRCU state.
	 */
	synchronize_srcu(&hdev->srcu);
	cleanup_srcu_struct(&hdev->srcu);

	/* Stop all work items and prevent them from being re-queued */
	disable_work_sync(&hdev->rx_work);
	disable_work_sync(&hdev->cmd_work);
	disable_work_sync(&hdev->tx_work);
	disable_work_sync(&hdev->power_on);
	disable_work_sync(&hdev->error_reset);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	hci_dev_do_close(hdev);

	/* Only announce removal to mgmt if the device ever completed
	 * setup/configuration.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
2742
/* Release HCI device.
 *
 * Final cleanup, run when the last reference is dropped: removes
 * debugfs entries, destroys workqueues, clears every stored list and
 * key table under hdev->lock, releases the index, and frees buffered
 * skbs plus the device structure itself.
 */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Empty all bookkeeping lists while holding hdev->lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	msft_release(hdev);
	hci_dev_unlock(hdev);

	/* Give back the index and any remaining buffered packets */
	ida_destroy(&hdev->unset_handle_ida);
	ida_free(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
2780
hci_register_suspend_notifier(struct hci_dev * hdev)2781 int hci_register_suspend_notifier(struct hci_dev *hdev)
2782 {
2783 int ret = 0;
2784
2785 if (!hdev->suspend_notifier.notifier_call &&
2786 !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) {
2787 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2788 ret = register_pm_notifier(&hdev->suspend_notifier);
2789 }
2790
2791 return ret;
2792 }
2793
hci_unregister_suspend_notifier(struct hci_dev * hdev)2794 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2795 {
2796 int ret = 0;
2797
2798 if (hdev->suspend_notifier.notifier_call) {
2799 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2800 if (!ret)
2801 hdev->suspend_notifier.notifier_call = NULL;
2802 }
2803
2804 return ret;
2805 }
2806
2807 /* Cancel ongoing command synchronously:
2808 *
2809 * - Cancel command timer
2810 * - Reset command counter
2811 * - Cancel command request
2812 */
hci_cancel_cmd_sync(struct hci_dev * hdev,int err)2813 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2814 {
2815 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2816
2817 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2818 disable_delayed_work_sync(&hdev->cmd_timer);
2819 disable_delayed_work_sync(&hdev->ncmd_timer);
2820 } else {
2821 cancel_delayed_work_sync(&hdev->cmd_timer);
2822 cancel_delayed_work_sync(&hdev->ncmd_timer);
2823 }
2824
2825 atomic_set(&hdev->cmd_cnt, 1);
2826
2827 hci_cmd_sync_cancel_sync(hdev, err);
2828 }
2829
2830 /* Suspend HCI device */
hci_suspend_dev(struct hci_dev * hdev)2831 int hci_suspend_dev(struct hci_dev *hdev)
2832 {
2833 int ret;
2834
2835 bt_dev_dbg(hdev, "");
2836
2837 /* Suspend should only act on when powered. */
2838 if (!hdev_is_powered(hdev) ||
2839 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2840 return 0;
2841
2842 /* If powering down don't attempt to suspend */
2843 if (mgmt_powering_down(hdev))
2844 return 0;
2845
2846 /* Cancel potentially blocking sync operation before suspend */
2847 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2848
2849 hci_req_sync_lock(hdev);
2850 ret = hci_suspend_sync(hdev);
2851 hci_req_sync_unlock(hdev);
2852
2853 hci_clear_wake_reason(hdev);
2854 mgmt_suspending(hdev, hdev->suspend_state);
2855
2856 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2857 return ret;
2858 }
2859 EXPORT_SYMBOL(hci_suspend_dev);
2860
2861 /* Resume HCI device */
hci_resume_dev(struct hci_dev * hdev)2862 int hci_resume_dev(struct hci_dev *hdev)
2863 {
2864 int ret;
2865
2866 bt_dev_dbg(hdev, "");
2867
2868 /* Resume should only act on when powered. */
2869 if (!hdev_is_powered(hdev) ||
2870 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2871 return 0;
2872
2873 /* If powering down don't attempt to resume */
2874 if (mgmt_powering_down(hdev))
2875 return 0;
2876
2877 hci_req_sync_lock(hdev);
2878 ret = hci_resume_sync(hdev);
2879 hci_req_sync_unlock(hdev);
2880
2881 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2882 hdev->wake_addr_type);
2883
2884 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2885 return ret;
2886 }
2887 EXPORT_SYMBOL(hci_resume_dev);
2888
2889 /* Reset HCI device */
hci_reset_dev(struct hci_dev * hdev)2890 int hci_reset_dev(struct hci_dev *hdev)
2891 {
2892 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2893 struct sk_buff *skb;
2894
2895 skb = bt_skb_alloc(3, GFP_ATOMIC);
2896 if (!skb)
2897 return -ENOMEM;
2898
2899 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2900 skb_put_data(skb, hw_err, 3);
2901
2902 bt_dev_err(hdev, "Injecting HCI hardware error event");
2903
2904 /* Send Hardware Error to upper stack */
2905 return hci_recv_frame(hdev, skb);
2906 }
2907 EXPORT_SYMBOL(hci_reset_dev);
2908
hci_dev_classify_pkt_type(struct hci_dev * hdev,struct sk_buff * skb)2909 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2910 {
2911 if (hdev->classify_pkt_type)
2912 return hdev->classify_pkt_type(hdev, skb);
2913
2914 return hci_skb_pkt_type(skb);
2915 }
2916
2917 /* Receive frame from HCI drivers */
hci_recv_frame(struct hci_dev * hdev,struct sk_buff * skb)2918 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2919 {
2920 u8 dev_pkt_type;
2921
2922 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2923 && !test_bit(HCI_INIT, &hdev->flags))) {
2924 kfree_skb(skb);
2925 return -ENXIO;
2926 }
2927
2928 /* Check if the driver agree with packet type classification */
2929 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2930 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2931 hci_skb_pkt_type(skb) = dev_pkt_type;
2932 }
2933
2934 switch (hci_skb_pkt_type(skb)) {
2935 case HCI_EVENT_PKT:
2936 break;
2937 case HCI_ACLDATA_PKT:
2938 /* Detect if ISO packet has been sent as ACL */
2939 if (hci_conn_num(hdev, CIS_LINK) ||
2940 hci_conn_num(hdev, BIS_LINK) ||
2941 hci_conn_num(hdev, PA_LINK)) {
2942 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2943 __u8 type;
2944
2945 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2946 if (type == CIS_LINK || type == BIS_LINK ||
2947 type == PA_LINK)
2948 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2949 }
2950 break;
2951 case HCI_SCODATA_PKT:
2952 break;
2953 case HCI_ISODATA_PKT:
2954 break;
2955 case HCI_DRV_PKT:
2956 break;
2957 default:
2958 kfree_skb(skb);
2959 return -EINVAL;
2960 }
2961
2962 /* Incoming skb */
2963 bt_cb(skb)->incoming = 1;
2964
2965 /* Time stamp */
2966 __net_timestamp(skb);
2967
2968 skb_queue_tail(&hdev->rx_q, skb);
2969 queue_work(hdev->workqueue, &hdev->rx_work);
2970
2971 return 0;
2972 }
2973 EXPORT_SYMBOL(hci_recv_frame);
2974
/* Receive diagnostic message from HCI drivers.
 *
 * Tags the buffer as a diagnostic packet, timestamps it and pushes it
 * onto the RX queue for hci_rx_work. Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
2990
/* Replace hdev->hw_info with a printf-style formatted string.
 * The previous string is freed; the new one is allocated with
 * kvasprintf_const() (may alias a constant format).
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
3001
/* Replace hdev->fw_info with a printf-style formatted string.
 * Mirrors hci_set_hw_info(): old value freed, new value allocated with
 * kvasprintf_const().
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
3012
3013 /* ---- Interface to upper protocols ---- */
3014
/* Register an upper-protocol callback block; it is appended to the
 * global hci_cb_list under hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3026
/* Unregister an upper-protocol callback block by unlinking it from the
 * global hci_cb_list under hci_cb_list_lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3038
/* Hand one outgoing frame to the transport driver.
 *
 * The frame is timestamped, mirrored to the monitor (and, in promiscuous
 * mode, to sockets), orphaned, and then either processed internally
 * (HCI_DRV_PKT) or passed to hdev->send(). Consumes the skb on every
 * path. Returns 0 on success or a negative errno.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Transport must be running to accept frames */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
		/* Intercept HCI Drv packet here and don't go with hdev->send
		 * callback.
		 */
		err = hci_drv_process_cmd(hdev, skb);
		kfree_skb(skb);
		return err;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
3083
/* Send a frame that belongs to a specific connection: record it via
 * hci_conn_tx_queue() for the connection's TX accounting, then hand it
 * to hci_send_frame().
 */
static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn,
			       struct sk_buff *skb)
{
	hci_conn_tx_queue(conn, skb);
	return hci_send_frame(hdev, skb);
}
3090
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, flags it
 * as the start of a stand-alone request and queues it on cmd_q for
 * hci_cmd_work. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3115
/* Send a vendor-specific command without waiting for any completion
 * event.  Only OGF 0x3f (vendor) commands are accepted; the send result
 * is intentionally not propagated — the function reports success once
 * the frame has been handed off.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3146
3147 /* Get data from the previously sent command */
hci_cmd_data(struct sk_buff * skb,__u16 opcode)3148 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3149 {
3150 struct hci_command_hdr *hdr;
3151
3152 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3153 return NULL;
3154
3155 hdr = (void *)skb->data;
3156
3157 if (hdr->opcode != cpu_to_le16(opcode))
3158 return NULL;
3159
3160 return skb->data + HCI_COMMAND_HDR_SIZE;
3161 }
3162
3163 /* Get data from the previously sent command */
hci_sent_cmd_data(struct hci_dev * hdev,__u16 opcode)3164 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3165 {
3166 void *data;
3167
3168 /* Check if opcode matches last sent command */
3169 data = hci_cmd_data(hdev->sent_cmd, opcode);
3170 if (!data)
3171 /* Check if opcode matches last request */
3172 data = hci_cmd_data(hdev->req_skb, opcode);
3173
3174 return data;
3175 }
3176
/* Get data from last received event.
 *
 * Returns a pointer to the payload of the most recently stored event
 * (hdev->recv_event) when its event code — or, for LE meta events, its
 * subevent code — matches @event; otherwise returns NULL.  The offset
 * tracks how many header bytes to skip before the payload.
 */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	/* Skip the event (and meta) header(s); return the payload */
	return hdev->recv_event->data + offset;
}
3207
3208 /* Send ACL data */
hci_add_acl_hdr(struct sk_buff * skb,__u16 handle,__u16 flags)3209 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3210 {
3211 struct hci_acl_hdr *hdr;
3212 int len = skb->len;
3213
3214 skb_push(skb, HCI_ACL_HDR_SIZE);
3215 skb_reset_transport_header(skb);
3216 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3217 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3218 hdr->dlen = cpu_to_le16(len);
3219 }
3220
/* Queue an ACL buffer (and any fragments in its frag_list) on @queue.
 *
 * The head skb gets the caller's flags; every fragment is re-flagged as
 * a continuation (ACL_CONT).  Fragments are queued atomically under the
 * queue lock so the TX path never sees a partial fragment train.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the linear head is sent as the first packet */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every remaining fragment is a continuation */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}

	bt_dev_dbg(hdev, "chan %p queued %d", chan, skb_queue_len(queue));
}
3274
/* Queue ACL data on the channel's data queue and kick the TX work to
 * transmit it.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3285
3286 /* Send SCO data */
/* Build an HCI SCO header for @skb, queue the frame on the connection's
 * data queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): assumes skb->len fits the SCO header length field,
	 * i.e. callers respect the SCO MTU - confirm.
	 */
	hdr.dlen = skb->len;

	/* Prepend the header in the skb headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);

	bt_dev_dbg(hdev, "hcon %p queued %d", conn,
		   skb_queue_len(&conn->data_q));

	/* Actual transmission happens from the TX work */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3310
3311 /* Send ISO data */
/* Prepend an HCI ISO data header carrying @handle, the packed packet
 * boundary @flags and the payload length.
 */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;	/* payload length before the push below */

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3323
/* Queue an ISO frame (and any fragments on its frag_list) on @queue.
 * The head skb is flagged ISO_SINGLE when there are no fragments,
 * otherwise ISO_START; fragments get ISO_CONT and the last one ISO_END.
 */
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	/* Restrict the head skb to its linear data; fragments are
	 * detached and queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			/* Last fragment (list == NULL) is flagged ISO_END */
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}

	bt_dev_dbg(hdev, "hcon %p queued %d", conn, skb_queue_len(queue));
}
3370
hci_send_iso(struct hci_conn * conn,struct sk_buff * skb)3371 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3372 {
3373 struct hci_dev *hdev = conn->hdev;
3374
3375 BT_DBG("%s len %d", hdev->name, skb->len);
3376
3377 hci_queue_iso(conn, &conn->data_q, skb);
3378
3379 queue_work(hdev->workqueue, &hdev->tx_work);
3380 }
3381
3382 /* ---- HCI TX task (outgoing data) ---- */
3383
3384 /* HCI Connection scheduler */
/* Compute the send quota for @conn: the controller's free buffer count
 * for the link type divided evenly between the @num active connections,
 * but always at least 1 so a connection is never starved entirely.
 */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Fall back to the ACL buffer pool when the controller
		 * reports no dedicated LE buffers (le_mtu == 0).
		 */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case CIS_LINK:
	case BIS_LINK:
	case PA_LINK:
		cnt = hdev->iso_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
3421
/* Pick the next connection of @type to service: among connected
 * connections with queued data, choose the one with the fewest packets
 * in flight (lowest ->sent) for fairness, and compute its quota.
 * Returns NULL (and *quote = 0) when nothing is eligible.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type ||
		    skb_queue_empty(&c->data_q))
			continue;

		bt_dev_dbg(hdev, "hcon %p state %s queued %d", c,
			   state_to_string(c->state),
			   skb_queue_len(&c->data_q));

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3464
/* TX timeout handler: disconnect every connection of @type that still
 * has unacked packets (->sent != 0), since the controller stopped
 * returning buffer credits for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	hci_dev_lock(hdev);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	hci_dev_unlock(hdev);
}
3485
/* Pick the next channel of @type to service: scan all connected
 * connections' channels with queued data, track the highest head-skb
 * priority seen, and among channels at that priority choose the one
 * whose connection has the fewest packets in flight. Also computes the
 * send quota for the chosen channel's connection.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * accounting at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3548
/* Anti-starvation pass after a scheduling round: channels that did send
 * (->sent != 0) get their counter reset, while channels that sent
 * nothing but still have queued data get their head skb promoted to
 * HCI_PRIO_MAX - 1 so they win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3598
/* Detect a stalled controller: if no buffer credits are left (@cnt == 0)
 * and nothing was transmitted for longer than the per-type timeout,
 * kill the stalled connections via hci_link_tx_to().
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long timeout;

	/* Unconfigured controllers are not subject to TX timeouts */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case ACL_LINK:
		/* tx timeout must be longer than maximum link supervision
		 * timeout (40.9 seconds)
		 */
		timeout = hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT;
		break;
	case LE_LINK:
		/* tx timeout must be longer than maximum link supervision
		 * timeout (40.9 seconds)
		 */
		timeout = hdev->le_last_tx + HCI_ACL_TX_TIMEOUT;
		break;
	case CIS_LINK:
	case BIS_LINK:
	case PA_LINK:
		/* tx timeout must be longer than the maximum transport latency
		 * (8.388607 seconds)
		 */
		timeout = hdev->iso_last_tx + HCI_ISO_TX_TIMEOUT;
		break;
	default:
		return;
	}

	if (!cnt && time_after(jiffies, timeout))
		hci_link_tx_to(hdev, type);
}
3634
3635 /* Schedule SCO */
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;
	unsigned int pkts = hdev->sco_pkts;

	bt_dev_dbg(hdev, "type %u", type);

	if (!hci_conn_num(hdev, type) || !pkts)
		return;

	/* Use sco_pkts if flow control has not been enabled which will limit
	 * the amount of buffer sent in a row.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
		cnt = &pkts;
	else
		cnt = &hdev->sco_cnt;

	/* Drain eligible connections in least-sent-first order until the
	 * credit counter is exhausted.
	 */
	while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_conn_frame(hdev, conn, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}

	/* Rescheduled if all packets were sent and flow control is not enabled
	 * as there could be more packets queued that could not be sent and
	 * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule
	 * needs to be forced.
	 */
	if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
		queue_work(hdev->workqueue, &hdev->tx_work);
}
3676
/* ACL packet scheduler: while the controller has ACL buffer credits,
 * drain the selected channel up to its quota, stopping early when the
 * head skb's priority drops below the priority the burst started at.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the burst is fixed by the first queued skb */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_conn_frame(hdev, chan->conn, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev, SCO_LINK);
			hci_sched_sco(hdev, ESCO_LINK);
		}
	}

	/* Something was sent: age the priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3718
hci_sched_acl(struct hci_dev * hdev)3719 static void hci_sched_acl(struct hci_dev *hdev)
3720 {
3721 BT_DBG("%s", hdev->name);
3722
3723 /* No ACL link over BR/EDR controller */
3724 if (!hci_conn_num(hdev, ACL_LINK))
3725 return;
3726
3727 hci_sched_acl_pkt(hdev);
3728 }
3729
/* LE packet scheduler: same per-channel quota/priority scheme as ACL,
 * but spends LE buffer credits (or the shared ACL pool when the
 * controller has no dedicated LE buffers, le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, *cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;

	__check_timeout(hdev, *cnt, LE_LINK);

	/* Remember the starting credit count to detect activity below */
	tmp = *cnt;
	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_conn_frame(hdev, chan->conn, skb);
			hdev->le_last_tx = jiffies;

			(*cnt)--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev, SCO_LINK);
			hci_sched_sco(hdev, ESCO_LINK);
		}
	}

	/* Something was sent: age the priorities of starved channels */
	if (*cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3774
3775 /* Schedule iso */
/* Schedule iso */
static void hci_sched_iso(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, type))
		return;

	cnt = &hdev->iso_cnt;

	__check_timeout(hdev, *cnt, type);

	/* Drain eligible connections in least-sent-first order until the
	 * ISO buffer credits run out.
	 */
	while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_conn_frame(hdev, conn, skb);
			hdev->iso_last_tx = jiffies;

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
3805
/* TX work: run every per-type scheduler (SCO first, then ISO, ACL and
 * LE) and finally flush raw packets queued on raw_q.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev, SCO_LINK);
		hci_sched_sco(hdev, ESCO_LINK);
		hci_sched_iso(hdev, CIS_LINK);
		hci_sched_iso(hdev, BIS_LINK);
		hci_sched_iso(hdev, PA_LINK);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3829
3830 /* ----- HCI RX task (incoming data processing) ----- */
3831
3832 /* ACL data packet */
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr;
	__u16 handle, flags;
	int err;

	/* Strip and validate the ACL header */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ACL packet too small");
		kfree_skb(skb);
		return;
	}

	/* Unpack flags and connection handle from the handle field */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hdev->stat.acl_rx++;

	/* Hand the payload to L2CAP; it consumes the skb */
	err = l2cap_recv_acldata(hdev, handle, skb, flags);
	if (err == -ENOENT)
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	else if (err)
		bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d",
			   handle, err);
}
3863
3864 /* SCO data packet */
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr;
	__u16 handle, flags;
	int err;

	/* Strip and validate the SCO header */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "SCO packet too small");
		kfree_skb(skb);
		return;
	}

	/* Unpack flags and connection handle from the handle field */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hdev->stat.sco_rx++;

	/* The two low flag bits carry the packet status */
	hci_skb_pkt_status(skb) = flags & 0x03;

	err = sco_recv_scodata(hdev, handle, skb);
	if (err == -ENOENT)
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	else if (err)
		bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d",
			   handle, err);
}
3897
/* ISO data packet: strip the ISO header and hand the payload to the
 * ISO socket layer.
 */
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	__u16 handle, flags;
	int err;

	/* Strip and validate the ISO header */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		kfree_skb(skb);
		return;
	}

	/* Unpack flags and connection handle from the handle field */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	err = iso_recv(hdev, handle, skb, flags);
	if (err == -ENOENT)
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
	else if (err)
		bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d",
			   handle, err);
}
3926
hci_req_is_complete(struct hci_dev * hdev)3927 static bool hci_req_is_complete(struct hci_dev *hdev)
3928 {
3929 struct sk_buff *skb;
3930
3931 skb = skb_peek(&hdev->cmd_q);
3932 if (!skb)
3933 return true;
3934
3935 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3936 }
3937
/* Requeue a clone of the last sent command at the head of the command
 * queue and kick the command work. HCI_Reset is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	/* Clone so sent_cmd keeps its own reference; give up silently on
	 * allocation failure.
	 */
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3959
/* Handle completion of @opcode with @status: determine whether the
 * request it belongs to is finished and, if so, return the request's
 * completion callback through *req_complete / *req_complete_skb while
 * flushing the request's remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A HCI_REQ_START marker begins the next request: put it
		 * back and stop flushing.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4026
/* RX work: drain hdev->rx_q, mirroring each packet to the monitor (and
 * promiscuous sockets) before dispatching it by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4101
/* Transmit one queued HCI command @skb, keeping a clone in
 * hdev->sent_cmd for completion matching. Returns 0 on success, a
 * negative error on failure (-ENODATA for the HCI_OP_NOP placeholder,
 * which is dropped without being sent).
 */
static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	/* Drop the reference to the previously sent command */
	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		/* Clone failed: put the command back and retry later */
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return -EINVAL;
	}

	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
		err = hci_send_frame(hdev, skb);
		if (err < 0) {
			hci_cmd_sync_cancel_sync(hdev, -err);
			return err;
		}
		/* One controller command credit consumed */
		atomic_dec(&hdev->cmd_cnt);
	} else {
		err = -ENODATA;
		kfree_skb(skb);
	}

	/* If a synchronous request is waiting, stash a clone in req_skb
	 * so its completion callback can be matched later.
	 */
	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	return err;
}
4137
/* Command work: if the controller has command credits, dequeue and send
 * the next command, then (re)arm the command timeout unless a reset or
 * a workqueue drain is in progress.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;
	int err;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		err = hci_send_cmd_sync(hdev, skb);
		if (err)
			return;

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}
4167