1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 /* Get HCI device by index.
66 * Device is held on return. */
static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
68 {
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 if (srcu_index)
81 *srcu_index = srcu_read_lock(&d->srcu);
82 break;
83 }
84 }
85 read_unlock(&hci_dev_list_lock);
86 return hdev;
87 }
88
struct hci_dev *hci_dev_get(int index)
90 {
91 return __hci_dev_get(index, NULL);
92 }
93
static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
95 {
96 return __hci_dev_get(index, srcu_index);
97 }
98
static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
100 {
101 srcu_read_unlock(&hdev->srcu, srcu_index);
102 hci_dev_put(hdev);
103 }
104
105 /* ---- Inquiry support ---- */
106
bool hci_discovery_active(struct hci_dev *hdev)
108 {
109 struct discovery_state *discov = &hdev->discovery;
110
111 switch (discov->state) {
112 case DISCOVERY_FINDING:
113 case DISCOVERY_RESOLVING:
114 return true;
115
116 default:
117 return false;
118 }
119 }
120
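/* Transition the discovery state machine and notify the management
 * interface when discovery effectively starts (FINDING) or stops
 * (STOPPED); the stop notification is skipped when coming from the
 * STARTING state.
 */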
void hci_discovery_set_state(struct hci_dev *hdev, int state)
122 {
123 int old_state = hdev->discovery.state;
124
125 if (old_state == state)
126 return;
127
128 hdev->discovery.state = state;
129
130 switch (state) {
131 case DISCOVERY_STOPPED:
132 hci_update_passive_scan(hdev);
133
134 if (old_state != DISCOVERY_STARTING)
135 mgmt_discovering(hdev, 0);
136 break;
137 case DISCOVERY_STARTING:
138 break;
139 case DISCOVERY_FINDING:
140 mgmt_discovering(hdev, 1);
141 break;
142 case DISCOVERY_RESOLVING:
143 break;
144 case DISCOVERY_STOPPING:
145 break;
146 }
147
148 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
149 }
150
void hci_inquiry_cache_flush(struct hci_dev *hdev)
152 {
153 struct discovery_state *cache = &hdev->discovery;
154 struct inquiry_entry *p, *n;
155
156 list_for_each_entry_safe(p, n, &cache->all, all) {
157 list_del(&p->all);
158 kfree(p);
159 }
160
161 INIT_LIST_HEAD(&cache->unknown);
162 INIT_LIST_HEAD(&cache->resolve);
163 }
164
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
167 {
168 struct discovery_state *cache = &hdev->discovery;
169 struct inquiry_entry *e;
170
171 BT_DBG("cache %p, %pMR", cache, bdaddr);
172
173 list_for_each_entry(e, &cache->all, all) {
174 if (!bacmp(&e->data.bdaddr, bdaddr))
175 return e;
176 }
177
178 return NULL;
179 }
180
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
183 {
184 struct discovery_state *cache = &hdev->discovery;
185 struct inquiry_entry *e;
186
187 BT_DBG("cache %p, %pMR", cache, bdaddr);
188
189 list_for_each_entry(e, &cache->unknown, list) {
190 if (!bacmp(&e->data.bdaddr, bdaddr))
191 return e;
192 }
193
194 return NULL;
195 }
196
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
							bdaddr_t *bdaddr,
							int state)
200 {
201 struct discovery_state *cache = &hdev->discovery;
202 struct inquiry_entry *e;
203
204 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
205
206 list_for_each_entry(e, &cache->resolve, list) {
207 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
208 return e;
209 if (!bacmp(&e->data.bdaddr, bdaddr))
210 return e;
211 }
212
213 return NULL;
214 }
215
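/* Reposition @ie within the resolve list, which is kept ordered by
 * ascending |RSSI| so that devices with the strongest signal get their
 * names resolved first. Entries with a name request already pending
 * keep their position.
 */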
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
218 {
219 struct discovery_state *cache = &hdev->discovery;
220 struct list_head *pos = &cache->resolve;
221 struct inquiry_entry *p;
222
223 list_del(&ie->list);
224
225 list_for_each_entry(p, &cache->resolve, list) {
226 if (p->name_state != NAME_PENDING &&
227 abs(p->data.rssi) >= abs(ie->data.rssi))
228 break;
229 pos = &p->list;
230 }
231
232 list_add(&ie->list, pos);
233 }
234
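/* Add or refresh an inquiry cache entry for @data and return the
 * MGMT_DEV_FOUND_* flags (e.g. CONFIRM_NAME while the remote name is
 * still unknown) that should accompany the corresponding Device Found
 * event.
 */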
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
237 {
238 struct discovery_state *cache = &hdev->discovery;
239 struct inquiry_entry *ie;
240 u32 flags = 0;
241
242 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
243
244 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
245
246 if (!data->ssp_mode)
247 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
248
249 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
250 if (ie) {
251 if (!ie->data.ssp_mode)
252 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
253
254 if (ie->name_state == NAME_NEEDED &&
255 data->rssi != ie->data.rssi) {
256 ie->data.rssi = data->rssi;
257 hci_inquiry_cache_update_resolve(hdev, ie);
258 }
259
260 goto update;
261 }
262
263 /* Entry not in the cache. Add new one. */
264 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
265 if (!ie) {
266 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
267 goto done;
268 }
269
270 list_add(&ie->all, &cache->all);
271
272 if (name_known) {
273 ie->name_state = NAME_KNOWN;
274 } else {
275 ie->name_state = NAME_NOT_KNOWN;
276 list_add(&ie->list, &cache->unknown);
277 }
278
279 update:
280 if (name_known && ie->name_state != NAME_KNOWN &&
281 ie->name_state != NAME_PENDING) {
282 ie->name_state = NAME_KNOWN;
283 list_del(&ie->list);
284 }
285
286 memcpy(&ie->data, data, sizeof(*data));
287 ie->timestamp = jiffies;
288 cache->timestamp = jiffies;
289
290 if (ie->name_state == NAME_NOT_KNOWN)
291 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
292
293 done:
294 return flags;
295 }
296
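/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records and return how many were copied. Runs under hdev->lock and
 * therefore must not sleep.
 */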
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
298 {
299 struct discovery_state *cache = &hdev->discovery;
300 struct inquiry_info *info = (struct inquiry_info *) buf;
301 struct inquiry_entry *e;
302 int copied = 0;
303
304 list_for_each_entry(e, &cache->all, all) {
305 struct inquiry_data *data = &e->data;
306
307 if (copied >= num)
308 break;
309
310 bacpy(&info->bdaddr, &data->bdaddr);
311 info->pscan_rep_mode = data->pscan_rep_mode;
312 info->pscan_period_mode = data->pscan_period_mode;
313 info->pscan_mode = data->pscan_mode;
314 memcpy(info->dev_class, data->dev_class, 3);
315 info->clock_offset = data->clock_offset;
316
317 info++;
318 copied++;
319 }
320
321 BT_DBG("cache %p, copied %d", cache, copied);
322 return copied;
323 }
324
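/* HCIINQUIRY ioctl handler: optionally (re)runs the inquiry procedure
 * and then copies the cached results back to user space.
 */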
int hci_inquiry(void __user *arg)
326 {
327 __u8 __user *ptr = arg;
328 struct hci_inquiry_req ir;
329 struct hci_dev *hdev;
330 int err = 0, do_inquiry = 0, max_rsp;
331 __u8 *buf;
332
333 if (copy_from_user(&ir, ptr, sizeof(ir)))
334 return -EFAULT;
335
336 hdev = hci_dev_get(ir.dev_id);
337 if (!hdev)
338 return -ENODEV;
339
340 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
341 err = -EBUSY;
342 goto done;
343 }
344
345 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
346 err = -EOPNOTSUPP;
347 goto done;
348 }
349
350 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
351 err = -EOPNOTSUPP;
352 goto done;
353 }
354
355 /* Restrict maximum inquiry length to 60 seconds */
356 if (ir.length > 60) {
357 err = -EINVAL;
358 goto done;
359 }
360
361 hci_dev_lock(hdev);
362 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
363 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
364 hci_inquiry_cache_flush(hdev);
365 do_inquiry = 1;
366 }
367 hci_dev_unlock(hdev);
368
369 if (do_inquiry) {
370 hci_req_sync_lock(hdev);
371 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
372 hci_req_sync_unlock(hdev);
373
374 if (err < 0)
375 goto done;
376
377 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
378 * cleared). If it is interrupted by a signal, return -EINTR.
379 */
380 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
381 TASK_INTERRUPTIBLE)) {
382 err = -EINTR;
383 goto done;
384 }
385 }
386
/* For an unlimited number of responses we will use a buffer with
 * 255 entries
 */
390 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
391
/* inquiry_cache_dump() can't sleep. Therefore we allocate a temporary
 * buffer and then copy it to user space.
 */
395 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
396 if (!buf) {
397 err = -ENOMEM;
398 goto done;
399 }
400
401 hci_dev_lock(hdev);
402 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
403 hci_dev_unlock(hdev);
404
405 BT_DBG("num_rsp %d", ir.num_rsp);
406
407 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
408 ptr += sizeof(ir);
409 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
410 ir.num_rsp))
411 err = -EFAULT;
412 } else
413 err = -EFAULT;
414
415 kfree(buf);
416
417 done:
418 hci_dev_put(hdev);
419 return err;
420 }
421
static int hci_dev_do_open(struct hci_dev *hdev)
423 {
424 int ret = 0;
425
426 BT_DBG("%s %p", hdev->name, hdev);
427
428 hci_req_sync_lock(hdev);
429
430 ret = hci_dev_open_sync(hdev);
431
432 hci_req_sync_unlock(hdev);
433 return ret;
434 }
435
436 /* ---- HCI ioctl helpers ---- */
437
int hci_dev_open(__u16 dev)
439 {
440 struct hci_dev *hdev;
441 int err;
442
443 hdev = hci_dev_get(dev);
444 if (!hdev)
445 return -ENODEV;
446
447 /* Devices that are marked as unconfigured can only be powered
 * up as user channel. Trying to bring them up as normal devices
 * will result in a failure. Only user channel operation is
 * possible.
451 *
452 * When this function is called for a user channel, the flag
453 * HCI_USER_CHANNEL will be set first before attempting to
454 * open the device.
455 */
456 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
457 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
458 err = -EOPNOTSUPP;
459 goto done;
460 }
461
462 /* We need to ensure that no other power on/off work is pending
463 * before proceeding to call hci_dev_do_open. This is
464 * particularly important if the setup procedure has not yet
465 * completed.
466 */
467 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
468 cancel_delayed_work(&hdev->power_off);
469
470 /* After this call it is guaranteed that the setup procedure
471 * has finished. This means that error conditions like RFKILL
472 * or no valid public or static random address apply.
473 */
474 flush_workqueue(hdev->req_workqueue);
475
476 /* For controllers not using the management interface and that
477 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
478 * so that pairing works for them. Once the management interface
479 * is in use this bit will be cleared again and userspace has
480 * to explicitly enable it.
481 */
482 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
483 !hci_dev_test_flag(hdev, HCI_MGMT))
484 hci_dev_set_flag(hdev, HCI_BONDABLE);
485
486 err = hci_dev_do_open(hdev);
487
488 done:
489 hci_dev_put(hdev);
490 return err;
491 }
492
int hci_dev_do_close(struct hci_dev *hdev)
494 {
495 int err;
496
497 BT_DBG("%s %p", hdev->name, hdev);
498
499 hci_req_sync_lock(hdev);
500
501 err = hci_dev_close_sync(hdev);
502
503 hci_req_sync_unlock(hdev);
504
505 return err;
506 }
507
int hci_dev_close(__u16 dev)
509 {
510 struct hci_dev *hdev;
511 int err;
512
513 hdev = hci_dev_get(dev);
514 if (!hdev)
515 return -ENODEV;
516
517 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
518 err = -EBUSY;
519 goto done;
520 }
521
522 cancel_work_sync(&hdev->power_on);
523 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
524 cancel_delayed_work(&hdev->power_off);
525
526 err = hci_dev_do_close(hdev);
527
528 done:
529 hci_dev_put(hdev);
530 return err;
531 }
532
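/* Perform a device reset: drop the pending RX and command queues, drain
 * the workqueue, flush the inquiry cache and connection hash, call the
 * driver's flush hook and finally issue HCI_Reset via hci_reset_sync().
 */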
static int hci_dev_do_reset(struct hci_dev *hdev)
534 {
535 int ret;
536
537 BT_DBG("%s %p", hdev->name, hdev);
538
539 hci_req_sync_lock(hdev);
540
541 /* Drop queues */
542 skb_queue_purge(&hdev->rx_q);
543 skb_queue_purge(&hdev->cmd_q);
544
545 /* Cancel these to avoid queueing non-chained pending work */
546 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
547 /* Wait for
548 *
549 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
550 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
551 *
552 * inside RCU section to see the flag or complete scheduling.
553 */
554 synchronize_rcu();
555 /* Explicitly cancel works in case scheduled after setting the flag. */
556 cancel_delayed_work(&hdev->cmd_timer);
557 cancel_delayed_work(&hdev->ncmd_timer);
558
559 /* Avoid potential lockdep warnings from the *_flush() calls by
560 * ensuring the workqueue is empty up front.
561 */
562 drain_workqueue(hdev->workqueue);
563
564 hci_dev_lock(hdev);
565 hci_inquiry_cache_flush(hdev);
566 hci_conn_hash_flush(hdev);
567 hci_dev_unlock(hdev);
568
569 if (hdev->flush)
570 hdev->flush(hdev);
571
572 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
573
574 atomic_set(&hdev->cmd_cnt, 1);
575 hdev->acl_cnt = 0;
576 hdev->sco_cnt = 0;
577 hdev->le_cnt = 0;
578 hdev->iso_cnt = 0;
579
580 ret = hci_reset_sync(hdev);
581
582 hci_req_sync_unlock(hdev);
583 return ret;
584 }
585
int hci_dev_reset(__u16 dev)
587 {
588 struct hci_dev *hdev;
589 int err, srcu_index;
590
591 hdev = hci_dev_get_srcu(dev, &srcu_index);
592 if (!hdev)
593 return -ENODEV;
594
595 if (!test_bit(HCI_UP, &hdev->flags)) {
596 err = -ENETDOWN;
597 goto done;
598 }
599
600 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
601 err = -EBUSY;
602 goto done;
603 }
604
605 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
606 err = -EOPNOTSUPP;
607 goto done;
608 }
609
610 err = hci_dev_do_reset(hdev);
611
612 done:
613 hci_dev_put_srcu(hdev, srcu_index);
614 return err;
615 }
616
int hci_dev_reset_stat(__u16 dev)
618 {
619 struct hci_dev *hdev;
620 int ret = 0;
621
622 hdev = hci_dev_get(dev);
623 if (!hdev)
624 return -ENODEV;
625
626 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
627 ret = -EBUSY;
628 goto done;
629 }
630
631 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
632 ret = -EOPNOTSUPP;
633 goto done;
634 }
635
636 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
637
638 done:
639 hci_dev_put(hdev);
640 return ret;
641 }
642
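/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync after the
 * scan mode was changed through the legacy HCISETSCAN ioctl rather than
 * through the management interface, and notify mgmt of the new settings.
 */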
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
644 {
645 bool conn_changed, discov_changed;
646
647 BT_DBG("%s scan 0x%02x", hdev->name, scan);
648
649 if ((scan & SCAN_PAGE))
650 conn_changed = !hci_dev_test_and_set_flag(hdev,
651 HCI_CONNECTABLE);
652 else
653 conn_changed = hci_dev_test_and_clear_flag(hdev,
654 HCI_CONNECTABLE);
655
656 if ((scan & SCAN_INQUIRY)) {
657 discov_changed = !hci_dev_test_and_set_flag(hdev,
658 HCI_DISCOVERABLE);
659 } else {
660 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
661 discov_changed = hci_dev_test_and_clear_flag(hdev,
662 HCI_DISCOVERABLE);
663 }
664
665 if (!hci_dev_test_flag(hdev, HCI_MGMT))
666 return;
667
668 if (conn_changed || discov_changed) {
669 /* In case this was disabled through mgmt */
670 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
671
672 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
673 hci_update_adv_data(hdev, hdev->cur_adv_instance);
674
675 mgmt_new_settings(hdev);
676 }
677 }
678
int hci_dev_cmd(unsigned int cmd, void __user *arg)
680 {
681 struct hci_dev *hdev;
682 struct hci_dev_req dr;
683 __le16 policy;
684 int err = 0;
685
686 if (copy_from_user(&dr, arg, sizeof(dr)))
687 return -EFAULT;
688
689 hdev = hci_dev_get(dr.dev_id);
690 if (!hdev)
691 return -ENODEV;
692
693 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
694 err = -EBUSY;
695 goto done;
696 }
697
698 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
699 err = -EOPNOTSUPP;
700 goto done;
701 }
702
703 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
704 err = -EOPNOTSUPP;
705 goto done;
706 }
707
708 switch (cmd) {
709 case HCISETAUTH:
710 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
711 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
712 break;
713
714 case HCISETENCRYPT:
715 if (!lmp_encrypt_capable(hdev)) {
716 err = -EOPNOTSUPP;
717 break;
718 }
719
720 if (!test_bit(HCI_AUTH, &hdev->flags)) {
721 /* Auth must be enabled first */
722 err = hci_cmd_sync_status(hdev,
723 HCI_OP_WRITE_AUTH_ENABLE,
724 1, &dr.dev_opt,
725 HCI_CMD_TIMEOUT);
726 if (err)
727 break;
728 }
729
730 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
731 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
732 break;
733
734 case HCISETSCAN:
735 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
736 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
737
738 /* Ensure that the connectable and discoverable states
739 * get correctly modified as this was a non-mgmt change.
740 */
741 if (!err)
742 hci_update_passive_scan_state(hdev, dr.dev_opt);
743 break;
744
745 case HCISETLINKPOL:
746 policy = cpu_to_le16(dr.dev_opt);
747
748 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
749 2, &policy, HCI_CMD_TIMEOUT);
750 break;
751
752 case HCISETLINKMODE:
753 hdev->link_mode = ((__u16) dr.dev_opt) &
754 (HCI_LM_MASTER | HCI_LM_ACCEPT);
755 break;
756
757 case HCISETPTYPE:
758 if (hdev->pkt_type == (__u16) dr.dev_opt)
759 break;
760
761 hdev->pkt_type = (__u16) dr.dev_opt;
762 mgmt_phy_configuration_changed(hdev, NULL);
763 break;
764
765 case HCISETACLMTU:
766 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
767 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
768 break;
769
770 case HCISETSCOMTU:
771 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
772 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
773 break;
774
775 default:
776 err = -EINVAL;
777 break;
778 }
779
780 done:
781 hci_dev_put(hdev);
782 return err;
783 }
784
int hci_get_dev_list(void __user *arg)
786 {
787 struct hci_dev *hdev;
788 struct hci_dev_list_req *dl;
789 struct hci_dev_req *dr;
790 int n = 0, err;
791 __u16 dev_num;
792
793 if (get_user(dev_num, (__u16 __user *) arg))
794 return -EFAULT;
795
796 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
797 return -EINVAL;
798
799 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
800 if (!dl)
801 return -ENOMEM;
802
803 dl->dev_num = dev_num;
804 dr = dl->dev_req;
805
806 read_lock(&hci_dev_list_lock);
807 list_for_each_entry(hdev, &hci_dev_list, list) {
808 unsigned long flags = hdev->flags;
809
810 /* When the auto-off is configured it means the transport
811 * is running, but in that case still indicate that the
812 * device is actually down.
813 */
814 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
815 flags &= ~BIT(HCI_UP);
816
817 dr[n].dev_id = hdev->id;
818 dr[n].dev_opt = flags;
819
820 if (++n >= dev_num)
821 break;
822 }
823 read_unlock(&hci_dev_list_lock);
824
825 dl->dev_num = n;
826 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
827 kfree(dl);
828
829 return err ? -EFAULT : 0;
830 }
831
int hci_get_dev_info(void __user *arg)
833 {
834 struct hci_dev *hdev;
835 struct hci_dev_info di;
836 unsigned long flags;
837 int err = 0;
838
839 if (copy_from_user(&di, arg, sizeof(di)))
840 return -EFAULT;
841
842 hdev = hci_dev_get(di.dev_id);
843 if (!hdev)
844 return -ENODEV;
845
846 /* When the auto-off is configured it means the transport
847 * is running, but in that case still indicate that the
848 * device is actually down.
849 */
850 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
851 flags = hdev->flags & ~BIT(HCI_UP);
852 else
853 flags = hdev->flags;
854
855 strscpy(di.name, hdev->name, sizeof(di.name));
856 di.bdaddr = hdev->bdaddr;
857 di.type = (hdev->bus & 0x0f);
858 di.flags = flags;
859 di.pkt_type = hdev->pkt_type;
860 if (lmp_bredr_capable(hdev)) {
861 di.acl_mtu = hdev->acl_mtu;
862 di.acl_pkts = hdev->acl_pkts;
863 di.sco_mtu = hdev->sco_mtu;
864 di.sco_pkts = hdev->sco_pkts;
865 } else {
866 di.acl_mtu = hdev->le_mtu;
867 di.acl_pkts = hdev->le_pkts;
868 di.sco_mtu = 0;
869 di.sco_pkts = 0;
870 }
871 di.link_policy = hdev->link_policy;
872 di.link_mode = hdev->link_mode;
873
874 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
875 memcpy(&di.features, &hdev->features, sizeof(di.features));
876
877 if (copy_to_user(arg, &di, sizeof(di)))
878 err = -EFAULT;
879
880 hci_dev_put(hdev);
881
882 return err;
883 }
884
885 /* ---- Interface to HCI drivers ---- */
886
static int hci_dev_do_poweroff(struct hci_dev *hdev)
888 {
889 int err;
890
891 BT_DBG("%s %p", hdev->name, hdev);
892
893 hci_req_sync_lock(hdev);
894
895 err = hci_set_powered_sync(hdev, false);
896
897 hci_req_sync_unlock(hdev);
898
899 return err;
900 }
901
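/* rfkill set_block callback: power the controller down when it gets
 * blocked (unless it is still in setup/config) and only clear the
 * HCI_RFKILLED flag again when it gets unblocked.
 */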
static int hci_rfkill_set_block(void *data, bool blocked)
903 {
904 struct hci_dev *hdev = data;
905 int err;
906
907 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
908
909 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
910 return -EBUSY;
911
912 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
913 return 0;
914
915 if (blocked) {
916 hci_dev_set_flag(hdev, HCI_RFKILLED);
917
918 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
919 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
920 err = hci_dev_do_poweroff(hdev);
921 if (err) {
922 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
923 err);
924
925 /* Make sure the device is still closed even if
926 * anything during power off sequence (eg.
927 * disconnecting devices) failed.
928 */
929 hci_dev_do_close(hdev);
930 }
931 }
932 } else {
933 hci_dev_clear_flag(hdev, HCI_RFKILLED);
934 }
935
936 return 0;
937 }
938
939 static const struct rfkill_ops hci_rfkill_ops = {
940 .set_block = hci_rfkill_set_block,
941 };
942
static void hci_power_on(struct work_struct *work)
944 {
945 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
946 int err;
947
948 BT_DBG("%s", hdev->name);
949
950 if (test_bit(HCI_UP, &hdev->flags) &&
951 hci_dev_test_flag(hdev, HCI_MGMT) &&
952 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
953 cancel_delayed_work(&hdev->power_off);
954 err = hci_powered_update_sync(hdev);
955 mgmt_power_on(hdev, err);
956 return;
957 }
958
959 err = hci_dev_do_open(hdev);
960 if (err < 0) {
961 hci_dev_lock(hdev);
962 mgmt_set_powered_failed(hdev, err);
963 hci_dev_unlock(hdev);
964 return;
965 }
966
967 /* During the HCI setup phase, a few error conditions are
968 * ignored and they need to be checked now. If they are still
969 * valid, it is important to turn the device back off.
970 */
971 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
972 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
973 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
975 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
976 hci_dev_do_close(hdev);
977 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
979 HCI_AUTO_OFF_TIMEOUT);
980 }
981
982 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
983 /* For unconfigured devices, set the HCI_RAW flag
984 * so that userspace can easily identify them.
985 */
986 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
987 set_bit(HCI_RAW, &hdev->flags);
988
989 /* For fully configured devices, this will send
990 * the Index Added event. For unconfigured devices,
 * it will send an Unconfigured Index Added event.
992 *
993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 * and no event will be sent.
995 */
996 mgmt_index_added(hdev);
997 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
998 /* When the controller is now configured, then it
999 * is important to clear the HCI_RAW flag.
1000 */
1001 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1002 clear_bit(HCI_RAW, &hdev->flags);
1003
1004 /* Powering on the controller with HCI_CONFIG set only
1005 * happens with the transition from unconfigured to
1006 * configured. This will send the Index Added event.
1007 */
1008 mgmt_index_added(hdev);
1009 }
1010 }
1011
static void hci_power_off(struct work_struct *work)
1013 {
1014 struct hci_dev *hdev = container_of(work, struct hci_dev,
1015 power_off.work);
1016
1017 BT_DBG("%s", hdev->name);
1018
1019 hci_dev_do_close(hdev);
1020 }
1021
static void hci_error_reset(struct work_struct *work)
1023 {
1024 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1025
1026 hci_dev_hold(hdev);
1027 BT_DBG("%s", hdev->name);
1028
1029 if (hdev->hw_error)
1030 hdev->hw_error(hdev, hdev->hw_error_code);
1031 else
1032 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1033
1034 if (!hci_dev_do_close(hdev))
1035 hci_dev_do_open(hdev);
1036
1037 hci_dev_put(hdev);
1038 }
1039
void hci_uuids_clear(struct hci_dev *hdev)
1041 {
1042 struct bt_uuid *uuid, *tmp;
1043
1044 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1045 list_del(&uuid->list);
1046 kfree(uuid);
1047 }
1048 }
1049
void hci_link_keys_clear(struct hci_dev *hdev)
1051 {
1052 struct link_key *key, *tmp;
1053
1054 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1055 list_del_rcu(&key->list);
1056 kfree_rcu(key, rcu);
1057 }
1058 }
1059
void hci_smp_ltks_clear(struct hci_dev *hdev)
1061 {
1062 struct smp_ltk *k, *tmp;
1063
1064 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1065 list_del_rcu(&k->list);
1066 kfree_rcu(k, rcu);
1067 }
1068 }
1069
void hci_smp_irks_clear(struct hci_dev *hdev)
1071 {
1072 struct smp_irk *k, *tmp;
1073
1074 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1075 list_del_rcu(&k->list);
1076 kfree_rcu(k, rcu);
1077 }
1078 }
1079
void hci_blocked_keys_clear(struct hci_dev *hdev)
1081 {
1082 struct blocked_key *b, *tmp;
1083
1084 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1085 list_del_rcu(&b->list);
1086 kfree_rcu(b, rcu);
1087 }
1088 }
1089
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1091 {
1092 bool blocked = false;
1093 struct blocked_key *b;
1094
1095 rcu_read_lock();
1096 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1097 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1098 blocked = true;
1099 break;
1100 }
1101 }
1102
1103 rcu_read_unlock();
1104 return blocked;
1105 }
1106
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1108 {
1109 struct link_key *k;
1110
1111 rcu_read_lock();
1112 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1113 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1114 rcu_read_unlock();
1115
1116 if (hci_is_blocked_key(hdev,
1117 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1118 k->val)) {
1119 bt_dev_warn_ratelimited(hdev,
1120 "Link key blocked for %pMR",
1121 &k->bdaddr);
1122 return NULL;
1123 }
1124
1125 return k;
1126 }
1127 }
1128 rcu_read_unlock();
1129
1130 return NULL;
1131 }
1132
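/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and on the local/remote authentication
 * requirements of the connection it was created on.
 */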
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
1135 {
1136 /* Legacy key */
1137 if (key_type < 0x03)
1138 return true;
1139
1140 /* Debug keys are insecure so don't store them persistently */
1141 if (key_type == HCI_LK_DEBUG_COMBINATION)
1142 return false;
1143
1144 /* Changed combination key and there's no previous one */
1145 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1146 return false;
1147
1148 /* Security mode 3 case */
1149 if (!conn)
1150 return true;
1151
1152 /* BR/EDR key derived using SC from an LE link */
1153 if (conn->type == LE_LINK)
1154 return true;
1155
1156 /* Neither local nor remote side had no-bonding as requirement */
1157 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1158 return true;
1159
1160 /* Local side had dedicated bonding as requirement */
1161 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1162 return true;
1163
1164 /* Remote side had dedicated bonding as requirement */
1165 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1166 return true;
1167
1168 /* If none of the above criteria match, then don't store the key
1169 * persistently */
1170 return false;
1171 }
1172
static u8 ltk_role(u8 type)
1174 {
1175 if (type == SMP_LTK)
1176 return HCI_ROLE_MASTER;
1177
1178 return HCI_ROLE_SLAVE;
1179 }
1180
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
1183 {
1184 struct smp_ltk *k;
1185
1186 rcu_read_lock();
1187 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1188 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1189 continue;
1190
1191 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1192 rcu_read_unlock();
1193
1194 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1195 k->val)) {
1196 bt_dev_warn_ratelimited(hdev,
1197 "LTK blocked for %pMR",
1198 &k->bdaddr);
1199 return NULL;
1200 }
1201
1202 return k;
1203 }
1204 }
1205 rcu_read_unlock();
1206
1207 return NULL;
1208 }
1209
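/* Look up the IRK matching a Resolvable Private Address: first check the
 * RPA cached in each IRK, then try to cryptographically resolve @rpa
 * against every stored IRK and cache the result on success.
 */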
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1211 {
1212 struct smp_irk *irk_to_return = NULL;
1213 struct smp_irk *irk;
1214
1215 rcu_read_lock();
1216 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1217 if (!bacmp(&irk->rpa, rpa)) {
1218 irk_to_return = irk;
1219 goto done;
1220 }
1221 }
1222
1223 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1224 if (smp_irk_matches(hdev, irk->val, rpa)) {
1225 bacpy(&irk->rpa, rpa);
1226 irk_to_return = irk;
1227 goto done;
1228 }
1229 }
1230
1231 done:
1232 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1233 irk_to_return->val)) {
1234 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1235 &irk_to_return->bdaddr);
1236 irk_to_return = NULL;
1237 }
1238
1239 rcu_read_unlock();
1240
1241 return irk_to_return;
1242 }
1243
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
1246 {
1247 struct smp_irk *irk_to_return = NULL;
1248 struct smp_irk *irk;
1249
1250 /* Identity Address must be public or static random */
1251 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1252 return NULL;
1253
1254 rcu_read_lock();
1255 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1256 if (addr_type == irk->addr_type &&
1257 bacmp(bdaddr, &irk->bdaddr) == 0) {
1258 irk_to_return = irk;
1259 break;
1260 }
1261 }
1262
1263 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1264 irk_to_return->val)) {
1265 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1266 &irk_to_return->bdaddr);
1267 irk_to_return = NULL;
1268 }
1269
1270 rcu_read_unlock();
1271
1272 return irk_to_return;
1273 }
1274
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
1278 {
1279 struct link_key *key, *old_key;
1280 u8 old_key_type;
1281
1282 old_key = hci_find_link_key(hdev, bdaddr);
1283 if (old_key) {
1284 old_key_type = old_key->type;
1285 key = old_key;
1286 } else {
1287 old_key_type = conn ? conn->key_type : 0xff;
1288 key = kzalloc(sizeof(*key), GFP_KERNEL);
1289 if (!key)
1290 return NULL;
1291 list_add_rcu(&key->list, &hdev->link_keys);
1292 }
1293
1294 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1295
1296 /* Some buggy controller combinations generate a changed
1297 * combination key for legacy pairing even when there's no
1298 * previous key */
1299 if (type == HCI_LK_CHANGED_COMBINATION &&
1300 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1301 type = HCI_LK_COMBINATION;
1302 if (conn)
1303 conn->key_type = type;
1304 }
1305
1306 bacpy(&key->bdaddr, bdaddr);
1307 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1308 key->pin_len = pin_len;
1309
1310 if (type == HCI_LK_CHANGED_COMBINATION)
1311 key->type = old_key_type;
1312 else
1313 key->type = type;
1314
1315 if (persistent)
1316 *persistent = hci_persistent_key(hdev, conn, type,
1317 old_key_type);
1318
1319 return key;
1320 }
1321
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1325 {
1326 struct smp_ltk *key, *old_key;
1327 u8 role = ltk_role(type);
1328
1329 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1330 if (old_key)
1331 key = old_key;
1332 else {
1333 key = kzalloc(sizeof(*key), GFP_KERNEL);
1334 if (!key)
1335 return NULL;
1336 list_add_rcu(&key->list, &hdev->long_term_keys);
1337 }
1338
1339 bacpy(&key->bdaddr, bdaddr);
1340 key->bdaddr_type = addr_type;
1341 memcpy(key->val, tk, sizeof(key->val));
1342 key->authenticated = authenticated;
1343 key->ediv = ediv;
1344 key->rand = rand;
1345 key->enc_size = enc_size;
1346 key->type = type;
1347
1348 return key;
1349 }
1350
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1353 {
1354 struct smp_irk *irk;
1355
1356 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1357 if (!irk) {
1358 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1359 if (!irk)
1360 return NULL;
1361
1362 bacpy(&irk->bdaddr, bdaddr);
1363 irk->addr_type = addr_type;
1364
1365 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1366 }
1367
1368 memcpy(irk->val, val, 16);
1369 bacpy(&irk->rpa, rpa);
1370
1371 return irk;
1372 }
1373
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1375 {
1376 struct link_key *key;
1377
1378 key = hci_find_link_key(hdev, bdaddr);
1379 if (!key)
1380 return -ENOENT;
1381
1382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1383
1384 list_del_rcu(&key->list);
1385 kfree_rcu(key, rcu);
1386
1387 return 0;
1388 }
1389
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1391 {
1392 struct smp_ltk *k, *tmp;
1393 int removed = 0;
1394
1395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1396 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1397 continue;
1398
1399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1400
1401 list_del_rcu(&k->list);
1402 kfree_rcu(k, rcu);
1403 removed++;
1404 }
1405
1406 return removed ? 0 : -ENOENT;
1407 }
1408
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1410 {
1411 struct smp_irk *k, *tmp;
1412
1413 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1414 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1415 continue;
1416
1417 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1418
1419 list_del_rcu(&k->list);
1420 kfree_rcu(k, rcu);
1421 }
1422 }
1423
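/* Return true if pairing data is stored for the given address: a link
 * key for BR/EDR, or an LTK for LE, resolving the address to its
 * identity address first when a matching IRK is known.
 */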
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1425 {
1426 struct smp_ltk *k;
1427 struct smp_irk *irk;
1428 u8 addr_type;
1429
1430 if (type == BDADDR_BREDR) {
1431 if (hci_find_link_key(hdev, bdaddr))
1432 return true;
1433 return false;
1434 }
1435
1436 /* Convert to HCI addr type which struct smp_ltk uses */
1437 if (type == BDADDR_LE_PUBLIC)
1438 addr_type = ADDR_LE_DEV_PUBLIC;
1439 else
1440 addr_type = ADDR_LE_DEV_RANDOM;
1441
1442 irk = hci_get_irk(hdev, bdaddr, addr_type);
1443 if (irk) {
1444 bdaddr = &irk->bdaddr;
1445 addr_type = irk->addr_type;
1446 }
1447
1448 rcu_read_lock();
1449 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1450 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1451 rcu_read_unlock();
1452 return true;
1453 }
1454 }
1455 rcu_read_unlock();
1456
1457 return false;
1458 }
1459
1460 /* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
1462 {
1463 struct hci_dev *hdev = container_of(work, struct hci_dev,
1464 cmd_timer.work);
1465
1466 if (hdev->req_skb) {
1467 u16 opcode = hci_skb_opcode(hdev->req_skb);
1468
1469 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1470
1471 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1472 } else {
1473 bt_dev_err(hdev, "command tx timeout");
1474 }
1475
1476 if (hdev->reset)
1477 hdev->reset(hdev);
1478
1479 atomic_set(&hdev->cmd_cnt, 1);
1480 queue_work(hdev->workqueue, &hdev->cmd_work);
1481 }
1482
1483 /* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
1485 {
1486 struct hci_dev *hdev = container_of(work, struct hci_dev,
1487 ncmd_timer.work);
1488
1489 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1490
1491 /* During HCI_INIT phase no events can be injected if the ncmd timer
1492 * triggers since the procedure has its own timeout handling.
1493 */
1494 if (test_bit(HCI_INIT, &hdev->flags))
1495 return;
1496
1497 /* This is an irrecoverable state, inject hardware error event */
1498 hci_reset_dev(hdev);
1499 }
1500
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
1503 {
1504 struct oob_data *data;
1505
1506 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1507 if (bacmp(bdaddr, &data->bdaddr) != 0)
1508 continue;
1509 if (data->bdaddr_type != bdaddr_type)
1510 continue;
1511 return data;
1512 }
1513
1514 return NULL;
1515 }
1516
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
1519 {
1520 struct oob_data *data;
1521
1522 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1523 if (!data)
1524 return -ENOENT;
1525
1526 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1527
1528 list_del(&data->list);
1529 kfree(data);
1530
1531 return 0;
1532 }
1533
void hci_remote_oob_data_clear(struct hci_dev *hdev)
1535 {
1536 struct oob_data *data, *n;
1537
1538 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1539 list_del(&data->list);
1540 kfree(data);
1541 }
1542 }
1543
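/* Store remote OOB pairing data for @bdaddr. The present field encodes
 * which values are valid: 0x01 for P-192 only, 0x02 for P-256 only and
 * 0x03 for both.
 */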
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
1547 {
1548 struct oob_data *data;
1549
1550 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1551 if (!data) {
1552 data = kmalloc(sizeof(*data), GFP_KERNEL);
1553 if (!data)
1554 return -ENOMEM;
1555
1556 bacpy(&data->bdaddr, bdaddr);
1557 data->bdaddr_type = bdaddr_type;
1558 list_add(&data->list, &hdev->remote_oob_data);
1559 }
1560
1561 if (hash192 && rand192) {
1562 memcpy(data->hash192, hash192, sizeof(data->hash192));
1563 memcpy(data->rand192, rand192, sizeof(data->rand192));
1564 if (hash256 && rand256)
1565 data->present = 0x03;
1566 } else {
1567 memset(data->hash192, 0, sizeof(data->hash192));
1568 memset(data->rand192, 0, sizeof(data->rand192));
1569 if (hash256 && rand256)
1570 data->present = 0x02;
1571 else
1572 data->present = 0x00;
1573 }
1574
1575 if (hash256 && rand256) {
1576 memcpy(data->hash256, hash256, sizeof(data->hash256));
1577 memcpy(data->rand256, rand256, sizeof(data->rand256));
1578 } else {
1579 memset(data->hash256, 0, sizeof(data->hash256));
1580 memset(data->rand256, 0, sizeof(data->rand256));
1581 if (hash192 && rand192)
1582 data->present = 0x01;
1583 }
1584
1585 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1586
1587 return 0;
1588 }
1589
1590 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1592 {
1593 struct adv_info *adv_instance;
1594
1595 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1596 if (adv_instance->instance == instance)
1597 return adv_instance;
1598 }
1599
1600 return NULL;
1601 }
1602
1603 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid)
1605 {
1606 struct adv_info *adv;
1607
1608 list_for_each_entry(adv, &hdev->adv_instances, list) {
1609 if (adv->sid == sid)
1610 return adv;
1611 }
1612
1613 return NULL;
1614 }
1615
1616 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1618 {
1619 struct adv_info *cur_instance;
1620
1621 cur_instance = hci_find_adv_instance(hdev, instance);
1622 if (!cur_instance)
1623 return NULL;
1624
1625 if (cur_instance == list_last_entry(&hdev->adv_instances,
1626 struct adv_info, list))
1627 return list_first_entry(&hdev->adv_instances,
1628 struct adv_info, list);
1629 else
1630 return list_next_entry(cur_instance, list);
1631 }
1632
1633 /* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1635 {
1636 struct adv_info *adv_instance;
1637
1638 adv_instance = hci_find_adv_instance(hdev, instance);
1639 if (!adv_instance)
1640 return -ENOENT;
1641
BT_DBG("%s removing instance %d", hdev->name, instance);
1643
1644 if (hdev->cur_adv_instance == instance) {
1645 if (hdev->adv_instance_timeout) {
1646 cancel_delayed_work(&hdev->adv_instance_expire);
1647 hdev->adv_instance_timeout = 0;
1648 }
1649 hdev->cur_adv_instance = 0x00;
1650 }
1651
1652 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1653
1654 list_del(&adv_instance->list);
1655 kfree(adv_instance);
1656
1657 hdev->adv_instance_cnt--;
1658
1659 return 0;
1660 }
1661
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1663 {
1664 struct adv_info *adv_instance, *n;
1665
1666 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1667 adv_instance->rpa_expired = rpa_expired;
1668 }
1669
1670 /* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
1672 {
1673 struct adv_info *adv_instance, *n;
1674
1675 if (hdev->adv_instance_timeout) {
1676 disable_delayed_work(&hdev->adv_instance_expire);
1677 hdev->adv_instance_timeout = 0;
1678 }
1679
1680 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1681 disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1682 list_del(&adv_instance->list);
1683 kfree(adv_instance);
1684 }
1685
1686 hdev->adv_instance_cnt = 0;
1687 hdev->cur_adv_instance = 0x00;
1688 }
1689
static void adv_instance_rpa_expired(struct work_struct *work)
1691 {
1692 struct adv_info *adv_instance = container_of(work, struct adv_info,
1693 rpa_expired_cb.work);
1694
1695 BT_DBG("");
1696
1697 adv_instance->rpa_expired = true;
1698 }
1699
1700 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
1707 {
1708 struct adv_info *adv;
1709
1710 adv = hci_find_adv_instance(hdev, instance);
1711 if (adv) {
1712 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1713 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1714 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1715 } else {
1716 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1717 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1718 return ERR_PTR(-EOVERFLOW);
1719
1720 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1721 if (!adv)
1722 return ERR_PTR(-ENOMEM);
1723
1724 adv->pending = true;
1725 adv->instance = instance;
1726
/* If the controller supports only one set and the instance is set to
 * 1 then there is no option other than using handle 0x00.
1729 */
1730 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1731 adv->handle = 0x00;
1732 else
1733 adv->handle = instance;
1734
1735 list_add(&adv->list, &hdev->adv_instances);
1736 hdev->adv_instance_cnt++;
1737 }
1738
1739 adv->flags = flags;
1740 adv->min_interval = min_interval;
1741 adv->max_interval = max_interval;
1742 adv->tx_power = tx_power;
1743 /* Defining a mesh_handle changes the timing units to ms,
1744 * rather than seconds, and ties the instance to the requested
1745 * mesh_tx queue.
1746 */
1747 adv->mesh = mesh_handle;
1748
1749 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1750 scan_rsp_len, scan_rsp_data);
1751
1752 adv->timeout = timeout;
1753 adv->remaining_time = timeout;
1754
1755 if (duration == 0)
1756 adv->duration = hdev->def_multi_adv_rotation_duration;
1757 else
1758 adv->duration = duration;
1759
1760 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1761
BT_DBG("%s for instance %d", hdev->name, instance);
1763
1764 return adv;
1765 }
1766
1767 /* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
1771 {
1772 struct adv_info *adv;
1773
1774 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1775 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1776 min_interval, max_interval, 0);
1777 if (IS_ERR(adv))
1778 return adv;
1779
1780 adv->sid = sid;
1781 adv->periodic = true;
1782 adv->per_adv_data_len = data_len;
1783
1784 if (data)
1785 memcpy(adv->per_adv_data, data, data_len);
1786
1787 return adv;
1788 }
1789
1790 /* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
1794 {
1795 struct adv_info *adv;
1796
1797 adv = hci_find_adv_instance(hdev, instance);
1798
1799 /* If advertisement doesn't exist, we can't modify its data */
1800 if (!adv)
1801 return -ENOENT;
1802
1803 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1804 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1805 memcpy(adv->adv_data, adv_data, adv_data_len);
1806 adv->adv_data_len = adv_data_len;
1807 adv->adv_data_changed = true;
1808 }
1809
1810 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1811 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1812 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1813 adv->scan_rsp_len = scan_rsp_len;
1814 adv->scan_rsp_changed = true;
1815 }
1816
1817 /* Mark as changed if there are flags which would affect it */
1818 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1819 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1820 adv->scan_rsp_changed = true;
1821
1822 return 0;
1823 }
1824
1825 /* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1827 {
1828 u32 flags;
1829 struct adv_info *adv;
1830
1831 if (instance == 0x00) {
1832 /* Instance 0 always manages the "Tx Power" and "Flags"
1833 * fields
1834 */
1835 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1836
1837 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1838 * corresponds to the "connectable" instance flag.
1839 */
1840 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1841 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1842
1843 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1844 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1845 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1846 flags |= MGMT_ADV_FLAG_DISCOV;
1847
1848 return flags;
1849 }
1850
1851 adv = hci_find_adv_instance(hdev, instance);
1852
1853 /* Return 0 when we got an invalid instance identifier. */
1854 if (!adv)
1855 return 0;
1856
1857 return adv->flags;
1858 }
1859
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1861 {
1862 struct adv_info *adv;
1863
/* Instance 0x00 always sets the local name */
1865 if (instance == 0x00)
1866 return true;
1867
1868 adv = hci_find_adv_instance(hdev, instance);
1869 if (!adv)
1870 return false;
1871
1872 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1873 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1874 return true;
1875
1876 return adv->scan_rsp_len ? true : false;
1877 }
1878
1879 /* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
1881 {
1882 struct adv_monitor *monitor;
1883 int handle;
1884
1885 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1886 hci_free_adv_monitor(hdev, monitor);
1887
1888 idr_destroy(&hdev->adv_monitors_idr);
1889 }
1890
/* Frees the monitor structure and does some bookkeeping.
1892 * This function requires the caller holds hdev->lock.
1893 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1895 {
1896 struct adv_pattern *pattern;
1897 struct adv_pattern *tmp;
1898
1899 if (!monitor)
1900 return;
1901
1902 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1903 list_del(&pattern->list);
1904 kfree(pattern);
1905 }
1906
1907 if (monitor->handle)
1908 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1909
1910 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1911 hdev->adv_monitors_cnt--;
1912
1913 kfree(monitor);
1914 }
1915
1916 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1917 * also attempts to forward the request to the controller.
1918 * This function requires the caller holds hci_req_sync_lock.
1919 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1921 {
1922 int min, max, handle;
1923 int status = 0;
1924
1925 if (!monitor)
1926 return -EINVAL;
1927
1928 hci_dev_lock(hdev);
1929
1930 min = HCI_MIN_ADV_MONITOR_HANDLE;
1931 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1932 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1933 GFP_KERNEL);
1934
1935 hci_dev_unlock(hdev);
1936
1937 if (handle < 0)
1938 return handle;
1939
1940 monitor->handle = handle;
1941
1942 if (!hdev_is_powered(hdev))
1943 return status;
1944
1945 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1946 case HCI_ADV_MONITOR_EXT_NONE:
1947 bt_dev_dbg(hdev, "add monitor %d status %d",
1948 monitor->handle, status);
1949 /* Message was not forwarded to controller - not an error */
1950 break;
1951
1952 case HCI_ADV_MONITOR_EXT_MSFT:
1953 status = msft_add_monitor_pattern(hdev, monitor);
1954 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1955 handle, status);
1956 break;
1957 }
1958
1959 return status;
1960 }
1961
1962 /* Attempts to tell the controller and free the monitor. If somehow the
1963 * controller doesn't have a corresponding handle, remove anyway.
1964 * This function requires the caller holds hci_req_sync_lock.
1965 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
1968 {
1969 int status = 0;
1970 int handle;
1971
1972 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1973 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1974 bt_dev_dbg(hdev, "remove monitor %d status %d",
1975 monitor->handle, status);
1976 goto free_monitor;
1977
1978 case HCI_ADV_MONITOR_EXT_MSFT:
1979 handle = monitor->handle;
1980 status = msft_remove_monitor(hdev, monitor);
1981 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1982 handle, status);
1983 break;
1984 }
1985
1986 /* In case no matching handle registered, just free the monitor */
1987 if (status == -ENOENT)
1988 goto free_monitor;
1989
1990 return status;
1991
1992 free_monitor:
1993 if (status == -ENOENT)
1994 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1995 monitor->handle);
1996 hci_free_adv_monitor(hdev, monitor);
1997
1998 return status;
1999 }
2000
2001 /* This function requires the caller holds hci_req_sync_lock */
2002 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2003 {
2004 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2005
2006 if (!monitor)
2007 return -EINVAL;
2008
2009 return hci_remove_adv_monitor(hdev, monitor);
2010 }
2011
2012 /* This function requires the caller holds hci_req_sync_lock */
2013 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2014 {
2015 struct adv_monitor *monitor;
2016 int idr_next_id = 0;
2017 int status = 0;
2018
2019 while (1) {
2020 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2021 if (!monitor)
2022 break;
2023
2024 status = hci_remove_adv_monitor(hdev, monitor);
2025 if (status)
2026 return status;
2027
2028 idr_next_id++;
2029 }
2030
2031 return status;
2032 }
2033
2034 /* This function requires the caller holds hdev->lock */
2035 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2036 {
2037 return !idr_is_empty(&hdev->adv_monitors_idr);
2038 }
2039
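/* Returns which advertisement monitor offloading extension the controller
 * supports; currently only the Microsoft vendor extension is recognized.
 */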
2040 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2041 {
2042 if (msft_monitor_supported(hdev))
2043 return HCI_ADV_MONITOR_EXT_MSFT;
2044
2045 return HCI_ADV_MONITOR_EXT_NONE;
2046 }
2047
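/* Generic bdaddr list lookup helpers: an entry matches only when both the
 * address and the address type are equal.
 */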
2048 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2049 bdaddr_t *bdaddr, u8 type)
2050 {
2051 struct bdaddr_list *b;
2052
2053 list_for_each_entry(b, bdaddr_list, list) {
2054 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2055 return b;
2056 }
2057
2058 return NULL;
2059 }
2060
2061 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2062 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2063 u8 type)
2064 {
2065 struct bdaddr_list_with_irk *b;
2066
2067 list_for_each_entry(b, bdaddr_list, list) {
2068 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2069 return b;
2070 }
2071
2072 return NULL;
2073 }
2074
2075 struct bdaddr_list_with_flags *
2076 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2077 bdaddr_t *bdaddr, u8 type)
2078 {
2079 struct bdaddr_list_with_flags *b;
2080
2081 list_for_each_entry(b, bdaddr_list, list) {
2082 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2083 return b;
2084 }
2085
2086 return NULL;
2087 }
2088
2089 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2090 {
2091 struct bdaddr_list *b, *n;
2092
2093 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2094 list_del(&b->list);
2095 kfree(b);
2096 }
2097 }
2098
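/* Adds an entry to a bdaddr list. BDADDR_ANY is rejected with -EBADF and a
 * duplicate entry with -EEXIST. A minimal usage sketch (list choice and
 * locking are up to the caller, e.g. hdev->lock for the hdev lists):
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  BDADDR_LE_PUBLIC);
 *	if (err && err != -EEXIST)
 *		return err;
 */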
2099 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2100 {
2101 struct bdaddr_list *entry;
2102
2103 if (!bacmp(bdaddr, BDADDR_ANY))
2104 return -EBADF;
2105
2106 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2107 return -EEXIST;
2108
2109 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2110 if (!entry)
2111 return -ENOMEM;
2112
2113 bacpy(&entry->bdaddr, bdaddr);
2114 entry->bdaddr_type = type;
2115
2116 list_add(&entry->list, list);
2117
2118 return 0;
2119 }
2120
2121 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2122 u8 type, u8 *peer_irk, u8 *local_irk)
2123 {
2124 struct bdaddr_list_with_irk *entry;
2125
2126 if (!bacmp(bdaddr, BDADDR_ANY))
2127 return -EBADF;
2128
2129 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2130 return -EEXIST;
2131
2132 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2133 if (!entry)
2134 return -ENOMEM;
2135
2136 bacpy(&entry->bdaddr, bdaddr);
2137 entry->bdaddr_type = type;
2138
2139 if (peer_irk)
2140 memcpy(entry->peer_irk, peer_irk, 16);
2141
2142 if (local_irk)
2143 memcpy(entry->local_irk, local_irk, 16);
2144
2145 list_add(&entry->list, list);
2146
2147 return 0;
2148 }
2149
2150 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2151 u8 type, u32 flags)
2152 {
2153 struct bdaddr_list_with_flags *entry;
2154
2155 if (!bacmp(bdaddr, BDADDR_ANY))
2156 return -EBADF;
2157
2158 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2159 return -EEXIST;
2160
2161 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2162 if (!entry)
2163 return -ENOMEM;
2164
2165 bacpy(&entry->bdaddr, bdaddr);
2166 entry->bdaddr_type = type;
2167 entry->flags = flags;
2168
2169 list_add(&entry->list, list);
2170
2171 return 0;
2172 }
2173
2174 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2175 {
2176 struct bdaddr_list *entry;
2177
2178 if (!bacmp(bdaddr, BDADDR_ANY)) {
2179 hci_bdaddr_list_clear(list);
2180 return 0;
2181 }
2182
2183 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2184 if (!entry)
2185 return -ENOENT;
2186
2187 list_del(&entry->list);
2188 kfree(entry);
2189
2190 return 0;
2191 }
2192
2193 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2194 u8 type)
2195 {
2196 struct bdaddr_list_with_irk *entry;
2197
2198 if (!bacmp(bdaddr, BDADDR_ANY)) {
2199 hci_bdaddr_list_clear(list);
2200 return 0;
2201 }
2202
2203 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2204 if (!entry)
2205 return -ENOENT;
2206
2207 list_del(&entry->list);
2208 kfree(entry);
2209
2210 return 0;
2211 }
2212
2213 /* This function requires the caller holds hdev->lock */
2214 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2215 bdaddr_t *addr, u8 addr_type)
2216 {
2217 struct hci_conn_params *params;
2218
2219 list_for_each_entry(params, &hdev->le_conn_params, list) {
2220 if (bacmp(&params->addr, addr) == 0 &&
2221 params->addr_type == addr_type) {
2222 return params;
2223 }
2224 }
2225
2226 return NULL;
2227 }
2228
2229 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2230 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2231 bdaddr_t *addr, u8 addr_type)
2232 {
2233 struct hci_conn_params *param;
2234
2235 rcu_read_lock();
2236
2237 list_for_each_entry_rcu(param, list, action) {
2238 if (bacmp(&param->addr, addr) == 0 &&
2239 param->addr_type == addr_type) {
2240 rcu_read_unlock();
2241 return param;
2242 }
2243 }
2244
2245 rcu_read_unlock();
2246
2247 return NULL;
2248 }
2249
2250 /* This function requires the caller holds hdev->lock */
2251 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2252 {
2253 if (list_empty(&param->action))
2254 return;
2255
2256 list_del_rcu(&param->action);
2257 synchronize_rcu();
2258 INIT_LIST_HEAD(&param->action);
2259 }
2260
2261 /* This function requires the caller holds hdev->lock */
2262 void hci_pend_le_list_add(struct hci_conn_params *param,
2263 struct list_head *list)
2264 {
2265 list_add_rcu(&param->action, list);
2266 }
2267
2268 /* This function requires the caller holds hdev->lock */
2269 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2270 bdaddr_t *addr, u8 addr_type)
2271 {
2272 struct hci_conn_params *params;
2273
2274 params = hci_conn_params_lookup(hdev, addr, addr_type);
2275 if (params)
2276 return params;
2277
2278 params = kzalloc(sizeof(*params), GFP_KERNEL);
2279 if (!params) {
2280 bt_dev_err(hdev, "out of memory");
2281 return NULL;
2282 }
2283
2284 bacpy(&params->addr, addr);
2285 params->addr_type = addr_type;
2286
2287 list_add(&params->list, &hdev->le_conn_params);
2288 INIT_LIST_HEAD(&params->action);
2289
2290 params->conn_min_interval = hdev->le_conn_min_interval;
2291 params->conn_max_interval = hdev->le_conn_max_interval;
2292 params->conn_latency = hdev->le_conn_latency;
2293 params->supervision_timeout = hdev->le_supv_timeout;
2294 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2295
2296 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2297
2298 return params;
2299 }
2300
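/* Releases a conn_params entry: removes it from any pending action list,
 * drops the connection reference it may hold and frees the memory. All
 * callers in this file invoke it with hdev->lock held.
 */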
2301 void hci_conn_params_free(struct hci_conn_params *params)
2302 {
2303 hci_pend_le_list_del_init(params);
2304
2305 if (params->conn) {
2306 hci_conn_drop(params->conn);
2307 hci_conn_put(params->conn);
2308 }
2309
2310 list_del(&params->list);
2311 kfree(params);
2312 }
2313
2314 /* This function requires the caller holds hdev->lock */
2315 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2316 {
2317 struct hci_conn_params *params;
2318
2319 params = hci_conn_params_lookup(hdev, addr, addr_type);
2320 if (!params)
2321 return;
2322
2323 hci_conn_params_free(params);
2324
2325 hci_update_passive_scan(hdev);
2326
2327 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2328 }
2329
2330 /* This function requires the caller holds hdev->lock */
2331 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2332 {
2333 struct hci_conn_params *params, *tmp;
2334
2335 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2336 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2337 continue;
2338
2339 /* If we are trying to establish a one-time connection to a disabled
2340  * device, keep the params but mark them as explicit, i.e. just once.
2341  */
2342 if (params->explicit_connect) {
2343 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2344 continue;
2345 }
2346
2347 hci_conn_params_free(params);
2348 }
2349
2350 BT_DBG("All LE disabled connection parameters were removed");
2351 }
2352
2353 /* This function requires the caller holds hdev->lock */
2354 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2355 {
2356 struct hci_conn_params *params, *tmp;
2357
2358 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2359 hci_conn_params_free(params);
2360
2361 BT_DBG("All LE connection parameters were removed");
2362 }
2363
2364 /* Copy the Identity Address of the controller.
2365 *
2366 * If the controller has a public BD_ADDR, then by default use that one.
2367 * If this is a LE only controller without a public address, default to
2368 * the static random address.
2369 *
2370 * For debugging purposes it is possible to force controllers with a
2371 * public address to use the static random address instead.
2372 *
2373 * In case BR/EDR has been disabled on a dual-mode controller and
2374 * userspace has configured a static address, then that address
2375 * becomes the identity address instead of the public BR/EDR address.
2376 */
2377 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2378 u8 *bdaddr_type)
2379 {
2380 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2381 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2382 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2383 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2384 bacpy(bdaddr, &hdev->static_addr);
2385 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2386 } else {
2387 bacpy(bdaddr, &hdev->bdaddr);
2388 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2389 }
2390 }
2391
2392 static void hci_clear_wake_reason(struct hci_dev *hdev)
2393 {
2394 hci_dev_lock(hdev);
2395
2396 hdev->wake_reason = 0;
2397 bacpy(&hdev->wake_addr, BDADDR_ANY);
2398 hdev->wake_addr_type = 0;
2399
2400 hci_dev_unlock(hdev);
2401 }
2402
2403 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2404 void *data)
2405 {
2406 struct hci_dev *hdev =
2407 container_of(nb, struct hci_dev, suspend_notifier);
2408 int ret = 0;
2409
2410 /* Userspace has full control of this device. Do nothing. */
2411 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2412 return NOTIFY_DONE;
2413
2414 /* To avoid a potential race with hci_unregister_dev. */
2415 hci_dev_hold(hdev);
2416
2417 switch (action) {
2418 case PM_HIBERNATION_PREPARE:
2419 case PM_SUSPEND_PREPARE:
2420 ret = hci_suspend_dev(hdev);
2421 break;
2422 case PM_POST_HIBERNATION:
2423 case PM_POST_SUSPEND:
2424 ret = hci_resume_dev(hdev);
2425 break;
2426 }
2427
2428 if (ret)
2429 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2430 action, ret);
2431
2432 hci_dev_put(hdev);
2433 return NOTIFY_DONE;
2434 }
2435
2436 /* Alloc HCI device */
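/* Drivers that need per-device private data pass its size here and can,
 * assuming the usual helper from hci_core.h, retrieve the area later with
 * hci_get_priv(). A sketch (struct my_drv_data is just a placeholder):
 *
 *	hdev = hci_alloc_dev_priv(sizeof(struct my_drv_data));
 *	if (!hdev)
 *		return -ENOMEM;
 *	priv = hci_get_priv(hdev);
 */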
2437 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2438 {
2439 struct hci_dev *hdev;
2440 unsigned int alloc_size;
2441
2442 alloc_size = sizeof(*hdev);
2443 if (sizeof_priv) {
2444 /* Fixme: May need ALIGN-ment? */
2445 alloc_size += sizeof_priv;
2446 }
2447
2448 hdev = kzalloc(alloc_size, GFP_KERNEL);
2449 if (!hdev)
2450 return NULL;
2451
2452 if (init_srcu_struct(&hdev->srcu)) {
2453 kfree(hdev);
2454 return NULL;
2455 }
2456
2457 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2458 hdev->esco_type = (ESCO_HV1);
2459 hdev->link_mode = (HCI_LM_ACCEPT);
2460 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2461 hdev->io_capability = 0x03; /* No Input No Output */
2462 hdev->manufacturer = 0xffff; /* Default to internal use */
2463 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2464 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2465 hdev->adv_instance_cnt = 0;
2466 hdev->cur_adv_instance = 0x00;
2467 hdev->adv_instance_timeout = 0;
2468
2469 hdev->advmon_allowlist_duration = 300;
2470 hdev->advmon_no_filter_duration = 500;
2471 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2472
2473 hdev->sniff_max_interval = 800;
2474 hdev->sniff_min_interval = 80;
2475
2476 hdev->le_adv_channel_map = 0x07;
2477 hdev->le_adv_min_interval = 0x0800;
2478 hdev->le_adv_max_interval = 0x0800;
2479 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2480 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2481 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2482 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2483 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2484 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2485 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2486 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2487 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2488 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2489 hdev->le_conn_min_interval = 0x0018;
2490 hdev->le_conn_max_interval = 0x0028;
2491 hdev->le_conn_latency = 0x0000;
2492 hdev->le_supv_timeout = 0x002a;
2493 hdev->le_def_tx_len = 0x001b;
2494 hdev->le_def_tx_time = 0x0148;
2495 hdev->le_max_tx_len = 0x001b;
2496 hdev->le_max_tx_time = 0x0148;
2497 hdev->le_max_rx_len = 0x001b;
2498 hdev->le_max_rx_time = 0x0148;
2499 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2500 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2501 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2502 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2503 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2504 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2505 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2506 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2507 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2508
2509 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2510 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2511 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2512 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2513 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2514 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2515
2516 /* default 1.28 sec page scan */
2517 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2518 hdev->def_page_scan_int = 0x0800;
2519 hdev->def_page_scan_window = 0x0012;
2520
2521 mutex_init(&hdev->lock);
2522 mutex_init(&hdev->req_lock);
2523 mutex_init(&hdev->mgmt_pending_lock);
2524
2525 ida_init(&hdev->unset_handle_ida);
2526
2527 INIT_LIST_HEAD(&hdev->mesh_pending);
2528 INIT_LIST_HEAD(&hdev->mgmt_pending);
2529 INIT_LIST_HEAD(&hdev->reject_list);
2530 INIT_LIST_HEAD(&hdev->accept_list);
2531 INIT_LIST_HEAD(&hdev->uuids);
2532 INIT_LIST_HEAD(&hdev->link_keys);
2533 INIT_LIST_HEAD(&hdev->long_term_keys);
2534 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2535 INIT_LIST_HEAD(&hdev->remote_oob_data);
2536 INIT_LIST_HEAD(&hdev->le_accept_list);
2537 INIT_LIST_HEAD(&hdev->le_resolv_list);
2538 INIT_LIST_HEAD(&hdev->le_conn_params);
2539 INIT_LIST_HEAD(&hdev->pend_le_conns);
2540 INIT_LIST_HEAD(&hdev->pend_le_reports);
2541 INIT_LIST_HEAD(&hdev->conn_hash.list);
2542 INIT_LIST_HEAD(&hdev->adv_instances);
2543 INIT_LIST_HEAD(&hdev->blocked_keys);
2544 INIT_LIST_HEAD(&hdev->monitored_devices);
2545
2546 INIT_LIST_HEAD(&hdev->local_codecs);
2547 INIT_WORK(&hdev->rx_work, hci_rx_work);
2548 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2549 INIT_WORK(&hdev->tx_work, hci_tx_work);
2550 INIT_WORK(&hdev->power_on, hci_power_on);
2551 INIT_WORK(&hdev->error_reset, hci_error_reset);
2552
2553 hci_cmd_sync_init(hdev);
2554
2555 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2556
2557 skb_queue_head_init(&hdev->rx_q);
2558 skb_queue_head_init(&hdev->cmd_q);
2559 skb_queue_head_init(&hdev->raw_q);
2560
2561 init_waitqueue_head(&hdev->req_wait_q);
2562
2563 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2564 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2565
2566 hci_devcd_setup(hdev);
2567
2568 hci_init_sysfs(hdev);
2569 discovery_init(hdev);
2570
2571 return hdev;
2572 }
2573 EXPORT_SYMBOL(hci_alloc_dev_priv);
2574
2575 /* Free HCI device */
2576 void hci_free_dev(struct hci_dev *hdev)
2577 {
2578 /* will free via device release */
2579 put_device(&hdev->dev);
2580 }
2581 EXPORT_SYMBOL(hci_free_dev);
2582
2583 /* Register HCI device */
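/* A driver must provide at least the open(), close() and send() callbacks
 * before registering. Registration allocates the hci%u index, the work
 * queues, sysfs/debugfs entries and rfkill, and finally queues power_on.
 */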
2584 int hci_register_dev(struct hci_dev *hdev)
2585 {
2586 int id, error;
2587
2588 if (!hdev->open || !hdev->close || !hdev->send)
2589 return -EINVAL;
2590
2591 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2592 if (id < 0)
2593 return id;
2594
2595 error = dev_set_name(&hdev->dev, "hci%u", id);
2596 if (error)
2597 return error;
2598
2599 hdev->name = dev_name(&hdev->dev);
2600 hdev->id = id;
2601
2602 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2603
2604 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2605 if (!hdev->workqueue) {
2606 error = -ENOMEM;
2607 goto err;
2608 }
2609
2610 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2611 hdev->name);
2612 if (!hdev->req_workqueue) {
2613 destroy_workqueue(hdev->workqueue);
2614 error = -ENOMEM;
2615 goto err;
2616 }
2617
2618 if (!IS_ERR_OR_NULL(bt_debugfs))
2619 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2620
2621 error = device_add(&hdev->dev);
2622 if (error < 0)
2623 goto err_wqueue;
2624
2625 hci_leds_init(hdev);
2626
2627 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2628 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2629 hdev);
2630 if (hdev->rfkill) {
2631 if (rfkill_register(hdev->rfkill) < 0) {
2632 rfkill_destroy(hdev->rfkill);
2633 hdev->rfkill = NULL;
2634 }
2635 }
2636
2637 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2638 hci_dev_set_flag(hdev, HCI_RFKILLED);
2639
2640 hci_dev_set_flag(hdev, HCI_SETUP);
2641 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2642
2643 /* Assume BR/EDR support until proven otherwise (such as
2644  * through reading the supported features during init).
2645  */
2646 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2647
2648 write_lock(&hci_dev_list_lock);
2649 list_add(&hdev->list, &hci_dev_list);
2650 write_unlock(&hci_dev_list_lock);
2651
2652 /* Devices that are marked for raw-only usage are unconfigured
2653 * and should not be included in normal operation.
2654 */
2655 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
2656 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2657
2658 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2659 * callback.
2660 */
2661 if (hdev->wakeup)
2662 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2663
2664 hci_sock_dev_event(hdev, HCI_DEV_REG);
2665 hci_dev_hold(hdev);
2666
2667 error = hci_register_suspend_notifier(hdev);
2668 if (error)
2669 BT_WARN("register suspend notifier failed error:%d\n", error);
2670
2671 queue_work(hdev->req_workqueue, &hdev->power_on);
2672
2673 idr_init(&hdev->adv_monitors_idr);
2674 msft_register(hdev);
2675
2676 return id;
2677
2678 err_wqueue:
2679 debugfs_remove_recursive(hdev->debugfs);
2680 destroy_workqueue(hdev->workqueue);
2681 destroy_workqueue(hdev->req_workqueue);
2682 err:
2683 ida_free(&hci_index_ida, hdev->id);
2684
2685 return error;
2686 }
2687 EXPORT_SYMBOL(hci_register_dev);
2688
2689 /* Unregister HCI device */
2690 void hci_unregister_dev(struct hci_dev *hdev)
2691 {
2692 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2693
2694 mutex_lock(&hdev->unregister_lock);
2695 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2696 mutex_unlock(&hdev->unregister_lock);
2697
2698 write_lock(&hci_dev_list_lock);
2699 list_del(&hdev->list);
2700 write_unlock(&hci_dev_list_lock);
2701
2702 synchronize_srcu(&hdev->srcu);
2703 cleanup_srcu_struct(&hdev->srcu);
2704
2705 disable_work_sync(&hdev->rx_work);
2706 disable_work_sync(&hdev->cmd_work);
2707 disable_work_sync(&hdev->tx_work);
2708 disable_work_sync(&hdev->power_on);
2709 disable_work_sync(&hdev->error_reset);
2710
2711 hci_cmd_sync_clear(hdev);
2712
2713 hci_unregister_suspend_notifier(hdev);
2714
2715 hci_dev_do_close(hdev);
2716
2717 if (!test_bit(HCI_INIT, &hdev->flags) &&
2718 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2719 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2720 hci_dev_lock(hdev);
2721 mgmt_index_removed(hdev);
2722 hci_dev_unlock(hdev);
2723 }
2724
2725 /* mgmt_index_removed should take care of emptying the
2726 * pending list */
2727 BUG_ON(!list_empty(&hdev->mgmt_pending));
2728
2729 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2730
2731 if (hdev->rfkill) {
2732 rfkill_unregister(hdev->rfkill);
2733 rfkill_destroy(hdev->rfkill);
2734 }
2735
2736 device_del(&hdev->dev);
2737 /* Actual cleanup is deferred until hci_release_dev(). */
2738 hci_dev_put(hdev);
2739 }
2740 EXPORT_SYMBOL(hci_unregister_dev);
2741
2742 /* Release HCI device */
2743 void hci_release_dev(struct hci_dev *hdev)
2744 {
2745 debugfs_remove_recursive(hdev->debugfs);
2746 kfree_const(hdev->hw_info);
2747 kfree_const(hdev->fw_info);
2748
2749 destroy_workqueue(hdev->workqueue);
2750 destroy_workqueue(hdev->req_workqueue);
2751
2752 hci_dev_lock(hdev);
2753 hci_bdaddr_list_clear(&hdev->reject_list);
2754 hci_bdaddr_list_clear(&hdev->accept_list);
2755 hci_uuids_clear(hdev);
2756 hci_link_keys_clear(hdev);
2757 hci_smp_ltks_clear(hdev);
2758 hci_smp_irks_clear(hdev);
2759 hci_remote_oob_data_clear(hdev);
2760 hci_adv_instances_clear(hdev);
2761 hci_adv_monitors_clear(hdev);
2762 hci_bdaddr_list_clear(&hdev->le_accept_list);
2763 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2764 hci_conn_params_clear_all(hdev);
2765 hci_discovery_filter_clear(hdev);
2766 hci_blocked_keys_clear(hdev);
2767 hci_codec_list_clear(&hdev->local_codecs);
2768 msft_release(hdev);
2769 hci_dev_unlock(hdev);
2770
2771 ida_destroy(&hdev->unset_handle_ida);
2772 ida_free(&hci_index_ida, hdev->id);
2773 kfree_skb(hdev->sent_cmd);
2774 kfree_skb(hdev->req_skb);
2775 kfree_skb(hdev->recv_event);
2776 kfree(hdev);
2777 }
2778 EXPORT_SYMBOL(hci_release_dev);
2779
2780 int hci_register_suspend_notifier(struct hci_dev *hdev)
2781 {
2782 int ret = 0;
2783
2784 if (!hdev->suspend_notifier.notifier_call &&
2785 !hci_test_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER)) {
2786 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2787 ret = register_pm_notifier(&hdev->suspend_notifier);
2788 }
2789
2790 return ret;
2791 }
2792
2793 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2794 {
2795 int ret = 0;
2796
2797 if (hdev->suspend_notifier.notifier_call) {
2798 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2799 if (!ret)
2800 hdev->suspend_notifier.notifier_call = NULL;
2801 }
2802
2803 return ret;
2804 }
2805
2806 /* Cancel ongoing command synchronously:
2807 *
2808 * - Cancel command timer
2809 * - Reset command counter
2810 * - Cancel command request
2811 */
2812 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2813 {
2814 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2815
2816 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2817 disable_delayed_work_sync(&hdev->cmd_timer);
2818 disable_delayed_work_sync(&hdev->ncmd_timer);
2819 } else {
2820 cancel_delayed_work_sync(&hdev->cmd_timer);
2821 cancel_delayed_work_sync(&hdev->ncmd_timer);
2822 }
2823
2824 atomic_set(&hdev->cmd_cnt, 1);
2825
2826 hci_cmd_sync_cancel_sync(hdev, err);
2827 }
2828
2829 /* Suspend HCI device */
2830 int hci_suspend_dev(struct hci_dev *hdev)
2831 {
2832 int ret;
2833
2834 bt_dev_dbg(hdev, "");
2835
2836 /* Suspend should only act when the device is powered. */
2837 if (!hdev_is_powered(hdev) ||
2838 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2839 return 0;
2840
2841 /* If powering down don't attempt to suspend */
2842 if (mgmt_powering_down(hdev))
2843 return 0;
2844
2845 /* Cancel potentially blocking sync operation before suspend */
2846 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2847
2848 hci_req_sync_lock(hdev);
2849 ret = hci_suspend_sync(hdev);
2850 hci_req_sync_unlock(hdev);
2851
2852 hci_clear_wake_reason(hdev);
2853 mgmt_suspending(hdev, hdev->suspend_state);
2854
2855 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2856 return ret;
2857 }
2858 EXPORT_SYMBOL(hci_suspend_dev);
2859
2860 /* Resume HCI device */
2861 int hci_resume_dev(struct hci_dev *hdev)
2862 {
2863 int ret;
2864
2865 bt_dev_dbg(hdev, "");
2866
2867 /* Resume should only act when the device is powered. */
2868 if (!hdev_is_powered(hdev) ||
2869 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2870 return 0;
2871
2872 /* If powering down don't attempt to resume */
2873 if (mgmt_powering_down(hdev))
2874 return 0;
2875
2876 hci_req_sync_lock(hdev);
2877 ret = hci_resume_sync(hdev);
2878 hci_req_sync_unlock(hdev);
2879
2880 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2881 hdev->wake_addr_type);
2882
2883 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2884 return ret;
2885 }
2886 EXPORT_SYMBOL(hci_resume_dev);
2887
2888 /* Reset HCI device */
2889 int hci_reset_dev(struct hci_dev *hdev)
2890 {
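/* Injected Hardware Error event: event code, parameter total length (1)
 * and a hardware code of 0x00.
 */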
2891 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2892 struct sk_buff *skb;
2893
2894 skb = bt_skb_alloc(3, GFP_ATOMIC);
2895 if (!skb)
2896 return -ENOMEM;
2897
2898 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2899 skb_put_data(skb, hw_err, 3);
2900
2901 bt_dev_err(hdev, "Injecting HCI hardware error event");
2902
2903 /* Send Hardware Error to upper stack */
2904 return hci_recv_frame(hdev, skb);
2905 }
2906 EXPORT_SYMBOL(hci_reset_dev);
2907
2908 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2909 {
2910 if (hdev->classify_pkt_type)
2911 return hdev->classify_pkt_type(hdev, skb);
2912
2913 return hci_skb_pkt_type(skb);
2914 }
2915
2916 /* Receive frame from HCI drivers */
2917 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2918 {
2919 u8 dev_pkt_type;
2920
2921 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2922 && !test_bit(HCI_INIT, &hdev->flags))) {
2923 kfree_skb(skb);
2924 return -ENXIO;
2925 }
2926
2927 /* Check if the driver agrees with the packet type classification */
2928 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2929 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2930 hci_skb_pkt_type(skb) = dev_pkt_type;
2931 }
2932
2933 switch (hci_skb_pkt_type(skb)) {
2934 case HCI_EVENT_PKT:
2935 break;
2936 case HCI_ACLDATA_PKT:
2937 /* Detect if ISO packet has been sent as ACL */
2938 if (hci_conn_num(hdev, CIS_LINK) ||
2939 hci_conn_num(hdev, BIS_LINK) ||
2940 hci_conn_num(hdev, PA_LINK)) {
2941 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2942 __u8 type;
2943
2944 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2945 if (type == CIS_LINK || type == BIS_LINK ||
2946 type == PA_LINK)
2947 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2948 }
2949 break;
2950 case HCI_SCODATA_PKT:
2951 break;
2952 case HCI_ISODATA_PKT:
2953 break;
2954 case HCI_DRV_PKT:
2955 break;
2956 default:
2957 kfree_skb(skb);
2958 return -EINVAL;
2959 }
2960
2961 /* Incoming skb */
2962 bt_cb(skb)->incoming = 1;
2963
2964 /* Time stamp */
2965 __net_timestamp(skb);
2966
2967 skb_queue_tail(&hdev->rx_q, skb);
2968 queue_work(hdev->workqueue, &hdev->rx_work);
2969
2970 return 0;
2971 }
2972 EXPORT_SYMBOL(hci_recv_frame);
2973
2974 /* Receive diagnostic message from HCI drivers */
2975 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 /* Mark as diagnostic packet */
2978 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2979
2980 /* Time stamp */
2981 __net_timestamp(skb);
2982
2983 skb_queue_tail(&hdev->rx_q, skb);
2984 queue_work(hdev->workqueue, &hdev->rx_work);
2985
2986 return 0;
2987 }
2988 EXPORT_SYMBOL(hci_recv_diag);
2989
2990 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2991 {
2992 va_list vargs;
2993
2994 va_start(vargs, fmt);
2995 kfree_const(hdev->hw_info);
2996 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2997 va_end(vargs);
2998 }
2999 EXPORT_SYMBOL(hci_set_hw_info);
3000
3001 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3002 {
3003 va_list vargs;
3004
3005 va_start(vargs, fmt);
3006 kfree_const(hdev->fw_info);
3007 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3008 va_end(vargs);
3009 }
3010 EXPORT_SYMBOL(hci_set_fw_info);
3011
3012 /* ---- Interface to upper protocols ---- */
3013
3014 int hci_register_cb(struct hci_cb *cb)
3015 {
3016 BT_DBG("%p name %s", cb, cb->name);
3017
3018 mutex_lock(&hci_cb_list_lock);
3019 list_add_tail(&cb->list, &hci_cb_list);
3020 mutex_unlock(&hci_cb_list_lock);
3021
3022 return 0;
3023 }
3024 EXPORT_SYMBOL(hci_register_cb);
3025
3026 int hci_unregister_cb(struct hci_cb *cb)
3027 {
3028 BT_DBG("%p name %s", cb, cb->name);
3029
3030 mutex_lock(&hci_cb_list_lock);
3031 list_del(&cb->list);
3032 mutex_unlock(&hci_cb_list_lock);
3033
3034 return 0;
3035 }
3036 EXPORT_SYMBOL(hci_unregister_cb);
3037
3038 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3039 {
3040 int err;
3041
3042 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3043 skb->len);
3044
3045 /* Time stamp */
3046 __net_timestamp(skb);
3047
3048 /* Send copy to monitor */
3049 hci_send_to_monitor(hdev, skb);
3050
3051 if (atomic_read(&hdev->promisc)) {
3052 /* Send copy to the sockets */
3053 hci_send_to_sock(hdev, skb);
3054 }
3055
3056 /* Get rid of skb owner, prior to sending to the driver. */
3057 skb_orphan(skb);
3058
3059 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3060 kfree_skb(skb);
3061 return -EINVAL;
3062 }
3063
3064 if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
3065 /* Intercept HCI Drv packet here and don't go with hdev->send
3066 * callback.
3067 */
3068 err = hci_drv_process_cmd(hdev, skb);
3069 kfree_skb(skb);
3070 return err;
3071 }
3072
3073 err = hdev->send(hdev, skb);
3074 if (err < 0) {
3075 bt_dev_err(hdev, "sending frame failed (%d)", err);
3076 kfree_skb(skb);
3077 return err;
3078 }
3079
3080 return 0;
3081 }
3082
3083 static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn,
3084 struct sk_buff *skb)
3085 {
3086 hci_conn_tx_queue(conn, skb);
3087 return hci_send_frame(hdev, skb);
3088 }
3089
3090 /* Send HCI command */
3091 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3092 const void *param)
3093 {
3094 struct sk_buff *skb;
3095
3096 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3097
3098 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3099 if (!skb) {
3100 bt_dev_err(hdev, "no memory for command");
3101 return -ENOMEM;
3102 }
3103
3104 /* Stand-alone HCI commands must be flagged as
3105 * single-command requests.
3106 */
3107 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3108
3109 skb_queue_tail(&hdev->cmd_q, skb);
3110 queue_work(hdev->workqueue, &hdev->cmd_work);
3111
3112 return 0;
3113 }
3114
3115 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3116 const void *param)
3117 {
3118 struct sk_buff *skb;
3119
3120 if (hci_opcode_ogf(opcode) != 0x3f) {
3121 /* A controller receiving a command shall respond with either
3122 * a Command Status Event or a Command Complete Event.
3123 * Therefore, all standard HCI commands must be sent via the
3124 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3125 * Some vendors do not comply with this rule for vendor-specific
3126 * commands and do not return any event. We want to support
3127 * unresponded commands for such cases only.
3128 */
3129 bt_dev_err(hdev, "unresponded command not supported");
3130 return -EINVAL;
3131 }
3132
3133 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3134 if (!skb) {
3135 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3136 opcode);
3137 return -ENOMEM;
3138 }
3139
3140 hci_send_frame(hdev, skb);
3141
3142 return 0;
3143 }
3144 EXPORT_SYMBOL(__hci_cmd_send);
3145
3146 /* Get data from the previously sent command */
3147 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3148 {
3149 struct hci_command_hdr *hdr;
3150
3151 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3152 return NULL;
3153
3154 hdr = (void *)skb->data;
3155
3156 if (hdr->opcode != cpu_to_le16(opcode))
3157 return NULL;
3158
3159 return skb->data + HCI_COMMAND_HDR_SIZE;
3160 }
3161
3162 /* Get data from the previously sent command */
3163 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3164 {
3165 void *data;
3166
3167 /* Check if opcode matches last sent command */
3168 data = hci_cmd_data(hdev->sent_cmd, opcode);
3169 if (!data)
3170 /* Check if opcode matches last request */
3171 data = hci_cmd_data(hdev->req_skb, opcode);
3172
3173 return data;
3174 }
3175
3176 /* Get data from last received event */
3177 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3178 {
3179 struct hci_event_hdr *hdr;
3180 int offset;
3181
3182 if (!hdev->recv_event)
3183 return NULL;
3184
3185 hdr = (void *)hdev->recv_event->data;
3186 offset = sizeof(*hdr);
3187
3188 if (hdr->evt != event) {
3189 /* In case of an LE meta event, check whether the subevent matches */
3190 if (hdr->evt == HCI_EV_LE_META) {
3191 struct hci_ev_le_meta *ev;
3192
3193 ev = (void *)hdev->recv_event->data + offset;
3194 offset += sizeof(*ev);
3195 if (ev->subevent == event)
3196 goto found;
3197 }
3198 return NULL;
3199 }
3200
3201 found:
3202 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3203
3204 return hdev->recv_event->data + offset;
3205 }
3206
3207 /* Send ACL data */
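/* The 16-bit handle field of the ACL header carries the 12-bit connection
 * handle with the packet boundary and broadcast flags packed into the
 * upper bits (see hci_handle_pack()).
 */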
3208 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3209 {
3210 struct hci_acl_hdr *hdr;
3211 int len = skb->len;
3212
3213 skb_push(skb, HCI_ACL_HDR_SIZE);
3214 skb_reset_transport_header(skb);
3215 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3216 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3217 hdr->dlen = cpu_to_le16(len);
3218 }
3219
3220 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3221 struct sk_buff *skb, __u16 flags)
3222 {
3223 struct hci_conn *conn = chan->conn;
3224 struct hci_dev *hdev = conn->hdev;
3225 struct sk_buff *list;
3226
3227 skb->len = skb_headlen(skb);
3228 skb->data_len = 0;
3229
3230 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3231
3232 hci_add_acl_hdr(skb, conn->handle, flags);
3233
3234 list = skb_shinfo(skb)->frag_list;
3235 if (!list) {
3236 /* Non fragmented */
3237 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3238
3239 skb_queue_tail(queue, skb);
3240 } else {
3241 /* Fragmented */
3242 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3243
3244 skb_shinfo(skb)->frag_list = NULL;
3245
3246 /* Queue all fragments atomically. We need to use spin_lock_bh
3247  * here because of 6LoWPAN links, where this function can be
3248  * called from softirq context and using a normal spin lock
3249  * could cause deadlocks.
3250  */
3251 spin_lock_bh(&queue->lock);
3252
3253 __skb_queue_tail(queue, skb);
3254
3255 flags &= ~ACL_START;
3256 flags |= ACL_CONT;
3257 do {
3258 skb = list; list = list->next;
3259
3260 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3261 hci_add_acl_hdr(skb, conn->handle, flags);
3262
3263 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3264
3265 __skb_queue_tail(queue, skb);
3266 } while (list);
3267
3268 spin_unlock_bh(&queue->lock);
3269 }
3270 }
3271
3272 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3273 {
3274 struct hci_dev *hdev = chan->conn->hdev;
3275
3276 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3277
3278 hci_queue_acl(chan, &chan->data_q, skb, flags);
3279
3280 queue_work(hdev->workqueue, &hdev->tx_work);
3281 }
3282
3283 /* Send SCO data */
3284 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3285 {
3286 struct hci_dev *hdev = conn->hdev;
3287 struct hci_sco_hdr hdr;
3288
3289 BT_DBG("%s len %d", hdev->name, skb->len);
3290
3291 hdr.handle = cpu_to_le16(conn->handle);
3292 hdr.dlen = skb->len;
3293
3294 skb_push(skb, HCI_SCO_HDR_SIZE);
3295 skb_reset_transport_header(skb);
3296 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3297
3298 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3299
3300 skb_queue_tail(&conn->data_q, skb);
3301 queue_work(hdev->workqueue, &hdev->tx_work);
3302 }
3303
3304 /* Send ISO data */
3305 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3306 {
3307 struct hci_iso_hdr *hdr;
3308 int len = skb->len;
3309
3310 skb_push(skb, HCI_ISO_HDR_SIZE);
3311 skb_reset_transport_header(skb);
3312 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3313 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3314 hdr->dlen = cpu_to_le16(len);
3315 }
3316
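/* Fragments an ISO SDU into HCI ISO data packets: a single packet is sent
 * as ISO_SINGLE, otherwise the first fragment is marked ISO_START, the
 * intermediate ones ISO_CONT and the last one ISO_END.
 */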
3317 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3318 struct sk_buff *skb)
3319 {
3320 struct hci_dev *hdev = conn->hdev;
3321 struct sk_buff *list;
3322 __u16 flags;
3323
3324 skb->len = skb_headlen(skb);
3325 skb->data_len = 0;
3326
3327 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3328
3329 list = skb_shinfo(skb)->frag_list;
3330
3331 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3332 hci_add_iso_hdr(skb, conn->handle, flags);
3333
3334 if (!list) {
3335 /* Non fragmented */
3336 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3337
3338 skb_queue_tail(queue, skb);
3339 } else {
3340 /* Fragmented */
3341 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3342
3343 skb_shinfo(skb)->frag_list = NULL;
3344
3345 __skb_queue_tail(queue, skb);
3346
3347 do {
3348 skb = list; list = list->next;
3349
3350 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3351 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3352 0x00);
3353 hci_add_iso_hdr(skb, conn->handle, flags);
3354
3355 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3356
3357 __skb_queue_tail(queue, skb);
3358 } while (list);
3359 }
3360 }
3361
3362 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3363 {
3364 struct hci_dev *hdev = conn->hdev;
3365
3366 BT_DBG("%s len %d", hdev->name, skb->len);
3367
3368 hci_queue_iso(conn, &conn->data_q, skb);
3369
3370 queue_work(hdev->workqueue, &hdev->tx_work);
3371 }
3372
3373 /* ---- HCI TX task (outgoing data) ---- */
3374
3375 /* HCI Connection scheduler */
3376 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3377 {
3378 struct hci_dev *hdev;
3379 int cnt, q;
3380
3381 if (!conn) {
3382 *quote = 0;
3383 return;
3384 }
3385
3386 hdev = conn->hdev;
3387
3388 switch (conn->type) {
3389 case ACL_LINK:
3390 cnt = hdev->acl_cnt;
3391 break;
3392 case SCO_LINK:
3393 case ESCO_LINK:
3394 cnt = hdev->sco_cnt;
3395 break;
3396 case LE_LINK:
3397 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3398 break;
3399 case CIS_LINK:
3400 case BIS_LINK:
3401 case PA_LINK:
3402 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3403 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3404 break;
3405 default:
3406 cnt = 0;
3407 bt_dev_err(hdev, "unknown link type %d", conn->type);
3408 }
3409
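/* Give each connection with queued data an equal share of the free
 * controller buffers, but never less than one packet so that a
 * connection cannot be starved.
 */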
3410 q = cnt / num;
3411 *quote = q ? q : 1;
3412 }
3413
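/* Pick the connection of the given type that has queued data and the
 * fewest packets currently in flight (c->sent), and compute how many
 * packets it may send this round.
 */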
3414 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3415 int *quote)
3416 {
3417 struct hci_conn_hash *h = &hdev->conn_hash;
3418 struct hci_conn *conn = NULL, *c;
3419 unsigned int num = 0, min = ~0;
3420
3421 /* We don't have to lock the device here. Connections are always
3422  * added and removed with the TX task disabled. */
3423
3424 rcu_read_lock();
3425
3426 list_for_each_entry_rcu(c, &h->list, list) {
3427 if (c->type != type ||
3428 skb_queue_empty(&c->data_q))
3429 continue;
3430
3431 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3432 continue;
3433
3434 num++;
3435
3436 if (c->sent < min) {
3437 min = c->sent;
3438 conn = c;
3439 }
3440
3441 if (hci_conn_num(hdev, type) == num)
3442 break;
3443 }
3444
3445 rcu_read_unlock();
3446
3447 hci_quote_sent(conn, num, quote);
3448
3449 BT_DBG("conn %p quote %d", conn, *quote);
3450 return conn;
3451 }
3452
3453 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3454 {
3455 struct hci_conn_hash *h = &hdev->conn_hash;
3456 struct hci_conn *c;
3457
3458 bt_dev_err(hdev, "link tx timeout");
3459
3460 hci_dev_lock(hdev);
3461
3462 /* Kill stalled connections */
3463 list_for_each_entry(c, &h->list, list) {
3464 if (c->type == type && c->sent) {
3465 bt_dev_err(hdev, "killing stalled connection %pMR",
3466 &c->dst);
3467 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3468 }
3469 }
3470
3471 hci_dev_unlock(hdev);
3472 }
3473
3474 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3475 int *quote)
3476 {
3477 struct hci_conn_hash *h = &hdev->conn_hash;
3478 struct hci_chan *chan = NULL;
3479 unsigned int num = 0, min = ~0, cur_prio = 0;
3480 struct hci_conn *conn;
3481 int conn_num = 0;
3482
3483 BT_DBG("%s", hdev->name);
3484
3485 rcu_read_lock();
3486
3487 list_for_each_entry_rcu(conn, &h->list, list) {
3488 struct hci_chan *tmp;
3489
3490 if (conn->type != type)
3491 continue;
3492
3493 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3494 continue;
3495
3496 conn_num++;
3497
3498 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3499 struct sk_buff *skb;
3500
3501 if (skb_queue_empty(&tmp->data_q))
3502 continue;
3503
3504 skb = skb_peek(&tmp->data_q);
3505 if (skb->priority < cur_prio)
3506 continue;
3507
3508 if (skb->priority > cur_prio) {
3509 num = 0;
3510 min = ~0;
3511 cur_prio = skb->priority;
3512 }
3513
3514 num++;
3515
3516 if (conn->sent < min) {
3517 min = conn->sent;
3518 chan = tmp;
3519 }
3520 }
3521
3522 if (hci_conn_num(hdev, type) == conn_num)
3523 break;
3524 }
3525
3526 rcu_read_unlock();
3527
3528 if (!chan)
3529 return NULL;
3530
3531 hci_quote_sent(chan->conn, num, quote);
3532
3533 BT_DBG("chan %p quote %d", chan, *quote);
3534 return chan;
3535 }
3536
3537 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3538 {
3539 struct hci_conn_hash *h = &hdev->conn_hash;
3540 struct hci_conn *conn;
3541 int num = 0;
3542
3543 BT_DBG("%s", hdev->name);
3544
3545 rcu_read_lock();
3546
3547 list_for_each_entry_rcu(conn, &h->list, list) {
3548 struct hci_chan *chan;
3549
3550 if (conn->type != type)
3551 continue;
3552
3553 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3554 continue;
3555
3556 num++;
3557
3558 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3559 struct sk_buff *skb;
3560
3561 if (chan->sent) {
3562 chan->sent = 0;
3563 continue;
3564 }
3565
3566 if (skb_queue_empty(&chan->data_q))
3567 continue;
3568
3569 skb = skb_peek(&chan->data_q);
3570 if (skb->priority >= HCI_PRIO_MAX - 1)
3571 continue;
3572
3573 skb->priority = HCI_PRIO_MAX - 1;
3574
3575 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3576 skb->priority);
3577 }
3578
3579 if (hci_conn_num(hdev, type) == num)
3580 break;
3581 }
3582
3583 rcu_read_unlock();
3584
3585 }
3586
3587 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3588 {
3589 unsigned long last_tx;
3590
3591 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3592 return;
3593
3594 switch (type) {
3595 case LE_LINK:
3596 last_tx = hdev->le_last_tx;
3597 break;
3598 default:
3599 last_tx = hdev->acl_last_tx;
3600 break;
3601 }
3602
3603 /* tx timeout must be longer than maximum link supervision timeout
3604 * (40.9 seconds)
3605 */
3606 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3607 hci_link_tx_to(hdev, type);
3608 }
3609
3610 /* Schedule SCO */
3611 static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
3612 {
3613 struct hci_conn *conn;
3614 struct sk_buff *skb;
3615 int quote, *cnt;
3616 unsigned int pkts = hdev->sco_pkts;
3617
3618 bt_dev_dbg(hdev, "type %u", type);
3619
3620 if (!hci_conn_num(hdev, type) || !pkts)
3621 return;
3622
3623 /* Use sco_pkts if flow control has not been enabled, which limits
3624  * the number of buffers sent in a row.
3625  */
3626 if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3627 cnt = &pkts;
3628 else
3629 cnt = &hdev->sco_cnt;
3630
3631 while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
3632 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3633 BT_DBG("skb %p len %d", skb, skb->len);
3634 hci_send_conn_frame(hdev, conn, skb);
3635
3636 conn->sent++;
3637 if (conn->sent == ~0)
3638 conn->sent = 0;
3639 (*cnt)--;
3640 }
3641 }
3642
3643 /* Reschedule if all packets were sent and flow control is not enabled,
3644  * as there could be more packets queued that could not be sent, and
3645  * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule
3646  * needs to be forced.
3647  */
3648 if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3649 queue_work(hdev->workqueue, &hdev->tx_work);
3650 }
3651
3652 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3653 {
3654 unsigned int cnt = hdev->acl_cnt;
3655 struct hci_chan *chan;
3656 struct sk_buff *skb;
3657 int quote;
3658
3659 __check_timeout(hdev, cnt, ACL_LINK);
3660
3661 while (hdev->acl_cnt &&
3662 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3663 u32 priority = (skb_peek(&chan->data_q))->priority;
3664 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3665 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3666 skb->len, skb->priority);
3667
3668 /* Stop if priority has changed */
3669 if (skb->priority < priority)
3670 break;
3671
3672 skb = skb_dequeue(&chan->data_q);
3673
3674 hci_conn_enter_active_mode(chan->conn,
3675 bt_cb(skb)->force_active);
3676
3677 hci_send_conn_frame(hdev, chan->conn, skb);
3678 hdev->acl_last_tx = jiffies;
3679
3680 hdev->acl_cnt--;
3681 chan->sent++;
3682 chan->conn->sent++;
3683
3684 /* Send pending SCO packets right away */
3685 hci_sched_sco(hdev, SCO_LINK);
3686 hci_sched_sco(hdev, ESCO_LINK);
3687 }
3688 }
3689
3690 if (cnt != hdev->acl_cnt)
3691 hci_prio_recalculate(hdev, ACL_LINK);
3692 }
3693
3694 static void hci_sched_acl(struct hci_dev *hdev)
3695 {
3696 BT_DBG("%s", hdev->name);
3697
3698 /* No ACL link over BR/EDR controller */
3699 if (!hci_conn_num(hdev, ACL_LINK))
3700 return;
3701
3702 hci_sched_acl_pkt(hdev);
3703 }
3704
3705 static void hci_sched_le(struct hci_dev *hdev)
3706 {
3707 struct hci_chan *chan;
3708 struct sk_buff *skb;
3709 int quote, *cnt, tmp;
3710
3711 BT_DBG("%s", hdev->name);
3712
3713 if (!hci_conn_num(hdev, LE_LINK))
3714 return;
3715
3716 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3717
3718 __check_timeout(hdev, *cnt, LE_LINK);
3719
3720 tmp = *cnt;
3721 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3722 u32 priority = (skb_peek(&chan->data_q))->priority;
3723 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3724 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3725 skb->len, skb->priority);
3726
3727 /* Stop if priority has changed */
3728 if (skb->priority < priority)
3729 break;
3730
3731 skb = skb_dequeue(&chan->data_q);
3732
3733 hci_send_conn_frame(hdev, chan->conn, skb);
3734 hdev->le_last_tx = jiffies;
3735
3736 (*cnt)--;
3737 chan->sent++;
3738 chan->conn->sent++;
3739
3740 /* Send pending SCO packets right away */
3741 hci_sched_sco(hdev, SCO_LINK);
3742 hci_sched_sco(hdev, ESCO_LINK);
3743 }
3744 }
3745
3746 if (*cnt != tmp)
3747 hci_prio_recalculate(hdev, LE_LINK);
3748 }
3749
3750 /* Schedule iso */
3751 static void hci_sched_iso(struct hci_dev *hdev, __u8 type)
3752 {
3753 struct hci_conn *conn;
3754 struct sk_buff *skb;
3755 int quote, *cnt;
3756
3757 BT_DBG("%s", hdev->name);
3758
3759 if (!hci_conn_num(hdev, type))
3760 return;
3761
3762 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3763 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3764 while (*cnt && (conn = hci_low_sent(hdev, type, &quote))) {
3765 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3766 BT_DBG("skb %p len %d", skb, skb->len);
3767 hci_send_conn_frame(hdev, conn, skb);
3768
3769 conn->sent++;
3770 if (conn->sent == ~0)
3771 conn->sent = 0;
3772 (*cnt)--;
3773 }
3774 }
3775 }
3776
3777 static void hci_tx_work(struct work_struct *work)
3778 {
3779 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3780 struct sk_buff *skb;
3781
3782 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3783 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3784
3785 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3786 /* Schedule queues and send stuff to HCI driver */
3787 hci_sched_sco(hdev, SCO_LINK);
3788 hci_sched_sco(hdev, ESCO_LINK);
3789 hci_sched_iso(hdev, CIS_LINK);
3790 hci_sched_iso(hdev, BIS_LINK);
3791 hci_sched_iso(hdev, PA_LINK);
3792 hci_sched_acl(hdev);
3793 hci_sched_le(hdev);
3794 }
3795
3796 /* Send next queued raw (unknown type) packet */
3797 while ((skb = skb_dequeue(&hdev->raw_q)))
3798 hci_send_frame(hdev, skb);
3799 }
3800
3801 /* ----- HCI RX task (incoming data processing) ----- */
3802
3803 /* ACL data packet */
3804 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3805 {
3806 struct hci_acl_hdr *hdr;
3807 struct hci_conn *conn;
3808 __u16 handle, flags;
3809
3810 hdr = skb_pull_data(skb, sizeof(*hdr));
3811 if (!hdr) {
3812 bt_dev_err(hdev, "ACL packet too small");
3813 goto drop;
3814 }
3815
3816 handle = __le16_to_cpu(hdr->handle);
3817 flags = hci_flags(handle);
3818 handle = hci_handle(handle);
3819
3820 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3821 handle, flags);
3822
3823 hdev->stat.acl_rx++;
3824
3825 hci_dev_lock(hdev);
3826 conn = hci_conn_hash_lookup_handle(hdev, handle);
3827 hci_dev_unlock(hdev);
3828
3829 if (conn) {
3830 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3831
3832 /* Send to upper protocol */
3833 l2cap_recv_acldata(conn, skb, flags);
3834 return;
3835 } else {
3836 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3837 handle);
3838 }
3839
3840 drop:
3841 kfree_skb(skb);
3842 }
3843
3844 /* SCO data packet */
3845 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3846 {
3847 struct hci_sco_hdr *hdr;
3848 struct hci_conn *conn;
3849 __u16 handle, flags;
3850
3851 hdr = skb_pull_data(skb, sizeof(*hdr));
3852 if (!hdr) {
3853 bt_dev_err(hdev, "SCO packet too small");
3854 goto drop;
3855 }
3856
3857 handle = __le16_to_cpu(hdr->handle);
3858 flags = hci_flags(handle);
3859 handle = hci_handle(handle);
3860
3861 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3862 handle, flags);
3863
3864 hdev->stat.sco_rx++;
3865
3866 hci_dev_lock(hdev);
3867 conn = hci_conn_hash_lookup_handle(hdev, handle);
3868 hci_dev_unlock(hdev);
3869
3870 if (conn) {
3871 /* Send to upper protocol */
3872 hci_skb_pkt_status(skb) = flags & 0x03;
3873 sco_recv_scodata(conn, skb);
3874 return;
3875 } else {
3876 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3877 handle);
3878 }
3879
3880 drop:
3881 kfree_skb(skb);
3882 }
3883
3884 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3885 {
3886 struct hci_iso_hdr *hdr;
3887 struct hci_conn *conn;
3888 __u16 handle, flags;
3889
3890 hdr = skb_pull_data(skb, sizeof(*hdr));
3891 if (!hdr) {
3892 bt_dev_err(hdev, "ISO packet too small");
3893 goto drop;
3894 }
3895
3896 handle = __le16_to_cpu(hdr->handle);
3897 flags = hci_flags(handle);
3898 handle = hci_handle(handle);
3899
3900 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3901 handle, flags);
3902
3903 hci_dev_lock(hdev);
3904 conn = hci_conn_hash_lookup_handle(hdev, handle);
3905 hci_dev_unlock(hdev);
3906
3907 if (!conn) {
3908 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3909 handle);
3910 goto drop;
3911 }
3912
3913 /* Send to upper protocol */
3914 iso_recv(conn, skb, flags);
3915 return;
3916
3917 drop:
3918 kfree_skb(skb);
3919 }
3920
3921 static bool hci_req_is_complete(struct hci_dev *hdev)
3922 {
3923 struct sk_buff *skb;
3924
3925 skb = skb_peek(&hdev->cmd_q);
3926 if (!skb)
3927 return true;
3928
3929 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3930 }
3931
3932 static void hci_resend_last(struct hci_dev *hdev)
3933 {
3934 struct hci_command_hdr *sent;
3935 struct sk_buff *skb;
3936 u16 opcode;
3937
3938 if (!hdev->sent_cmd)
3939 return;
3940
3941 sent = (void *) hdev->sent_cmd->data;
3942 opcode = __le16_to_cpu(sent->opcode);
3943 if (opcode == HCI_OP_RESET)
3944 return;
3945
3946 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3947 if (!skb)
3948 return;
3949
3950 skb_queue_head(&hdev->cmd_q, skb);
3951 queue_work(hdev->workqueue, &hdev->cmd_work);
3952 }
3953
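/* Match a command complete/status event against the current request: clear
 * HCI_CMD_PENDING, pick up the request's completion callback (either from
 * hdev->req_skb or from the queued commands), and drop any remaining queued
 * commands of a request that ended early on an error status.
 */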
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

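/* Drain hdev->rx_q and dispatch each received packet to the matching
 * handler based on its HCI packet type.
 */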
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to pass
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

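/* Send a single queued command to the controller. A clone is kept in
 * hdev->sent_cmd so the completion event can be matched against it, and,
 * for a pending synchronous request, another clone is stored in
 * hdev->req_skb for the waiter. Commands with the HCI_OP_NOP opcode are
 * not sent to the controller.
 */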
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
		err = hci_send_frame(hdev, skb);
		if (err < 0) {
			hci_cmd_sync_cancel_sync(hdev, -err);
			return;
		}
		atomic_dec(&hdev->cmd_cnt);
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}
}

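/* hdev->cmd_cnt tracks how many command packets the controller is currently
 * willing to accept (Num_HCI_Command_Packets). A command is only dequeued
 * while at least one credit is available, and the command timeout is armed
 * unless a reset or a workqueue drain is in progress.
 */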
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}
