1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 /* Get HCI device by index.
66 * Device is held on return. */
67 static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
68 {
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 if (srcu_index)
81 *srcu_index = srcu_read_lock(&d->srcu);
82 break;
83 }
84 }
85 read_unlock(&hci_dev_list_lock);
86 return hdev;
87 }
88
89 struct hci_dev *hci_dev_get(int index)
90 {
91 return __hci_dev_get(index, NULL);
92 }
93
94 static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
95 {
96 return __hci_dev_get(index, srcu_index);
97 }
98
99 static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
100 {
101 srcu_read_unlock(&hdev->srcu, srcu_index);
102 hci_dev_put(hdev);
103 }
104
105 /* ---- Inquiry support ---- */
106
107 bool hci_discovery_active(struct hci_dev *hdev)
108 {
109 struct discovery_state *discov = &hdev->discovery;
110
111 switch (discov->state) {
112 case DISCOVERY_FINDING:
113 case DISCOVERY_RESOLVING:
114 return true;
115
116 default:
117 return false;
118 }
119 }
120
121 void hci_discovery_set_state(struct hci_dev *hdev, int state)
122 {
123 int old_state = hdev->discovery.state;
124
125 if (old_state == state)
126 return;
127
128 hdev->discovery.state = state;
129
130 switch (state) {
131 case DISCOVERY_STOPPED:
132 hci_update_passive_scan(hdev);
133
134 if (old_state != DISCOVERY_STARTING)
135 mgmt_discovering(hdev, 0);
136 break;
137 case DISCOVERY_STARTING:
138 break;
139 case DISCOVERY_FINDING:
140 mgmt_discovering(hdev, 1);
141 break;
142 case DISCOVERY_RESOLVING:
143 break;
144 case DISCOVERY_STOPPING:
145 break;
146 }
147
148 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
149 }
150
151 void hci_inquiry_cache_flush(struct hci_dev *hdev)
152 {
153 struct discovery_state *cache = &hdev->discovery;
154 struct inquiry_entry *p, *n;
155
156 list_for_each_entry_safe(p, n, &cache->all, all) {
157 list_del(&p->all);
158 kfree(p);
159 }
160
161 INIT_LIST_HEAD(&cache->unknown);
162 INIT_LIST_HEAD(&cache->resolve);
163 }
164
165 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
166 bdaddr_t *bdaddr)
167 {
168 struct discovery_state *cache = &hdev->discovery;
169 struct inquiry_entry *e;
170
171 BT_DBG("cache %p, %pMR", cache, bdaddr);
172
173 list_for_each_entry(e, &cache->all, all) {
174 if (!bacmp(&e->data.bdaddr, bdaddr))
175 return e;
176 }
177
178 return NULL;
179 }
180
181 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
182 bdaddr_t *bdaddr)
183 {
184 struct discovery_state *cache = &hdev->discovery;
185 struct inquiry_entry *e;
186
187 BT_DBG("cache %p, %pMR", cache, bdaddr);
188
189 list_for_each_entry(e, &cache->unknown, list) {
190 if (!bacmp(&e->data.bdaddr, bdaddr))
191 return e;
192 }
193
194 return NULL;
195 }
196
197 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
198 bdaddr_t *bdaddr,
199 int state)
200 {
201 struct discovery_state *cache = &hdev->discovery;
202 struct inquiry_entry *e;
203
204 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
205
206 list_for_each_entry(e, &cache->resolve, list) {
207 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
208 return e;
209 if (!bacmp(&e->data.bdaddr, bdaddr))
210 return e;
211 }
212
213 return NULL;
214 }
215
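/* Re-insert @ie into the resolve list so that entries waiting for name
 * resolution stay ordered by signal strength (strongest RSSI first);
 * entries whose name resolution is already pending keep their position.
 */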
216 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
217 struct inquiry_entry *ie)
218 {
219 struct discovery_state *cache = &hdev->discovery;
220 struct list_head *pos = &cache->resolve;
221 struct inquiry_entry *p;
222
223 list_del(&ie->list);
224
225 list_for_each_entry(p, &cache->resolve, list) {
226 if (p->name_state != NAME_PENDING &&
227 abs(p->data.rssi) >= abs(ie->data.rssi))
228 break;
229 pos = &p->list;
230 }
231
232 list_add(&ie->list, pos);
233 }
234
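/* Add or refresh the inquiry cache entry for a reported device and
 * return the MGMT device-found flags for it (e.g. whether userspace
 * still needs to confirm the remote name or legacy pairing is in use).
 */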
235 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
236 bool name_known)
237 {
238 struct discovery_state *cache = &hdev->discovery;
239 struct inquiry_entry *ie;
240 u32 flags = 0;
241
242 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
243
244 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
245
246 if (!data->ssp_mode)
247 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
248
249 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
250 if (ie) {
251 if (!ie->data.ssp_mode)
252 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
253
254 if (ie->name_state == NAME_NEEDED &&
255 data->rssi != ie->data.rssi) {
256 ie->data.rssi = data->rssi;
257 hci_inquiry_cache_update_resolve(hdev, ie);
258 }
259
260 goto update;
261 }
262
263 /* Entry not in the cache. Add new one. */
264 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
265 if (!ie) {
266 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
267 goto done;
268 }
269
270 list_add(&ie->all, &cache->all);
271
272 if (name_known) {
273 ie->name_state = NAME_KNOWN;
274 } else {
275 ie->name_state = NAME_NOT_KNOWN;
276 list_add(&ie->list, &cache->unknown);
277 }
278
279 update:
280 if (name_known && ie->name_state != NAME_KNOWN &&
281 ie->name_state != NAME_PENDING) {
282 ie->name_state = NAME_KNOWN;
283 list_del(&ie->list);
284 }
285
286 memcpy(&ie->data, data, sizeof(*data));
287 ie->timestamp = jiffies;
288 cache->timestamp = jiffies;
289
290 if (ie->name_state == NAME_NOT_KNOWN)
291 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
292
293 done:
294 return flags;
295 }
296
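/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records; returns the number of entries copied. Called with hdev->lock
 * held from the HCIINQUIRY ioctl path.
 */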
297 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
298 {
299 struct discovery_state *cache = &hdev->discovery;
300 struct inquiry_info *info = (struct inquiry_info *) buf;
301 struct inquiry_entry *e;
302 int copied = 0;
303
304 list_for_each_entry(e, &cache->all, all) {
305 struct inquiry_data *data = &e->data;
306
307 if (copied >= num)
308 break;
309
310 bacpy(&info->bdaddr, &data->bdaddr);
311 info->pscan_rep_mode = data->pscan_rep_mode;
312 info->pscan_period_mode = data->pscan_period_mode;
313 info->pscan_mode = data->pscan_mode;
314 memcpy(info->dev_class, data->dev_class, 3);
315 info->clock_offset = data->clock_offset;
316
317 info++;
318 copied++;
319 }
320
321 BT_DBG("cache %p, copied %d", cache, copied);
322 return copied;
323 }
324
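/* HCIINQUIRY ioctl handler: optionally (re)runs an inquiry when the
 * cache is stale, empty or a flush was requested, then copies the
 * cached results back to user space.
 *
 * Rough userspace usage sketch (the socket fd name and buffer sizes
 * here are only illustrative, not taken from this file): the request
 * struct is followed in memory by the buffer receiving the results,
 * e.g.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = { .ir = { .dev_id = 0,
 *			  .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 8,
 *			  .flags = IREQ_CACHE_FLUSH } };
 *
 *	ioctl(hci_socket_fd, HCIINQUIRY, &req);
 */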
325 int hci_inquiry(void __user *arg)
326 {
327 __u8 __user *ptr = arg;
328 struct hci_inquiry_req ir;
329 struct hci_dev *hdev;
330 int err = 0, do_inquiry = 0, max_rsp;
331 __u8 *buf;
332
333 if (copy_from_user(&ir, ptr, sizeof(ir)))
334 return -EFAULT;
335
336 hdev = hci_dev_get(ir.dev_id);
337 if (!hdev)
338 return -ENODEV;
339
340 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
341 err = -EBUSY;
342 goto done;
343 }
344
345 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
346 err = -EOPNOTSUPP;
347 goto done;
348 }
349
350 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
351 err = -EOPNOTSUPP;
352 goto done;
353 }
354
355 /* Restrict maximum inquiry length to 60 seconds */
356 if (ir.length > 60) {
357 err = -EINVAL;
358 goto done;
359 }
360
361 hci_dev_lock(hdev);
362 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
363 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
364 hci_inquiry_cache_flush(hdev);
365 do_inquiry = 1;
366 }
367 hci_dev_unlock(hdev);
368
369 if (do_inquiry) {
370 hci_req_sync_lock(hdev);
371 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
372 hci_req_sync_unlock(hdev);
373
374 if (err < 0)
375 goto done;
376
377 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
378 * cleared). If it is interrupted by a signal, return -EINTR.
379 */
380 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
381 TASK_INTERRUPTIBLE)) {
382 err = -EINTR;
383 goto done;
384 }
385 }
386
387 /* For an unlimited number of responses we will use a buffer with
388 * 255 entries
389 */
390 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
391
392 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
393 * copy it to user space.
394 */
395 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
396 if (!buf) {
397 err = -ENOMEM;
398 goto done;
399 }
400
401 hci_dev_lock(hdev);
402 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
403 hci_dev_unlock(hdev);
404
405 BT_DBG("num_rsp %d", ir.num_rsp);
406
407 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
408 ptr += sizeof(ir);
409 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
410 ir.num_rsp))
411 err = -EFAULT;
412 } else
413 err = -EFAULT;
414
415 kfree(buf);
416
417 done:
418 hci_dev_put(hdev);
419 return err;
420 }
421
422 static int hci_dev_do_open(struct hci_dev *hdev)
423 {
424 int ret = 0;
425
426 BT_DBG("%s %p", hdev->name, hdev);
427
428 hci_req_sync_lock(hdev);
429
430 ret = hci_dev_open_sync(hdev);
431
432 hci_req_sync_unlock(hdev);
433 return ret;
434 }
435
436 /* ---- HCI ioctl helpers ---- */
437
438 int hci_dev_open(__u16 dev)
439 {
440 struct hci_dev *hdev;
441 int err;
442
443 hdev = hci_dev_get(dev);
444 if (!hdev)
445 return -ENODEV;
446
447 /* Devices that are marked as unconfigured can only be powered
448 * up as user channel. Trying to bring them up as normal devices
449 * will result in a failure. Only user channel operation is
450 * possible.
451 *
452 * When this function is called for a user channel, the flag
453 * HCI_USER_CHANNEL will be set first before attempting to
454 * open the device.
455 */
456 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
457 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
458 err = -EOPNOTSUPP;
459 goto done;
460 }
461
462 /* We need to ensure that no other power on/off work is pending
463 * before proceeding to call hci_dev_do_open. This is
464 * particularly important if the setup procedure has not yet
465 * completed.
466 */
467 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
468 cancel_delayed_work(&hdev->power_off);
469
470 /* After this call it is guaranteed that the setup procedure
471 * has finished. This means that error conditions like RFKILL
472 * or no valid public or static random address apply.
473 */
474 flush_workqueue(hdev->req_workqueue);
475
476 /* For controllers not using the management interface and that
477 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
478 * so that pairing works for them. Once the management interface
479 * is in use this bit will be cleared again and userspace has
480 * to explicitly enable it.
481 */
482 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
483 !hci_dev_test_flag(hdev, HCI_MGMT))
484 hci_dev_set_flag(hdev, HCI_BONDABLE);
485
486 err = hci_dev_do_open(hdev);
487
488 done:
489 hci_dev_put(hdev);
490 return err;
491 }
492
493 int hci_dev_do_close(struct hci_dev *hdev)
494 {
495 int err;
496
497 BT_DBG("%s %p", hdev->name, hdev);
498
499 hci_req_sync_lock(hdev);
500
501 err = hci_dev_close_sync(hdev);
502
503 hci_req_sync_unlock(hdev);
504
505 return err;
506 }
507
508 int hci_dev_close(__u16 dev)
509 {
510 struct hci_dev *hdev;
511 int err;
512
513 hdev = hci_dev_get(dev);
514 if (!hdev)
515 return -ENODEV;
516
517 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
518 err = -EBUSY;
519 goto done;
520 }
521
522 cancel_work_sync(&hdev->power_on);
523 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
524 cancel_delayed_work(&hdev->power_off);
525
526 err = hci_dev_do_close(hdev);
527
528 done:
529 hci_dev_put(hdev);
530 return err;
531 }
532
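/* Backend for the HCIDEVRESET ioctl: drop all queued RX/command
 * traffic, flush the inquiry cache and connection hash, then issue
 * HCI_Reset to the controller under the request-sync lock.
 */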
533 static int hci_dev_do_reset(struct hci_dev *hdev)
534 {
535 int ret;
536
537 BT_DBG("%s %p", hdev->name, hdev);
538
539 hci_req_sync_lock(hdev);
540
541 /* Drop queues */
542 skb_queue_purge(&hdev->rx_q);
543 skb_queue_purge(&hdev->cmd_q);
544
545 /* Cancel these to avoid queueing non-chained pending work */
546 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
547 /* Wait for
548 *
549 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
550 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
551 *
552 * inside RCU section to see the flag or complete scheduling.
553 */
554 synchronize_rcu();
555 /* Explicitly cancel works in case scheduled after setting the flag. */
556 cancel_delayed_work(&hdev->cmd_timer);
557 cancel_delayed_work(&hdev->ncmd_timer);
558
559 /* Avoid potential lockdep warnings from the *_flush() calls by
560 * ensuring the workqueue is empty up front.
561 */
562 drain_workqueue(hdev->workqueue);
563
564 hci_dev_lock(hdev);
565 hci_inquiry_cache_flush(hdev);
566 hci_conn_hash_flush(hdev);
567 hci_dev_unlock(hdev);
568
569 if (hdev->flush)
570 hdev->flush(hdev);
571
572 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
573
574 atomic_set(&hdev->cmd_cnt, 1);
575 hdev->acl_cnt = 0;
576 hdev->sco_cnt = 0;
577 hdev->le_cnt = 0;
578 hdev->iso_cnt = 0;
579
580 ret = hci_reset_sync(hdev);
581
582 hci_req_sync_unlock(hdev);
583 return ret;
584 }
585
586 int hci_dev_reset(__u16 dev)
587 {
588 struct hci_dev *hdev;
589 int err, srcu_index;
590
591 hdev = hci_dev_get_srcu(dev, &srcu_index);
592 if (!hdev)
593 return -ENODEV;
594
595 if (!test_bit(HCI_UP, &hdev->flags)) {
596 err = -ENETDOWN;
597 goto done;
598 }
599
600 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
601 err = -EBUSY;
602 goto done;
603 }
604
605 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
606 err = -EOPNOTSUPP;
607 goto done;
608 }
609
610 err = hci_dev_do_reset(hdev);
611
612 done:
613 hci_dev_put_srcu(hdev, srcu_index);
614 return err;
615 }
616
617 int hci_dev_reset_stat(__u16 dev)
618 {
619 struct hci_dev *hdev;
620 int ret = 0;
621
622 hdev = hci_dev_get(dev);
623 if (!hdev)
624 return -ENODEV;
625
626 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
627 ret = -EBUSY;
628 goto done;
629 }
630
631 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
632 ret = -EOPNOTSUPP;
633 goto done;
634 }
635
636 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
637
638 done:
639 hci_dev_put(hdev);
640 return ret;
641 }
642
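/* Mirror a raw Write Scan Enable (HCISETSCAN ioctl) into the
 * CONNECTABLE/DISCOVERABLE flags and, if the management interface is
 * in use, notify it of the new settings.
 */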
643 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
644 {
645 bool conn_changed, discov_changed;
646
647 BT_DBG("%s scan 0x%02x", hdev->name, scan);
648
649 if ((scan & SCAN_PAGE))
650 conn_changed = !hci_dev_test_and_set_flag(hdev,
651 HCI_CONNECTABLE);
652 else
653 conn_changed = hci_dev_test_and_clear_flag(hdev,
654 HCI_CONNECTABLE);
655
656 if ((scan & SCAN_INQUIRY)) {
657 discov_changed = !hci_dev_test_and_set_flag(hdev,
658 HCI_DISCOVERABLE);
659 } else {
660 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
661 discov_changed = hci_dev_test_and_clear_flag(hdev,
662 HCI_DISCOVERABLE);
663 }
664
665 if (!hci_dev_test_flag(hdev, HCI_MGMT))
666 return;
667
668 if (conn_changed || discov_changed) {
669 /* In case this was disabled through mgmt */
670 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
671
672 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
673 hci_update_adv_data(hdev, hdev->cur_adv_instance);
674
675 mgmt_new_settings(hdev);
676 }
677 }
678
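/* Handler for the legacy HCISET* ioctls: each command is translated
 * into the corresponding synchronous HCI command or a direct update of
 * the matching hci_dev field.
 */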
679 int hci_dev_cmd(unsigned int cmd, void __user *arg)
680 {
681 struct hci_dev *hdev;
682 struct hci_dev_req dr;
683 __le16 policy;
684 int err = 0;
685
686 if (copy_from_user(&dr, arg, sizeof(dr)))
687 return -EFAULT;
688
689 hdev = hci_dev_get(dr.dev_id);
690 if (!hdev)
691 return -ENODEV;
692
693 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
694 err = -EBUSY;
695 goto done;
696 }
697
698 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
699 err = -EOPNOTSUPP;
700 goto done;
701 }
702
703 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
704 err = -EOPNOTSUPP;
705 goto done;
706 }
707
708 switch (cmd) {
709 case HCISETAUTH:
710 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
711 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
712 break;
713
714 case HCISETENCRYPT:
715 if (!lmp_encrypt_capable(hdev)) {
716 err = -EOPNOTSUPP;
717 break;
718 }
719
720 if (!test_bit(HCI_AUTH, &hdev->flags)) {
721 /* Auth must be enabled first */
722 err = hci_cmd_sync_status(hdev,
723 HCI_OP_WRITE_AUTH_ENABLE,
724 1, &dr.dev_opt,
725 HCI_CMD_TIMEOUT);
726 if (err)
727 break;
728 }
729
730 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
731 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
732 break;
733
734 case HCISETSCAN:
735 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
736 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
737
738 /* Ensure that the connectable and discoverable states
739 * get correctly modified as this was a non-mgmt change.
740 */
741 if (!err)
742 hci_update_passive_scan_state(hdev, dr.dev_opt);
743 break;
744
745 case HCISETLINKPOL:
746 policy = cpu_to_le16(dr.dev_opt);
747
748 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
749 2, &policy, HCI_CMD_TIMEOUT);
750 break;
751
752 case HCISETLINKMODE:
753 hdev->link_mode = ((__u16) dr.dev_opt) &
754 (HCI_LM_MASTER | HCI_LM_ACCEPT);
755 break;
756
757 case HCISETPTYPE:
758 if (hdev->pkt_type == (__u16) dr.dev_opt)
759 break;
760
761 hdev->pkt_type = (__u16) dr.dev_opt;
762 mgmt_phy_configuration_changed(hdev, NULL);
763 break;
764
765 case HCISETACLMTU:
766 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
767 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
768 break;
769
770 case HCISETSCOMTU:
771 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
772 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
773 break;
774
775 default:
776 err = -EINVAL;
777 break;
778 }
779
780 done:
781 hci_dev_put(hdev);
782 return err;
783 }
784
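/* HCIGETDEVLIST ioctl: copy the list of registered controllers (their
 * id and flags) to user space.
 */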
785 int hci_get_dev_list(void __user *arg)
786 {
787 struct hci_dev *hdev;
788 struct hci_dev_list_req *dl;
789 struct hci_dev_req *dr;
790 int n = 0, err;
791 __u16 dev_num;
792
793 if (get_user(dev_num, (__u16 __user *) arg))
794 return -EFAULT;
795
796 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
797 return -EINVAL;
798
799 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
800 if (!dl)
801 return -ENOMEM;
802
803 dl->dev_num = dev_num;
804 dr = dl->dev_req;
805
806 read_lock(&hci_dev_list_lock);
807 list_for_each_entry(hdev, &hci_dev_list, list) {
808 unsigned long flags = hdev->flags;
809
810 /* When the auto-off is configured it means the transport
811 * is running, but in that case still indicate that the
812 * device is actually down.
813 */
814 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
815 flags &= ~BIT(HCI_UP);
816
817 dr[n].dev_id = hdev->id;
818 dr[n].dev_opt = flags;
819
820 if (++n >= dev_num)
821 break;
822 }
823 read_unlock(&hci_dev_list_lock);
824
825 dl->dev_num = n;
826 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
827 kfree(dl);
828
829 return err ? -EFAULT : 0;
830 }
831
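/* HCIGETDEVINFO ioctl: fill struct hci_dev_info for one controller. */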
832 int hci_get_dev_info(void __user *arg)
833 {
834 struct hci_dev *hdev;
835 struct hci_dev_info di;
836 unsigned long flags;
837 int err = 0;
838
839 if (copy_from_user(&di, arg, sizeof(di)))
840 return -EFAULT;
841
842 hdev = hci_dev_get(di.dev_id);
843 if (!hdev)
844 return -ENODEV;
845
846 /* When the auto-off is configured it means the transport
847 * is running, but in that case still indicate that the
848 * device is actually down.
849 */
850 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
851 flags = hdev->flags & ~BIT(HCI_UP);
852 else
853 flags = hdev->flags;
854
855 strscpy(di.name, hdev->name, sizeof(di.name));
856 di.bdaddr = hdev->bdaddr;
857 di.type = (hdev->bus & 0x0f);
858 di.flags = flags;
859 di.pkt_type = hdev->pkt_type;
860 if (lmp_bredr_capable(hdev)) {
861 di.acl_mtu = hdev->acl_mtu;
862 di.acl_pkts = hdev->acl_pkts;
863 di.sco_mtu = hdev->sco_mtu;
864 di.sco_pkts = hdev->sco_pkts;
865 } else {
866 di.acl_mtu = hdev->le_mtu;
867 di.acl_pkts = hdev->le_pkts;
868 di.sco_mtu = 0;
869 di.sco_pkts = 0;
870 }
871 di.link_policy = hdev->link_policy;
872 di.link_mode = hdev->link_mode;
873
874 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
875 memcpy(&di.features, &hdev->features, sizeof(di.features));
876
877 if (copy_to_user(arg, &di, sizeof(di)))
878 err = -EFAULT;
879
880 hci_dev_put(hdev);
881
882 return err;
883 }
884
885 /* ---- Interface to HCI drivers ---- */
886
887 static int hci_dev_do_poweroff(struct hci_dev *hdev)
888 {
889 int err;
890
891 BT_DBG("%s %p", hdev->name, hdev);
892
893 hci_req_sync_lock(hdev);
894
895 err = hci_set_powered_sync(hdev, false);
896
897 hci_req_sync_unlock(hdev);
898
899 return err;
900 }
901
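/* rfkill callback: when the switch blocks the controller, mark it
 * RFKILLED and power it down; unblocking only clears the flag and
 * leaves powering back up to user space.
 */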
902 static int hci_rfkill_set_block(void *data, bool blocked)
903 {
904 struct hci_dev *hdev = data;
905 int err;
906
907 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
908
909 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
910 return -EBUSY;
911
912 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
913 return 0;
914
915 if (blocked) {
916 hci_dev_set_flag(hdev, HCI_RFKILLED);
917
918 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
919 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
920 err = hci_dev_do_poweroff(hdev);
921 if (err) {
922 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
923 err);
924
925 /* Make sure the device is still closed even if
926 * anything during power off sequence (eg.
927 * disconnecting devices) failed.
928 */
929 hci_dev_do_close(hdev);
930 }
931 }
932 } else {
933 hci_dev_clear_flag(hdev, HCI_RFKILLED);
934 }
935
936 return 0;
937 }
938
939 static const struct rfkill_ops hci_rfkill_ops = {
940 .set_block = hci_rfkill_set_block,
941 };
942
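/* Deferred power-on work, queued e.g. when a controller is registered
 * or powered on through the management interface.
 */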
943 static void hci_power_on(struct work_struct *work)
944 {
945 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
946 int err;
947
948 BT_DBG("%s", hdev->name);
949
950 if (test_bit(HCI_UP, &hdev->flags) &&
951 hci_dev_test_flag(hdev, HCI_MGMT) &&
952 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
953 cancel_delayed_work(&hdev->power_off);
954 err = hci_powered_update_sync(hdev);
955 mgmt_power_on(hdev, err);
956 return;
957 }
958
959 err = hci_dev_do_open(hdev);
960 if (err < 0) {
961 hci_dev_lock(hdev);
962 mgmt_set_powered_failed(hdev, err);
963 hci_dev_unlock(hdev);
964 return;
965 }
966
967 /* During the HCI setup phase, a few error conditions are
968 * ignored and they need to be checked now. If they are still
969 * valid, it is important to turn the device back off.
970 */
971 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
972 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
973 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
975 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
976 hci_dev_do_close(hdev);
977 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
979 HCI_AUTO_OFF_TIMEOUT);
980 }
981
982 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
983 /* For unconfigured devices, set the HCI_RAW flag
984 * so that userspace can easily identify them.
985 */
986 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
987 set_bit(HCI_RAW, &hdev->flags);
988
989 /* For fully configured devices, this will send
990 * the Index Added event. For unconfigured devices,
991 * it will send an Unconfigured Index Added event.
992 *
993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
994 * and no event will be sent.
995 */
996 mgmt_index_added(hdev);
997 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
998 /* When the controller is now configured, then it
999 * is important to clear the HCI_RAW flag.
1000 */
1001 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1002 clear_bit(HCI_RAW, &hdev->flags);
1003
1004 /* Powering on the controller with HCI_CONFIG set only
1005 * happens with the transition from unconfigured to
1006 * configured. This will send the Index Added event.
1007 */
1008 mgmt_index_added(hdev);
1009 }
1010 }
1011
1012 static void hci_power_off(struct work_struct *work)
1013 {
1014 struct hci_dev *hdev = container_of(work, struct hci_dev,
1015 power_off.work);
1016
1017 BT_DBG("%s", hdev->name);
1018
1019 hci_dev_do_close(hdev);
1020 }
1021
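/* Hardware error recovery work: give the driver a chance to handle the
 * error, then cycle the controller by closing and reopening it.
 */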
1022 static void hci_error_reset(struct work_struct *work)
1023 {
1024 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1025
1026 hci_dev_hold(hdev);
1027 BT_DBG("%s", hdev->name);
1028
1029 if (hdev->hw_error)
1030 hdev->hw_error(hdev, hdev->hw_error_code);
1031 else
1032 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1033
1034 if (!hci_dev_do_close(hdev))
1035 hci_dev_do_open(hdev);
1036
1037 hci_dev_put(hdev);
1038 }
1039
1040 void hci_uuids_clear(struct hci_dev *hdev)
1041 {
1042 struct bt_uuid *uuid, *tmp;
1043
1044 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1045 list_del(&uuid->list);
1046 kfree(uuid);
1047 }
1048 }
1049
1050 void hci_link_keys_clear(struct hci_dev *hdev)
1051 {
1052 struct link_key *key, *tmp;
1053
1054 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1055 list_del_rcu(&key->list);
1056 kfree_rcu(key, rcu);
1057 }
1058 }
1059
1060 void hci_smp_ltks_clear(struct hci_dev *hdev)
1061 {
1062 struct smp_ltk *k, *tmp;
1063
1064 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1065 list_del_rcu(&k->list);
1066 kfree_rcu(k, rcu);
1067 }
1068 }
1069
1070 void hci_smp_irks_clear(struct hci_dev *hdev)
1071 {
1072 struct smp_irk *k, *tmp;
1073
1074 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1075 list_del_rcu(&k->list);
1076 kfree_rcu(k, rcu);
1077 }
1078 }
1079
1080 void hci_blocked_keys_clear(struct hci_dev *hdev)
1081 {
1082 struct blocked_key *b, *tmp;
1083
1084 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1085 list_del_rcu(&b->list);
1086 kfree_rcu(b, rcu);
1087 }
1088 }
1089
1090 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1091 {
1092 bool blocked = false;
1093 struct blocked_key *b;
1094
1095 rcu_read_lock();
1096 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1097 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1098 blocked = true;
1099 break;
1100 }
1101 }
1102
1103 rcu_read_unlock();
1104 return blocked;
1105 }
1106
1107 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1108 {
1109 struct link_key *k;
1110
1111 rcu_read_lock();
1112 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1113 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1114 rcu_read_unlock();
1115
1116 if (hci_is_blocked_key(hdev,
1117 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1118 k->val)) {
1119 bt_dev_warn_ratelimited(hdev,
1120 "Link key blocked for %pMR",
1121 &k->bdaddr);
1122 return NULL;
1123 }
1124
1125 return k;
1126 }
1127 }
1128 rcu_read_unlock();
1129
1130 return NULL;
1131 }
1132
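/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the bonding requirements of
 * both sides of the connection.
 */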
1133 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1134 u8 key_type, u8 old_key_type)
1135 {
1136 /* Legacy key */
1137 if (key_type < 0x03)
1138 return true;
1139
1140 /* Debug keys are insecure so don't store them persistently */
1141 if (key_type == HCI_LK_DEBUG_COMBINATION)
1142 return false;
1143
1144 /* Changed combination key and there's no previous one */
1145 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1146 return false;
1147
1148 /* Security mode 3 case */
1149 if (!conn)
1150 return true;
1151
1152 /* BR/EDR key derived using SC from an LE link */
1153 if (conn->type == LE_LINK)
1154 return true;
1155
1156 /* Neither local nor remote side requested no-bonding */
1157 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1158 return true;
1159
1160 /* Local side had dedicated bonding as requirement */
1161 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1162 return true;
1163
1164 /* Remote side had dedicated bonding as requirement */
1165 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1166 return true;
1167
1168 /* If none of the above criteria match, then don't store the key
1169 * persistently */
1170 return false;
1171 }
1172
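/* Map an LTK type to the local role it is used in: SMP_LTK keys are
 * used when acting as central (master), all other types when acting as
 * peripheral (slave).
 */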
1173 static u8 ltk_role(u8 type)
1174 {
1175 if (type == SMP_LTK)
1176 return HCI_ROLE_MASTER;
1177
1178 return HCI_ROLE_SLAVE;
1179 }
1180
1181 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1182 u8 addr_type, u8 role)
1183 {
1184 struct smp_ltk *k;
1185
1186 rcu_read_lock();
1187 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1188 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1189 continue;
1190
1191 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1192 rcu_read_unlock();
1193
1194 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1195 k->val)) {
1196 bt_dev_warn_ratelimited(hdev,
1197 "LTK blocked for %pMR",
1198 &k->bdaddr);
1199 return NULL;
1200 }
1201
1202 return k;
1203 }
1204 }
1205 rcu_read_unlock();
1206
1207 return NULL;
1208 }
1209
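/* Resolve a Resolvable Private Address: first look for a cached RPA
 * match, then fall back to testing each IRK cryptographically and
 * cache the RPA on a hit. Blocked IRKs are never returned.
 */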
1210 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1211 {
1212 struct smp_irk *irk_to_return = NULL;
1213 struct smp_irk *irk;
1214
1215 rcu_read_lock();
1216 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1217 if (!bacmp(&irk->rpa, rpa)) {
1218 irk_to_return = irk;
1219 goto done;
1220 }
1221 }
1222
1223 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1224 if (smp_irk_matches(hdev, irk->val, rpa)) {
1225 bacpy(&irk->rpa, rpa);
1226 irk_to_return = irk;
1227 goto done;
1228 }
1229 }
1230
1231 done:
1232 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1233 irk_to_return->val)) {
1234 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1235 &irk_to_return->bdaddr);
1236 irk_to_return = NULL;
1237 }
1238
1239 rcu_read_unlock();
1240
1241 return irk_to_return;
1242 }
1243
1244 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1245 u8 addr_type)
1246 {
1247 struct smp_irk *irk_to_return = NULL;
1248 struct smp_irk *irk;
1249
1250 /* Identity Address must be public or static random */
1251 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1252 return NULL;
1253
1254 rcu_read_lock();
1255 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1256 if (addr_type == irk->addr_type &&
1257 bacmp(bdaddr, &irk->bdaddr) == 0) {
1258 irk_to_return = irk;
1259 goto done;
1260 }
1261 }
1262
1263 done:
1264
1265 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1266 irk_to_return->val)) {
1267 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1268 &irk_to_return->bdaddr);
1269 irk_to_return = NULL;
1270 }
1271
1272 rcu_read_unlock();
1273
1274 return irk_to_return;
1275 }
1276
1277 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1278 bdaddr_t *bdaddr, u8 *val, u8 type,
1279 u8 pin_len, bool *persistent)
1280 {
1281 struct link_key *key, *old_key;
1282 u8 old_key_type;
1283
1284 old_key = hci_find_link_key(hdev, bdaddr);
1285 if (old_key) {
1286 old_key_type = old_key->type;
1287 key = old_key;
1288 } else {
1289 old_key_type = conn ? conn->key_type : 0xff;
1290 key = kzalloc(sizeof(*key), GFP_KERNEL);
1291 if (!key)
1292 return NULL;
1293 list_add_rcu(&key->list, &hdev->link_keys);
1294 }
1295
1296 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1297
1298 /* Some buggy controller combinations generate a changed
1299 * combination key for legacy pairing even when there's no
1300 * previous key */
1301 if (type == HCI_LK_CHANGED_COMBINATION &&
1302 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1303 type = HCI_LK_COMBINATION;
1304 if (conn)
1305 conn->key_type = type;
1306 }
1307
1308 bacpy(&key->bdaddr, bdaddr);
1309 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1310 key->pin_len = pin_len;
1311
1312 if (type == HCI_LK_CHANGED_COMBINATION)
1313 key->type = old_key_type;
1314 else
1315 key->type = type;
1316
1317 if (persistent)
1318 *persistent = hci_persistent_key(hdev, conn, type,
1319 old_key_type);
1320
1321 return key;
1322 }
1323
1324 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1325 u8 addr_type, u8 type, u8 authenticated,
1326 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1327 {
1328 struct smp_ltk *key, *old_key;
1329 u8 role = ltk_role(type);
1330
1331 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1332 if (old_key)
1333 key = old_key;
1334 else {
1335 key = kzalloc(sizeof(*key), GFP_KERNEL);
1336 if (!key)
1337 return NULL;
1338 list_add_rcu(&key->list, &hdev->long_term_keys);
1339 }
1340
1341 bacpy(&key->bdaddr, bdaddr);
1342 key->bdaddr_type = addr_type;
1343 memcpy(key->val, tk, sizeof(key->val));
1344 key->authenticated = authenticated;
1345 key->ediv = ediv;
1346 key->rand = rand;
1347 key->enc_size = enc_size;
1348 key->type = type;
1349
1350 return key;
1351 }
1352
1353 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1354 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1355 {
1356 struct smp_irk *irk;
1357
1358 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1359 if (!irk) {
1360 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1361 if (!irk)
1362 return NULL;
1363
1364 bacpy(&irk->bdaddr, bdaddr);
1365 irk->addr_type = addr_type;
1366
1367 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1368 }
1369
1370 memcpy(irk->val, val, 16);
1371 bacpy(&irk->rpa, rpa);
1372
1373 return irk;
1374 }
1375
1376 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1377 {
1378 struct link_key *key;
1379
1380 key = hci_find_link_key(hdev, bdaddr);
1381 if (!key)
1382 return -ENOENT;
1383
1384 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1385
1386 list_del_rcu(&key->list);
1387 kfree_rcu(key, rcu);
1388
1389 return 0;
1390 }
1391
1392 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1393 {
1394 struct smp_ltk *k, *tmp;
1395 int removed = 0;
1396
1397 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1398 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1399 continue;
1400
1401 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402
1403 list_del_rcu(&k->list);
1404 kfree_rcu(k, rcu);
1405 removed++;
1406 }
1407
1408 return removed ? 0 : -ENOENT;
1409 }
1410
1411 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1412 {
1413 struct smp_irk *k, *tmp;
1414
1415 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1416 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1417 continue;
1418
1419 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1420
1421 list_del_rcu(&k->list);
1422 kfree_rcu(k, rcu);
1423 }
1424 }
1425
1426 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1427 {
1428 struct smp_ltk *k;
1429 struct smp_irk *irk;
1430 u8 addr_type;
1431
1432 if (type == BDADDR_BREDR) {
1433 if (hci_find_link_key(hdev, bdaddr))
1434 return true;
1435 return false;
1436 }
1437
1438 /* Convert to HCI addr type which struct smp_ltk uses */
1439 if (type == BDADDR_LE_PUBLIC)
1440 addr_type = ADDR_LE_DEV_PUBLIC;
1441 else
1442 addr_type = ADDR_LE_DEV_RANDOM;
1443
1444 irk = hci_get_irk(hdev, bdaddr, addr_type);
1445 if (irk) {
1446 bdaddr = &irk->bdaddr;
1447 addr_type = irk->addr_type;
1448 }
1449
1450 rcu_read_lock();
1451 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1452 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1453 rcu_read_unlock();
1454 return true;
1455 }
1456 }
1457 rcu_read_unlock();
1458
1459 return false;
1460 }
1461
1462 /* HCI command timer function */
1463 static void hci_cmd_timeout(struct work_struct *work)
1464 {
1465 struct hci_dev *hdev = container_of(work, struct hci_dev,
1466 cmd_timer.work);
1467
1468 if (hdev->req_skb) {
1469 u16 opcode = hci_skb_opcode(hdev->req_skb);
1470
1471 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1472
1473 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1474 } else {
1475 bt_dev_err(hdev, "command tx timeout");
1476 }
1477
1478 if (hdev->reset)
1479 hdev->reset(hdev);
1480
1481 atomic_set(&hdev->cmd_cnt, 1);
1482 queue_work(hdev->workqueue, &hdev->cmd_work);
1483 }
1484
1485 /* HCI ncmd timer function */
1486 static void hci_ncmd_timeout(struct work_struct *work)
1487 {
1488 struct hci_dev *hdev = container_of(work, struct hci_dev,
1489 ncmd_timer.work);
1490
1491 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1492
1493 /* During HCI_INIT phase no events can be injected if the ncmd timer
1494 * triggers since the procedure has its own timeout handling.
1495 */
1496 if (test_bit(HCI_INIT, &hdev->flags))
1497 return;
1498
1499 /* This is an irrecoverable state, inject hardware error event */
1500 hci_reset_dev(hdev);
1501 }
1502
1503 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1504 bdaddr_t *bdaddr, u8 bdaddr_type)
1505 {
1506 struct oob_data *data;
1507
1508 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1509 if (bacmp(bdaddr, &data->bdaddr) != 0)
1510 continue;
1511 if (data->bdaddr_type != bdaddr_type)
1512 continue;
1513 return data;
1514 }
1515
1516 return NULL;
1517 }
1518
1519 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1520 u8 bdaddr_type)
1521 {
1522 struct oob_data *data;
1523
1524 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1525 if (!data)
1526 return -ENOENT;
1527
1528 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1529
1530 list_del(&data->list);
1531 kfree(data);
1532
1533 return 0;
1534 }
1535
1536 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1537 {
1538 struct oob_data *data, *n;
1539
1540 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1541 list_del(&data->list);
1542 kfree(data);
1543 }
1544 }
1545
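/* Store (or update) remote OOB pairing data; ->present encodes which
 * blocks are valid (0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both).
 */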
1546 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1547 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1548 u8 *hash256, u8 *rand256)
1549 {
1550 struct oob_data *data;
1551
1552 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1553 if (!data) {
1554 data = kmalloc(sizeof(*data), GFP_KERNEL);
1555 if (!data)
1556 return -ENOMEM;
1557
1558 bacpy(&data->bdaddr, bdaddr);
1559 data->bdaddr_type = bdaddr_type;
1560 list_add(&data->list, &hdev->remote_oob_data);
1561 }
1562
1563 if (hash192 && rand192) {
1564 memcpy(data->hash192, hash192, sizeof(data->hash192));
1565 memcpy(data->rand192, rand192, sizeof(data->rand192));
1566 if (hash256 && rand256)
1567 data->present = 0x03;
1568 } else {
1569 memset(data->hash192, 0, sizeof(data->hash192));
1570 memset(data->rand192, 0, sizeof(data->rand192));
1571 if (hash256 && rand256)
1572 data->present = 0x02;
1573 else
1574 data->present = 0x00;
1575 }
1576
1577 if (hash256 && rand256) {
1578 memcpy(data->hash256, hash256, sizeof(data->hash256));
1579 memcpy(data->rand256, rand256, sizeof(data->rand256));
1580 } else {
1581 memset(data->hash256, 0, sizeof(data->hash256));
1582 memset(data->rand256, 0, sizeof(data->rand256));
1583 if (hash192 && rand192)
1584 data->present = 0x01;
1585 }
1586
1587 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1588
1589 return 0;
1590 }
1591
1592 /* This function requires the caller holds hdev->lock */
1593 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1594 {
1595 struct adv_info *adv_instance;
1596
1597 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1598 if (adv_instance->instance == instance)
1599 return adv_instance;
1600 }
1601
1602 return NULL;
1603 }
1604
1605 /* This function requires the caller holds hdev->lock */
1606 struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid)
1607 {
1608 struct adv_info *adv;
1609
1610 list_for_each_entry(adv, &hdev->adv_instances, list) {
1611 if (adv->sid == sid)
1612 return adv;
1613 }
1614
1615 return NULL;
1616 }
1617
1618 /* This function requires the caller holds hdev->lock */
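/* Return the advertising instance following @instance, wrapping back to
 * the first instance at the end of the list.
 */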
1619 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1620 {
1621 struct adv_info *cur_instance;
1622
1623 cur_instance = hci_find_adv_instance(hdev, instance);
1624 if (!cur_instance)
1625 return NULL;
1626
1627 if (cur_instance == list_last_entry(&hdev->adv_instances,
1628 struct adv_info, list))
1629 return list_first_entry(&hdev->adv_instances,
1630 struct adv_info, list);
1631 else
1632 return list_next_entry(cur_instance, list);
1633 }
1634
1635 /* This function requires the caller holds hdev->lock */
1636 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1637 {
1638 struct adv_info *adv_instance;
1639
1640 adv_instance = hci_find_adv_instance(hdev, instance);
1641 if (!adv_instance)
1642 return -ENOENT;
1643
1644 BT_DBG("%s removing %dMR", hdev->name, instance);
1645
1646 if (hdev->cur_adv_instance == instance) {
1647 if (hdev->adv_instance_timeout) {
1648 cancel_delayed_work(&hdev->adv_instance_expire);
1649 hdev->adv_instance_timeout = 0;
1650 }
1651 hdev->cur_adv_instance = 0x00;
1652 }
1653
1654 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1655
1656 list_del(&adv_instance->list);
1657 kfree(adv_instance);
1658
1659 hdev->adv_instance_cnt--;
1660
1661 return 0;
1662 }
1663
1664 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1665 {
1666 struct adv_info *adv_instance, *n;
1667
1668 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1669 adv_instance->rpa_expired = rpa_expired;
1670 }
1671
1672 /* This function requires the caller holds hdev->lock */
1673 void hci_adv_instances_clear(struct hci_dev *hdev)
1674 {
1675 struct adv_info *adv_instance, *n;
1676
1677 if (hdev->adv_instance_timeout) {
1678 disable_delayed_work(&hdev->adv_instance_expire);
1679 hdev->adv_instance_timeout = 0;
1680 }
1681
1682 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1683 disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1684 list_del(&adv_instance->list);
1685 kfree(adv_instance);
1686 }
1687
1688 hdev->adv_instance_cnt = 0;
1689 hdev->cur_adv_instance = 0x00;
1690 }
1691
1692 static void adv_instance_rpa_expired(struct work_struct *work)
1693 {
1694 struct adv_info *adv_instance = container_of(work, struct adv_info,
1695 rpa_expired_cb.work);
1696
1697 BT_DBG("");
1698
1699 adv_instance->rpa_expired = true;
1700 }
1701
1702 /* This function requires the caller holds hdev->lock */
1703 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1704 u32 flags, u16 adv_data_len, u8 *adv_data,
1705 u16 scan_rsp_len, u8 *scan_rsp_data,
1706 u16 timeout, u16 duration, s8 tx_power,
1707 u32 min_interval, u32 max_interval,
1708 u8 mesh_handle)
1709 {
1710 struct adv_info *adv;
1711
1712 adv = hci_find_adv_instance(hdev, instance);
1713 if (adv) {
1714 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1715 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1716 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1717 } else {
1718 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1719 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1720 return ERR_PTR(-EOVERFLOW);
1721
1722 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1723 if (!adv)
1724 return ERR_PTR(-ENOMEM);
1725
1726 adv->pending = true;
1727 adv->instance = instance;
1728
1729 /* If the controller supports only one set and the instance is set to
1730 * 1 then there is no option other than using handle 0x00.
1731 */
1732 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1733 adv->handle = 0x00;
1734 else
1735 adv->handle = instance;
1736
1737 list_add(&adv->list, &hdev->adv_instances);
1738 hdev->adv_instance_cnt++;
1739 }
1740
1741 adv->flags = flags;
1742 adv->min_interval = min_interval;
1743 adv->max_interval = max_interval;
1744 adv->tx_power = tx_power;
1745 /* Defining a mesh_handle changes the timing units to ms,
1746 * rather than seconds, and ties the instance to the requested
1747 * mesh_tx queue.
1748 */
1749 adv->mesh = mesh_handle;
1750
1751 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1752 scan_rsp_len, scan_rsp_data);
1753
1754 adv->timeout = timeout;
1755 adv->remaining_time = timeout;
1756
1757 if (duration == 0)
1758 adv->duration = hdev->def_multi_adv_rotation_duration;
1759 else
1760 adv->duration = duration;
1761
1762 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1763
1764 BT_DBG("%s for %dMR", hdev->name, instance);
1765
1766 return adv;
1767 }
1768
1769 /* This function requires the caller holds hdev->lock */
1770 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
1771 u32 flags, u8 data_len, u8 *data,
1772 u32 min_interval, u32 max_interval)
1773 {
1774 struct adv_info *adv;
1775
1776 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1777 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1778 min_interval, max_interval, 0);
1779 if (IS_ERR(adv))
1780 return adv;
1781
1782 adv->sid = sid;
1783 adv->periodic = true;
1784 adv->per_adv_data_len = data_len;
1785
1786 if (data)
1787 memcpy(adv->per_adv_data, data, data_len);
1788
1789 return adv;
1790 }
1791
1792 /* This function requires the caller holds hdev->lock */
1793 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1794 u16 adv_data_len, u8 *adv_data,
1795 u16 scan_rsp_len, u8 *scan_rsp_data)
1796 {
1797 struct adv_info *adv;
1798
1799 adv = hci_find_adv_instance(hdev, instance);
1800
1801 /* If advertisement doesn't exist, we can't modify its data */
1802 if (!adv)
1803 return -ENOENT;
1804
1805 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1806 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1807 memcpy(adv->adv_data, adv_data, adv_data_len);
1808 adv->adv_data_len = adv_data_len;
1809 adv->adv_data_changed = true;
1810 }
1811
1812 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1813 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1814 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1815 adv->scan_rsp_len = scan_rsp_len;
1816 adv->scan_rsp_changed = true;
1817 }
1818
1819 /* Mark as changed if there are flags which would affect it */
1820 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1821 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1822 adv->scan_rsp_changed = true;
1823
1824 return 0;
1825 }
1826
1827 /* This function requires the caller holds hdev->lock */
1828 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1829 {
1830 u32 flags;
1831 struct adv_info *adv;
1832
1833 if (instance == 0x00) {
1834 /* Instance 0 always manages the "Tx Power" and "Flags"
1835 * fields
1836 */
1837 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1838
1839 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1840 * corresponds to the "connectable" instance flag.
1841 */
1842 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1843 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1844
1845 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1846 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1847 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1848 flags |= MGMT_ADV_FLAG_DISCOV;
1849
1850 return flags;
1851 }
1852
1853 adv = hci_find_adv_instance(hdev, instance);
1854
1855 /* Return 0 when we got an invalid instance identifier. */
1856 if (!adv)
1857 return 0;
1858
1859 return adv->flags;
1860 }
1861
1862 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1863 {
1864 struct adv_info *adv;
1865
1866 /* Instance 0x00 always sets the local name */
1867 if (instance == 0x00)
1868 return true;
1869
1870 adv = hci_find_adv_instance(hdev, instance);
1871 if (!adv)
1872 return false;
1873
1874 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1875 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1876 return true;
1877
1878 return adv->scan_rsp_len ? true : false;
1879 }
1880
1881 /* This function requires the caller holds hdev->lock */
1882 void hci_adv_monitors_clear(struct hci_dev *hdev)
1883 {
1884 struct adv_monitor *monitor;
1885 int handle;
1886
1887 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1888 hci_free_adv_monitor(hdev, monitor);
1889
1890 idr_destroy(&hdev->adv_monitors_idr);
1891 }
1892
1893 /* Frees the monitor structure and does some bookkeeping.
1894 * This function requires the caller holds hdev->lock.
1895 */
1896 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1897 {
1898 struct adv_pattern *pattern;
1899 struct adv_pattern *tmp;
1900
1901 if (!monitor)
1902 return;
1903
1904 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1905 list_del(&pattern->list);
1906 kfree(pattern);
1907 }
1908
1909 if (monitor->handle)
1910 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1911
1912 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1913 hdev->adv_monitors_cnt--;
1914
1915 kfree(monitor);
1916 }
1917
1918 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1919 * also attempts to forward the request to the controller.
1920 * This function requires the caller holds hci_req_sync_lock.
1921 */
1922 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1923 {
1924 int min, max, handle;
1925 int status = 0;
1926
1927 if (!monitor)
1928 return -EINVAL;
1929
1930 hci_dev_lock(hdev);
1931
1932 min = HCI_MIN_ADV_MONITOR_HANDLE;
1933 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1934 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1935 GFP_KERNEL);
1936
1937 hci_dev_unlock(hdev);
1938
1939 if (handle < 0)
1940 return handle;
1941
1942 monitor->handle = handle;
1943
1944 if (!hdev_is_powered(hdev))
1945 return status;
1946
1947 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1948 case HCI_ADV_MONITOR_EXT_NONE:
1949 bt_dev_dbg(hdev, "add monitor %d status %d",
1950 monitor->handle, status);
1951 /* Message was not forwarded to controller - not an error */
1952 break;
1953
1954 case HCI_ADV_MONITOR_EXT_MSFT:
1955 status = msft_add_monitor_pattern(hdev, monitor);
1956 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1957 handle, status);
1958 break;
1959 }
1960
1961 return status;
1962 }
1963
1964 /* Attempts to tell the controller and free the monitor. If somehow the
1965 * controller doesn't have a corresponding handle, remove anyway.
1966 * This function requires the caller holds hci_req_sync_lock.
1967 */
1968 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1969 struct adv_monitor *monitor)
1970 {
1971 int status = 0;
1972 int handle;
1973
1974 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1975 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1976 bt_dev_dbg(hdev, "remove monitor %d status %d",
1977 monitor->handle, status);
1978 goto free_monitor;
1979
1980 case HCI_ADV_MONITOR_EXT_MSFT:
1981 handle = monitor->handle;
1982 status = msft_remove_monitor(hdev, monitor);
1983 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1984 handle, status);
1985 break;
1986 }
1987
1988 /* In case no matching handle registered, just free the monitor */
1989 if (status == -ENOENT)
1990 goto free_monitor;
1991
1992 return status;
1993
1994 free_monitor:
1995 if (status == -ENOENT)
1996 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1997 monitor->handle);
1998 hci_free_adv_monitor(hdev, monitor);
1999
2000 return status;
2001 }
2002
2003 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_single_adv_monitor(struct hci_dev * hdev,u16 handle)2004 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2005 {
2006 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2007
2008 if (!monitor)
2009 return -EINVAL;
2010
2011 return hci_remove_adv_monitor(hdev, monitor);
2012 }
2013
2014 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_all_adv_monitor(struct hci_dev * hdev)2015 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2016 {
2017 struct adv_monitor *monitor;
2018 int idr_next_id = 0;
2019 int status = 0;
2020
2021 while (1) {
2022 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2023 if (!monitor)
2024 break;
2025
2026 status = hci_remove_adv_monitor(hdev, monitor);
2027 if (status)
2028 return status;
2029
2030 idr_next_id++;
2031 }
2032
2033 return status;
2034 }
2035
2036 /* This function requires the caller holds hdev->lock */
hci_is_adv_monitoring(struct hci_dev * hdev)2037 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2038 {
2039 return !idr_is_empty(&hdev->adv_monitors_idr);
2040 }
2041
hci_get_adv_monitor_offload_ext(struct hci_dev * hdev)2042 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2043 {
2044 if (msft_monitor_supported(hdev))
2045 return HCI_ADV_MONITOR_EXT_MSFT;
2046
2047 return HCI_ADV_MONITOR_EXT_NONE;
2048 }
2049
hci_bdaddr_list_lookup(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2050 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2051 bdaddr_t *bdaddr, u8 type)
2052 {
2053 struct bdaddr_list *b;
2054
2055 list_for_each_entry(b, bdaddr_list, list) {
2056 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2057 return b;
2058 }
2059
2060 return NULL;
2061 }
2062
hci_bdaddr_list_lookup_with_irk(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2063 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2064 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2065 u8 type)
2066 {
2067 struct bdaddr_list_with_irk *b;
2068
2069 list_for_each_entry(b, bdaddr_list, list) {
2070 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2071 return b;
2072 }
2073
2074 return NULL;
2075 }
2076
2077 struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2078 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2079 bdaddr_t *bdaddr, u8 type)
2080 {
2081 struct bdaddr_list_with_flags *b;
2082
2083 list_for_each_entry(b, bdaddr_list, list) {
2084 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2085 return b;
2086 }
2087
2088 return NULL;
2089 }
2090
hci_bdaddr_list_clear(struct list_head * bdaddr_list)2091 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2092 {
2093 struct bdaddr_list *b, *n;
2094
2095 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2096 list_del(&b->list);
2097 kfree(b);
2098 }
2099 }
2100
hci_bdaddr_list_add(struct list_head * list,bdaddr_t * bdaddr,u8 type)2101 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2102 {
2103 struct bdaddr_list *entry;
2104
2105 if (!bacmp(bdaddr, BDADDR_ANY))
2106 return -EBADF;
2107
2108 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2109 return -EEXIST;
2110
2111 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2112 if (!entry)
2113 return -ENOMEM;
2114
2115 bacpy(&entry->bdaddr, bdaddr);
2116 entry->bdaddr_type = type;
2117
2118 list_add(&entry->list, list);
2119
2120 return 0;
2121 }
2122
hci_bdaddr_list_add_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type,u8 * peer_irk,u8 * local_irk)2123 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2124 u8 type, u8 *peer_irk, u8 *local_irk)
2125 {
2126 struct bdaddr_list_with_irk *entry;
2127
2128 if (!bacmp(bdaddr, BDADDR_ANY))
2129 return -EBADF;
2130
2131 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2132 return -EEXIST;
2133
2134 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2135 if (!entry)
2136 return -ENOMEM;
2137
2138 bacpy(&entry->bdaddr, bdaddr);
2139 entry->bdaddr_type = type;
2140
2141 if (peer_irk)
2142 memcpy(entry->peer_irk, peer_irk, 16);
2143
2144 if (local_irk)
2145 memcpy(entry->local_irk, local_irk, 16);
2146
2147 list_add(&entry->list, list);
2148
2149 return 0;
2150 }
2151
hci_bdaddr_list_add_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type,u32 flags)2152 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2153 u8 type, u32 flags)
2154 {
2155 struct bdaddr_list_with_flags *entry;
2156
2157 if (!bacmp(bdaddr, BDADDR_ANY))
2158 return -EBADF;
2159
2160 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2161 return -EEXIST;
2162
2163 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2164 if (!entry)
2165 return -ENOMEM;
2166
2167 bacpy(&entry->bdaddr, bdaddr);
2168 entry->bdaddr_type = type;
2169 entry->flags = flags;
2170
2171 list_add(&entry->list, list);
2172
2173 return 0;
2174 }
2175
hci_bdaddr_list_del(struct list_head * list,bdaddr_t * bdaddr,u8 type)2176 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2177 {
2178 struct bdaddr_list *entry;
2179
2180 if (!bacmp(bdaddr, BDADDR_ANY)) {
2181 hci_bdaddr_list_clear(list);
2182 return 0;
2183 }
2184
2185 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2186 if (!entry)
2187 return -ENOENT;
2188
2189 list_del(&entry->list);
2190 kfree(entry);
2191
2192 return 0;
2193 }
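
/* Illustrative sketch, not part of the original file: typical use of the
 * bdaddr list helpers above, here adding a peer to the LE accept list while
 * holding hdev->lock. The list and address type are just examples.
 */
static int __maybe_unused example_accept_list_add(struct hci_dev *hdev,
						  bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->le_accept_list, bdaddr,
				  ADDR_LE_DEV_PUBLIC);
	hci_dev_unlock(hdev);

	return err;
}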
2194
hci_bdaddr_list_del_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type)2195 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2196 u8 type)
2197 {
2198 struct bdaddr_list_with_irk *entry;
2199
2200 if (!bacmp(bdaddr, BDADDR_ANY)) {
2201 hci_bdaddr_list_clear(list);
2202 return 0;
2203 }
2204
2205 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2206 if (!entry)
2207 return -ENOENT;
2208
2209 list_del(&entry->list);
2210 kfree(entry);
2211
2212 return 0;
2213 }
2214
2215 /* This function requires the caller holds hdev->lock */
hci_conn_params_lookup(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2216 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2217 bdaddr_t *addr, u8 addr_type)
2218 {
2219 struct hci_conn_params *params;
2220
2221 list_for_each_entry(params, &hdev->le_conn_params, list) {
2222 		if (bacmp(&params->addr, addr) == 0 &&
2223 params->addr_type == addr_type) {
2224 return params;
2225 }
2226 }
2227
2228 return NULL;
2229 }
2230
2231 /* This function requires the caller holds hdev->lock or rcu_read_lock */
hci_pend_le_action_lookup(struct list_head * list,bdaddr_t * addr,u8 addr_type)2232 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2233 bdaddr_t *addr, u8 addr_type)
2234 {
2235 struct hci_conn_params *param;
2236
2237 rcu_read_lock();
2238
2239 list_for_each_entry_rcu(param, list, action) {
2240 		if (bacmp(&param->addr, addr) == 0 &&
2241 param->addr_type == addr_type) {
2242 rcu_read_unlock();
2243 return param;
2244 }
2245 }
2246
2247 rcu_read_unlock();
2248
2249 return NULL;
2250 }
2251
2252 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_del_init(struct hci_conn_params * param)2253 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2254 {
2255 	if (list_empty(&param->action))
2256 return;
2257
2258 	list_del_rcu(&param->action);
2259 synchronize_rcu();
2260 	INIT_LIST_HEAD(&param->action);
2261 }
2262
2263 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_add(struct hci_conn_params * param,struct list_head * list)2264 void hci_pend_le_list_add(struct hci_conn_params *param,
2265 struct list_head *list)
2266 {
2267 	list_add_rcu(&param->action, list);
2268 }
2269
2270 /* This function requires the caller holds hdev->lock */
hci_conn_params_add(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2271 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2272 bdaddr_t *addr, u8 addr_type)
2273 {
2274 struct hci_conn_params *params;
2275
2276 params = hci_conn_params_lookup(hdev, addr, addr_type);
2277 if (params)
2278 return params;
2279
2280 params = kzalloc(sizeof(*params), GFP_KERNEL);
2281 if (!params) {
2282 bt_dev_err(hdev, "out of memory");
2283 return NULL;
2284 }
2285
2286 	bacpy(&params->addr, addr);
2287 params->addr_type = addr_type;
2288
2289 	list_add(&params->list, &hdev->le_conn_params);
2290 	INIT_LIST_HEAD(&params->action);
2291
2292 params->conn_min_interval = hdev->le_conn_min_interval;
2293 params->conn_max_interval = hdev->le_conn_max_interval;
2294 params->conn_latency = hdev->le_conn_latency;
2295 params->supervision_timeout = hdev->le_supv_timeout;
2296 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2297
2298 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2299
2300 return params;
2301 }
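
/* Illustrative sketch, not part of the original file: create (or reuse)
 * connection parameters for a peer and mark it for automatic connection.
 * Assumes the caller already holds hdev->lock; HCI_AUTO_CONN_ALWAYS is
 * assumed to exist alongside the auto_connect values used in this file.
 */
static int __maybe_unused example_enable_auto_connect(struct hci_dev *hdev,
						      bdaddr_t *addr,
						      u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -ENOMEM;

	params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	return 0;
}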
2302
hci_conn_params_free(struct hci_conn_params * params)2303 void hci_conn_params_free(struct hci_conn_params *params)
2304 {
2305 hci_pend_le_list_del_init(params);
2306
2307 if (params->conn) {
2308 hci_conn_drop(params->conn);
2309 hci_conn_put(params->conn);
2310 }
2311
2312 	list_del(&params->list);
2313 kfree(params);
2314 }
2315
2316 /* This function requires the caller holds hdev->lock */
hci_conn_params_del(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2317 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2318 {
2319 struct hci_conn_params *params;
2320
2321 params = hci_conn_params_lookup(hdev, addr, addr_type);
2322 if (!params)
2323 return;
2324
2325 hci_conn_params_free(params);
2326
2327 hci_update_passive_scan(hdev);
2328
2329 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2330 }
2331
2332 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_disabled(struct hci_dev * hdev)2333 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2334 {
2335 struct hci_conn_params *params, *tmp;
2336
2337 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2338 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2339 continue;
2340
2341 		/* If trying to establish a one-time connection to a disabled
2342 		 * device, keep the params but mark them as explicit-connect only.
2343 */
2344 if (params->explicit_connect) {
2345 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2346 continue;
2347 }
2348
2349 hci_conn_params_free(params);
2350 }
2351
2352 BT_DBG("All LE disabled connection parameters were removed");
2353 }
2354
2355 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_all(struct hci_dev * hdev)2356 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2357 {
2358 struct hci_conn_params *params, *tmp;
2359
2360 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2361 hci_conn_params_free(params);
2362
2363 BT_DBG("All LE connection parameters were removed");
2364 }
2365
2366 /* Copy the Identity Address of the controller.
2367 *
2368 * If the controller has a public BD_ADDR, then by default use that one.
2369  * If this is an LE-only controller without a public address, default to
2370 * the static random address.
2371 *
2372 * For debugging purposes it is possible to force controllers with a
2373 * public address to use the static random address instead.
2374 *
2375 * In case BR/EDR has been disabled on a dual-mode controller and
2376 * userspace has configured a static address, then that address
2377 * becomes the identity address instead of the public BR/EDR address.
2378 */
hci_copy_identity_address(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 * bdaddr_type)2379 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2380 u8 *bdaddr_type)
2381 {
2382 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2383 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2384 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2385 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2386 bacpy(bdaddr, &hdev->static_addr);
2387 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2388 } else {
2389 bacpy(bdaddr, &hdev->bdaddr);
2390 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2391 }
2392 }
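
/* Illustrative sketch, not part of the original file: dump the identity
 * address selected by the logic above, e.g. while debugging address
 * configuration issues.
 */
static void __maybe_unused example_log_identity_address(struct hci_dev *hdev)
{
	bdaddr_t id_addr;
	u8 id_addr_type;

	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
	bt_dev_dbg(hdev, "identity address %pMR (type %u)", &id_addr,
		   id_addr_type);
}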
2393
hci_clear_wake_reason(struct hci_dev * hdev)2394 static void hci_clear_wake_reason(struct hci_dev *hdev)
2395 {
2396 hci_dev_lock(hdev);
2397
2398 hdev->wake_reason = 0;
2399 bacpy(&hdev->wake_addr, BDADDR_ANY);
2400 hdev->wake_addr_type = 0;
2401
2402 hci_dev_unlock(hdev);
2403 }
2404
hci_suspend_notifier(struct notifier_block * nb,unsigned long action,void * data)2405 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2406 void *data)
2407 {
2408 struct hci_dev *hdev =
2409 container_of(nb, struct hci_dev, suspend_notifier);
2410 int ret = 0;
2411
2412 /* Userspace has full control of this device. Do nothing. */
2413 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2414 return NOTIFY_DONE;
2415
2416 /* To avoid a potential race with hci_unregister_dev. */
2417 hci_dev_hold(hdev);
2418
2419 switch (action) {
2420 case PM_HIBERNATION_PREPARE:
2421 case PM_SUSPEND_PREPARE:
2422 ret = hci_suspend_dev(hdev);
2423 break;
2424 case PM_POST_HIBERNATION:
2425 case PM_POST_SUSPEND:
2426 ret = hci_resume_dev(hdev);
2427 break;
2428 }
2429
2430 if (ret)
2431 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2432 action, ret);
2433
2434 hci_dev_put(hdev);
2435 return NOTIFY_DONE;
2436 }
2437
2438 /* Alloc HCI device */
hci_alloc_dev_priv(int sizeof_priv)2439 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2440 {
2441 struct hci_dev *hdev;
2442 unsigned int alloc_size;
2443
2444 alloc_size = sizeof(*hdev);
2445 if (sizeof_priv) {
2446 /* Fixme: May need ALIGN-ment? */
2447 alloc_size += sizeof_priv;
2448 }
2449
2450 hdev = kzalloc(alloc_size, GFP_KERNEL);
2451 if (!hdev)
2452 return NULL;
2453
2454 if (init_srcu_struct(&hdev->srcu)) {
2455 kfree(hdev);
2456 return NULL;
2457 }
2458
2459 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2460 hdev->esco_type = (ESCO_HV1);
2461 hdev->link_mode = (HCI_LM_ACCEPT);
2462 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2463 hdev->io_capability = 0x03; /* No Input No Output */
2464 hdev->manufacturer = 0xffff; /* Default to internal use */
2465 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2466 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2467 hdev->adv_instance_cnt = 0;
2468 hdev->cur_adv_instance = 0x00;
2469 hdev->adv_instance_timeout = 0;
2470
2471 hdev->advmon_allowlist_duration = 300;
2472 hdev->advmon_no_filter_duration = 500;
2473 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2474
2475 hdev->sniff_max_interval = 800;
2476 hdev->sniff_min_interval = 80;
2477
2478 hdev->le_adv_channel_map = 0x07;
2479 hdev->le_adv_min_interval = 0x0800;
2480 hdev->le_adv_max_interval = 0x0800;
2481 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2482 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2483 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2484 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2485 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2486 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2487 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2488 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2489 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2490 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2491 hdev->le_conn_min_interval = 0x0018;
2492 hdev->le_conn_max_interval = 0x0028;
2493 hdev->le_conn_latency = 0x0000;
2494 hdev->le_supv_timeout = 0x002a;
2495 hdev->le_def_tx_len = 0x001b;
2496 hdev->le_def_tx_time = 0x0148;
2497 hdev->le_max_tx_len = 0x001b;
2498 hdev->le_max_tx_time = 0x0148;
2499 hdev->le_max_rx_len = 0x001b;
2500 hdev->le_max_rx_time = 0x0148;
2501 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2502 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2503 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2504 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2505 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2506 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2507 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2508 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2509 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2510
2511 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2512 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2513 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2514 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2515 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2516 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2517
2518 /* default 1.28 sec page scan */
2519 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2520 hdev->def_page_scan_int = 0x0800;
2521 hdev->def_page_scan_window = 0x0012;
2522
2523 mutex_init(&hdev->lock);
2524 mutex_init(&hdev->req_lock);
2525 mutex_init(&hdev->mgmt_pending_lock);
2526
2527 ida_init(&hdev->unset_handle_ida);
2528
2529 INIT_LIST_HEAD(&hdev->mesh_pending);
2530 INIT_LIST_HEAD(&hdev->mgmt_pending);
2531 INIT_LIST_HEAD(&hdev->reject_list);
2532 INIT_LIST_HEAD(&hdev->accept_list);
2533 INIT_LIST_HEAD(&hdev->uuids);
2534 INIT_LIST_HEAD(&hdev->link_keys);
2535 INIT_LIST_HEAD(&hdev->long_term_keys);
2536 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2537 INIT_LIST_HEAD(&hdev->remote_oob_data);
2538 INIT_LIST_HEAD(&hdev->le_accept_list);
2539 INIT_LIST_HEAD(&hdev->le_resolv_list);
2540 INIT_LIST_HEAD(&hdev->le_conn_params);
2541 INIT_LIST_HEAD(&hdev->pend_le_conns);
2542 INIT_LIST_HEAD(&hdev->pend_le_reports);
2543 INIT_LIST_HEAD(&hdev->conn_hash.list);
2544 INIT_LIST_HEAD(&hdev->adv_instances);
2545 INIT_LIST_HEAD(&hdev->blocked_keys);
2546 INIT_LIST_HEAD(&hdev->monitored_devices);
2547
2548 INIT_LIST_HEAD(&hdev->local_codecs);
2549 INIT_WORK(&hdev->rx_work, hci_rx_work);
2550 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2551 INIT_WORK(&hdev->tx_work, hci_tx_work);
2552 INIT_WORK(&hdev->power_on, hci_power_on);
2553 INIT_WORK(&hdev->error_reset, hci_error_reset);
2554
2555 hci_cmd_sync_init(hdev);
2556
2557 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2558
2559 skb_queue_head_init(&hdev->rx_q);
2560 skb_queue_head_init(&hdev->cmd_q);
2561 skb_queue_head_init(&hdev->raw_q);
2562
2563 init_waitqueue_head(&hdev->req_wait_q);
2564
2565 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2566 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2567
2568 hci_devcd_setup(hdev);
2569
2570 hci_init_sysfs(hdev);
2571 discovery_init(hdev);
2572
2573 return hdev;
2574 }
2575 EXPORT_SYMBOL(hci_alloc_dev_priv);
2576
2577 /* Free HCI device */
hci_free_dev(struct hci_dev * hdev)2578 void hci_free_dev(struct hci_dev *hdev)
2579 {
2580 /* will free via device release */
2581 put_device(&hdev->dev);
2582 }
2583 EXPORT_SYMBOL(hci_free_dev);
2584
2585 /* Register HCI device */
hci_register_dev(struct hci_dev * hdev)2586 int hci_register_dev(struct hci_dev *hdev)
2587 {
2588 int id, error;
2589
2590 if (!hdev->open || !hdev->close || !hdev->send)
2591 return -EINVAL;
2592
2593 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2594 if (id < 0)
2595 return id;
2596
2597 error = dev_set_name(&hdev->dev, "hci%u", id);
2598 if (error)
2599 return error;
2600
2601 hdev->name = dev_name(&hdev->dev);
2602 hdev->id = id;
2603
2604 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2605
2606 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2607 if (!hdev->workqueue) {
2608 error = -ENOMEM;
2609 goto err;
2610 }
2611
2612 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2613 hdev->name);
2614 if (!hdev->req_workqueue) {
2615 destroy_workqueue(hdev->workqueue);
2616 error = -ENOMEM;
2617 goto err;
2618 }
2619
2620 if (!IS_ERR_OR_NULL(bt_debugfs))
2621 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2622
2623 error = device_add(&hdev->dev);
2624 if (error < 0)
2625 goto err_wqueue;
2626
2627 hci_leds_init(hdev);
2628
2629 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2630 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2631 hdev);
2632 if (hdev->rfkill) {
2633 if (rfkill_register(hdev->rfkill) < 0) {
2634 rfkill_destroy(hdev->rfkill);
2635 hdev->rfkill = NULL;
2636 }
2637 }
2638
2639 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2640 hci_dev_set_flag(hdev, HCI_RFKILLED);
2641
2642 hci_dev_set_flag(hdev, HCI_SETUP);
2643 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2644
2645 /* Assume BR/EDR support until proven otherwise (such as
2646 	 * through reading the supported features during init).
2647 */
2648 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2649
2650 write_lock(&hci_dev_list_lock);
2651 list_add(&hdev->list, &hci_dev_list);
2652 write_unlock(&hci_dev_list_lock);
2653
2654 /* Devices that are marked for raw-only usage are unconfigured
2655 * and should not be included in normal operation.
2656 */
2657 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2658 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2659
2660 	/* Mark the Remote Wakeup connection flag as supported if the driver has
2661 	 * a wakeup callback.
2662 */
2663 if (hdev->wakeup)
2664 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2665
2666 hci_sock_dev_event(hdev, HCI_DEV_REG);
2667 hci_dev_hold(hdev);
2668
2669 error = hci_register_suspend_notifier(hdev);
2670 if (error)
2671 BT_WARN("register suspend notifier failed error:%d\n", error);
2672
2673 queue_work(hdev->req_workqueue, &hdev->power_on);
2674
2675 idr_init(&hdev->adv_monitors_idr);
2676 msft_register(hdev);
2677
2678 return id;
2679
2680 err_wqueue:
2681 debugfs_remove_recursive(hdev->debugfs);
2682 destroy_workqueue(hdev->workqueue);
2683 destroy_workqueue(hdev->req_workqueue);
2684 err:
2685 ida_free(&hci_index_ida, hdev->id);
2686
2687 return error;
2688 }
2689 EXPORT_SYMBOL(hci_register_dev);
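
/* Illustrative sketch, not part of the original file: the minimal driver side
 * of registration. hci_register_dev() above rejects devices without open,
 * close and send callbacks, so a driver provides at least these stubs before
 * calling it. The callback signatures are inferred from their use in this
 * file; all names below are hypothetical.
 */
static int __maybe_unused example_drv_open(struct hci_dev *hdev)
{
	return 0;
}

static int __maybe_unused example_drv_close(struct hci_dev *hdev)
{
	return 0;
}

static int __maybe_unused example_drv_send(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_drv_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev_priv(0);
	if (!hdev)
		return -ENOMEM;

	hdev->open  = example_drv_open;
	hdev->close = example_drv_close;
	hdev->send  = example_drv_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}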
2690
2691 /* Unregister HCI device */
hci_unregister_dev(struct hci_dev * hdev)2692 void hci_unregister_dev(struct hci_dev *hdev)
2693 {
2694 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2695
2696 mutex_lock(&hdev->unregister_lock);
2697 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2698 mutex_unlock(&hdev->unregister_lock);
2699
2700 write_lock(&hci_dev_list_lock);
2701 list_del(&hdev->list);
2702 write_unlock(&hci_dev_list_lock);
2703
2704 synchronize_srcu(&hdev->srcu);
2705 cleanup_srcu_struct(&hdev->srcu);
2706
2707 disable_work_sync(&hdev->rx_work);
2708 disable_work_sync(&hdev->cmd_work);
2709 disable_work_sync(&hdev->tx_work);
2710 disable_work_sync(&hdev->power_on);
2711 disable_work_sync(&hdev->error_reset);
2712
2713 hci_cmd_sync_clear(hdev);
2714
2715 hci_unregister_suspend_notifier(hdev);
2716
2717 hci_dev_do_close(hdev);
2718
2719 if (!test_bit(HCI_INIT, &hdev->flags) &&
2720 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2721 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2722 hci_dev_lock(hdev);
2723 mgmt_index_removed(hdev);
2724 hci_dev_unlock(hdev);
2725 }
2726
2727 /* mgmt_index_removed should take care of emptying the
2728 * pending list */
2729 BUG_ON(!list_empty(&hdev->mgmt_pending));
2730
2731 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2732
2733 if (hdev->rfkill) {
2734 rfkill_unregister(hdev->rfkill);
2735 rfkill_destroy(hdev->rfkill);
2736 }
2737
2738 device_del(&hdev->dev);
2739 /* Actual cleanup is deferred until hci_release_dev(). */
2740 hci_dev_put(hdev);
2741 }
2742 EXPORT_SYMBOL(hci_unregister_dev);
2743
2744 /* Release HCI device */
hci_release_dev(struct hci_dev * hdev)2745 void hci_release_dev(struct hci_dev *hdev)
2746 {
2747 debugfs_remove_recursive(hdev->debugfs);
2748 kfree_const(hdev->hw_info);
2749 kfree_const(hdev->fw_info);
2750
2751 destroy_workqueue(hdev->workqueue);
2752 destroy_workqueue(hdev->req_workqueue);
2753
2754 hci_dev_lock(hdev);
2755 hci_bdaddr_list_clear(&hdev->reject_list);
2756 hci_bdaddr_list_clear(&hdev->accept_list);
2757 hci_uuids_clear(hdev);
2758 hci_link_keys_clear(hdev);
2759 hci_smp_ltks_clear(hdev);
2760 hci_smp_irks_clear(hdev);
2761 hci_remote_oob_data_clear(hdev);
2762 hci_adv_instances_clear(hdev);
2763 hci_adv_monitors_clear(hdev);
2764 hci_bdaddr_list_clear(&hdev->le_accept_list);
2765 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2766 hci_conn_params_clear_all(hdev);
2767 hci_discovery_filter_clear(hdev);
2768 hci_blocked_keys_clear(hdev);
2769 hci_codec_list_clear(&hdev->local_codecs);
2770 msft_release(hdev);
2771 hci_dev_unlock(hdev);
2772
2773 ida_destroy(&hdev->unset_handle_ida);
2774 ida_free(&hci_index_ida, hdev->id);
2775 kfree_skb(hdev->sent_cmd);
2776 kfree_skb(hdev->req_skb);
2777 kfree_skb(hdev->recv_event);
2778 kfree(hdev);
2779 }
2780 EXPORT_SYMBOL(hci_release_dev);
2781
hci_register_suspend_notifier(struct hci_dev * hdev)2782 int hci_register_suspend_notifier(struct hci_dev *hdev)
2783 {
2784 int ret = 0;
2785
2786 if (!hdev->suspend_notifier.notifier_call &&
2787 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2788 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2789 ret = register_pm_notifier(&hdev->suspend_notifier);
2790 }
2791
2792 return ret;
2793 }
2794
hci_unregister_suspend_notifier(struct hci_dev * hdev)2795 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2796 {
2797 int ret = 0;
2798
2799 if (hdev->suspend_notifier.notifier_call) {
2800 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2801 if (!ret)
2802 hdev->suspend_notifier.notifier_call = NULL;
2803 }
2804
2805 return ret;
2806 }
2807
2808 /* Cancel ongoing command synchronously:
2809 *
2810 * - Cancel command timer
2811 * - Reset command counter
2812 * - Cancel command request
2813 */
hci_cancel_cmd_sync(struct hci_dev * hdev,int err)2814 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2815 {
2816 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2817
2818 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2819 disable_delayed_work_sync(&hdev->cmd_timer);
2820 disable_delayed_work_sync(&hdev->ncmd_timer);
2821 } else {
2822 cancel_delayed_work_sync(&hdev->cmd_timer);
2823 cancel_delayed_work_sync(&hdev->ncmd_timer);
2824 }
2825
2826 atomic_set(&hdev->cmd_cnt, 1);
2827
2828 hci_cmd_sync_cancel_sync(hdev, err);
2829 }
2830
2831 /* Suspend HCI device */
hci_suspend_dev(struct hci_dev * hdev)2832 int hci_suspend_dev(struct hci_dev *hdev)
2833 {
2834 int ret;
2835
2836 bt_dev_dbg(hdev, "");
2837
2838 	/* Suspend should only act when the device is powered. */
2839 if (!hdev_is_powered(hdev) ||
2840 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2841 return 0;
2842
2843 /* If powering down don't attempt to suspend */
2844 if (mgmt_powering_down(hdev))
2845 return 0;
2846
2847 /* Cancel potentially blocking sync operation before suspend */
2848 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2849
2850 hci_req_sync_lock(hdev);
2851 ret = hci_suspend_sync(hdev);
2852 hci_req_sync_unlock(hdev);
2853
2854 hci_clear_wake_reason(hdev);
2855 mgmt_suspending(hdev, hdev->suspend_state);
2856
2857 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2858 return ret;
2859 }
2860 EXPORT_SYMBOL(hci_suspend_dev);
2861
2862 /* Resume HCI device */
hci_resume_dev(struct hci_dev * hdev)2863 int hci_resume_dev(struct hci_dev *hdev)
2864 {
2865 int ret;
2866
2867 bt_dev_dbg(hdev, "");
2868
2869 	/* Resume should only act when the device is powered. */
2870 if (!hdev_is_powered(hdev) ||
2871 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2872 return 0;
2873
2874 /* If powering down don't attempt to resume */
2875 if (mgmt_powering_down(hdev))
2876 return 0;
2877
2878 hci_req_sync_lock(hdev);
2879 ret = hci_resume_sync(hdev);
2880 hci_req_sync_unlock(hdev);
2881
2882 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2883 hdev->wake_addr_type);
2884
2885 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2886 return ret;
2887 }
2888 EXPORT_SYMBOL(hci_resume_dev);
2889
2890 /* Reset HCI device */
hci_reset_dev(struct hci_dev * hdev)2891 int hci_reset_dev(struct hci_dev *hdev)
2892 {
2893 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2894 struct sk_buff *skb;
2895
2896 skb = bt_skb_alloc(3, GFP_ATOMIC);
2897 if (!skb)
2898 return -ENOMEM;
2899
2900 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2901 skb_put_data(skb, hw_err, 3);
2902
2903 bt_dev_err(hdev, "Injecting HCI hardware error event");
2904
2905 /* Send Hardware Error to upper stack */
2906 return hci_recv_frame(hdev, skb);
2907 }
2908 EXPORT_SYMBOL(hci_reset_dev);
2909
hci_dev_classify_pkt_type(struct hci_dev * hdev,struct sk_buff * skb)2910 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2911 {
2912 if (hdev->classify_pkt_type)
2913 return hdev->classify_pkt_type(hdev, skb);
2914
2915 return hci_skb_pkt_type(skb);
2916 }
2917
2918 /* Receive frame from HCI drivers */
hci_recv_frame(struct hci_dev * hdev,struct sk_buff * skb)2919 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2920 {
2921 u8 dev_pkt_type;
2922
2923 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2924 && !test_bit(HCI_INIT, &hdev->flags))) {
2925 kfree_skb(skb);
2926 return -ENXIO;
2927 }
2928
2929 	/* Check if the driver agrees with the packet type classification */
2930 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2931 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2932 hci_skb_pkt_type(skb) = dev_pkt_type;
2933 }
2934
2935 switch (hci_skb_pkt_type(skb)) {
2936 case HCI_EVENT_PKT:
2937 break;
2938 case HCI_ACLDATA_PKT:
2939 /* Detect if ISO packet has been sent as ACL */
2940 if (hci_conn_num(hdev, CIS_LINK) ||
2941 hci_conn_num(hdev, BIS_LINK)) {
2942 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2943 __u8 type;
2944
2945 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2946 if (type == CIS_LINK || type == BIS_LINK)
2947 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2948 }
2949 break;
2950 case HCI_SCODATA_PKT:
2951 break;
2952 case HCI_ISODATA_PKT:
2953 break;
2954 case HCI_DRV_PKT:
2955 break;
2956 default:
2957 kfree_skb(skb);
2958 return -EINVAL;
2959 }
2960
2961 /* Incoming skb */
2962 bt_cb(skb)->incoming = 1;
2963
2964 /* Time stamp */
2965 __net_timestamp(skb);
2966
2967 skb_queue_tail(&hdev->rx_q, skb);
2968 queue_work(hdev->workqueue, &hdev->rx_work);
2969
2970 return 0;
2971 }
2972 EXPORT_SYMBOL(hci_recv_frame);
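
/* Illustrative sketch, not part of the original file: how a driver would feed
 * a received HCI event into the core, mirroring what hci_reset_dev() above
 * does for its injected hardware error event. The payload is a placeholder.
 */
static int __maybe_unused example_drv_rx_event(struct hci_dev *hdev,
					       const void *data,
					       unsigned int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, data, len);

	return hci_recv_frame(hdev, skb);
}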
2973
2974 /* Receive diagnostic message from HCI drivers */
hci_recv_diag(struct hci_dev * hdev,struct sk_buff * skb)2975 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 /* Mark as diagnostic packet */
2978 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2979
2980 /* Time stamp */
2981 __net_timestamp(skb);
2982
2983 skb_queue_tail(&hdev->rx_q, skb);
2984 queue_work(hdev->workqueue, &hdev->rx_work);
2985
2986 return 0;
2987 }
2988 EXPORT_SYMBOL(hci_recv_diag);
2989
hci_set_hw_info(struct hci_dev * hdev,const char * fmt,...)2990 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2991 {
2992 va_list vargs;
2993
2994 va_start(vargs, fmt);
2995 kfree_const(hdev->hw_info);
2996 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2997 va_end(vargs);
2998 }
2999 EXPORT_SYMBOL(hci_set_hw_info);
3000
hci_set_fw_info(struct hci_dev * hdev,const char * fmt,...)3001 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3002 {
3003 va_list vargs;
3004
3005 va_start(vargs, fmt);
3006 kfree_const(hdev->fw_info);
3007 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3008 va_end(vargs);
3009 }
3010 EXPORT_SYMBOL(hci_set_fw_info);
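
/* Illustrative sketch, not part of the original file: drivers typically call
 * the helpers above once hardware and firmware revisions are known, e.g.:
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 *
 * The format strings and variables are placeholders.
 */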
3011
3012 /* ---- Interface to upper protocols ---- */
3013
hci_register_cb(struct hci_cb * cb)3014 int hci_register_cb(struct hci_cb *cb)
3015 {
3016 BT_DBG("%p name %s", cb, cb->name);
3017
3018 mutex_lock(&hci_cb_list_lock);
3019 list_add_tail(&cb->list, &hci_cb_list);
3020 mutex_unlock(&hci_cb_list_lock);
3021
3022 return 0;
3023 }
3024 EXPORT_SYMBOL(hci_register_cb);
3025
hci_unregister_cb(struct hci_cb * cb)3026 int hci_unregister_cb(struct hci_cb *cb)
3027 {
3028 BT_DBG("%p name %s", cb, cb->name);
3029
3030 mutex_lock(&hci_cb_list_lock);
3031 list_del(&cb->list);
3032 mutex_unlock(&hci_cb_list_lock);
3033
3034 return 0;
3035 }
3036 EXPORT_SYMBOL(hci_unregister_cb);
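
/* Illustrative sketch, not part of the original file: an upper protocol
 * registers itself through a struct hci_cb. Only the name field is shown;
 * the protocol callback members that would normally be filled in are omitted
 * here and their names are not assumed.
 */
static struct hci_cb example_proto_cb = {
	.name = "example_proto",
};

static int __maybe_unused example_proto_init(void)
{
	return hci_register_cb(&example_proto_cb);
}

static void __maybe_unused example_proto_exit(void)
{
	hci_unregister_cb(&example_proto_cb);
}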
3037
hci_send_frame(struct hci_dev * hdev,struct sk_buff * skb)3038 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3039 {
3040 int err;
3041
3042 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3043 skb->len);
3044
3045 /* Time stamp */
3046 __net_timestamp(skb);
3047
3048 /* Send copy to monitor */
3049 hci_send_to_monitor(hdev, skb);
3050
3051 if (atomic_read(&hdev->promisc)) {
3052 /* Send copy to the sockets */
3053 hci_send_to_sock(hdev, skb);
3054 }
3055
3056 	/* Get rid of the skb owner prior to sending to the driver. */
3057 skb_orphan(skb);
3058
3059 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3060 kfree_skb(skb);
3061 return -EINVAL;
3062 }
3063
3064 if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
3065 		/* Intercept HCI Drv packets here instead of passing them to the
3066 		 * hdev->send callback.
3067 */
3068 err = hci_drv_process_cmd(hdev, skb);
3069 kfree_skb(skb);
3070 return err;
3071 }
3072
3073 err = hdev->send(hdev, skb);
3074 if (err < 0) {
3075 bt_dev_err(hdev, "sending frame failed (%d)", err);
3076 kfree_skb(skb);
3077 return err;
3078 }
3079
3080 return 0;
3081 }
3082
hci_send_conn_frame(struct hci_dev * hdev,struct hci_conn * conn,struct sk_buff * skb)3083 static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn,
3084 struct sk_buff *skb)
3085 {
3086 hci_conn_tx_queue(conn, skb);
3087 return hci_send_frame(hdev, skb);
3088 }
3089
3090 /* Send HCI command */
hci_send_cmd(struct hci_dev * hdev,__u16 opcode,__u32 plen,const void * param)3091 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3092 const void *param)
3093 {
3094 struct sk_buff *skb;
3095
3096 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3097
3098 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3099 if (!skb) {
3100 bt_dev_err(hdev, "no memory for command");
3101 return -ENOMEM;
3102 }
3103
3104 /* Stand-alone HCI commands must be flagged as
3105 * single-command requests.
3106 */
3107 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3108
3109 skb_queue_tail(&hdev->cmd_q, skb);
3110 queue_work(hdev->workqueue, &hdev->cmd_work);
3111
3112 return 0;
3113 }
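
/* Illustrative sketch, not part of the original file: queueing a standard
 * command via hci_send_cmd(). HCI_OP_RESET is used only because it carries no
 * parameters; completion is reported asynchronously through the event path.
 */
static int __maybe_unused example_queue_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}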
3114
__hci_cmd_send(struct hci_dev * hdev,u16 opcode,u32 plen,const void * param)3115 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3116 const void *param)
3117 {
3118 struct sk_buff *skb;
3119
3120 if (hci_opcode_ogf(opcode) != 0x3f) {
3121 /* A controller receiving a command shall respond with either
3122 * a Command Status Event or a Command Complete Event.
3123 * Therefore, all standard HCI commands must be sent via the
3124 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3125 * Some vendors do not comply with this rule for vendor-specific
3126 * commands and do not return any event. We want to support
3127 * unresponded commands for such cases only.
3128 */
3129 bt_dev_err(hdev, "unresponded command not supported");
3130 return -EINVAL;
3131 }
3132
3133 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3134 if (!skb) {
3135 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3136 opcode);
3137 return -ENOMEM;
3138 }
3139
3140 hci_send_frame(hdev, skb);
3141
3142 return 0;
3143 }
3144 EXPORT_SYMBOL(__hci_cmd_send);
3145
3146 /* Get data from the previously sent command */
hci_cmd_data(struct sk_buff * skb,__u16 opcode)3147 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3148 {
3149 struct hci_command_hdr *hdr;
3150
3151 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3152 return NULL;
3153
3154 hdr = (void *)skb->data;
3155
3156 if (hdr->opcode != cpu_to_le16(opcode))
3157 return NULL;
3158
3159 return skb->data + HCI_COMMAND_HDR_SIZE;
3160 }
3161
3162 /* Get data from the previously sent command */
hci_sent_cmd_data(struct hci_dev * hdev,__u16 opcode)3163 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3164 {
3165 void *data;
3166
3167 /* Check if opcode matches last sent command */
3168 data = hci_cmd_data(hdev->sent_cmd, opcode);
3169 if (!data)
3170 /* Check if opcode matches last request */
3171 data = hci_cmd_data(hdev->req_skb, opcode);
3172
3173 return data;
3174 }
3175
3176 /* Get data from last received event */
hci_recv_event_data(struct hci_dev * hdev,__u8 event)3177 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3178 {
3179 struct hci_event_hdr *hdr;
3180 int offset;
3181
3182 if (!hdev->recv_event)
3183 return NULL;
3184
3185 hdr = (void *)hdev->recv_event->data;
3186 offset = sizeof(*hdr);
3187
3188 if (hdr->evt != event) {
3189 		/* In case of an LE meta event, check whether the subevent matches */
3190 if (hdr->evt == HCI_EV_LE_META) {
3191 struct hci_ev_le_meta *ev;
3192
3193 ev = (void *)hdev->recv_event->data + offset;
3194 offset += sizeof(*ev);
3195 if (ev->subevent == event)
3196 goto found;
3197 }
3198 return NULL;
3199 }
3200
3201 found:
3202 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3203
3204 return hdev->recv_event->data + offset;
3205 }
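
/* Illustrative sketch, not part of the original file: peeking at the payload
 * of the last received event. HCI_EV_CMD_COMPLETE and struct
 * hci_ev_cmd_complete are assumed to be defined in hci.h like the other
 * events referenced in this file.
 */
static void __maybe_unused example_peek_last_event(struct hci_dev *hdev)
{
	struct hci_ev_cmd_complete *ev;

	ev = hci_recv_event_data(hdev, HCI_EV_CMD_COMPLETE);
	if (ev)
		bt_dev_dbg(hdev, "last completed opcode 0x%4.4x",
			   __le16_to_cpu(ev->opcode));
}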
3206
3207 /* Send ACL data */
hci_add_acl_hdr(struct sk_buff * skb,__u16 handle,__u16 flags)3208 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3209 {
3210 struct hci_acl_hdr *hdr;
3211 int len = skb->len;
3212
3213 skb_push(skb, HCI_ACL_HDR_SIZE);
3214 skb_reset_transport_header(skb);
3215 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3216 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3217 hdr->dlen = cpu_to_le16(len);
3218 }
3219
hci_queue_acl(struct hci_chan * chan,struct sk_buff_head * queue,struct sk_buff * skb,__u16 flags)3220 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3221 struct sk_buff *skb, __u16 flags)
3222 {
3223 struct hci_conn *conn = chan->conn;
3224 struct hci_dev *hdev = conn->hdev;
3225 struct sk_buff *list;
3226
3227 skb->len = skb_headlen(skb);
3228 skb->data_len = 0;
3229
3230 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3231
3232 hci_add_acl_hdr(skb, conn->handle, flags);
3233
3234 list = skb_shinfo(skb)->frag_list;
3235 if (!list) {
3236 /* Non fragmented */
3237 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3238
3239 skb_queue_tail(queue, skb);
3240 } else {
3241 /* Fragmented */
3242 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3243
3244 skb_shinfo(skb)->frag_list = NULL;
3245
3246 		/* Queue all fragments atomically. We need to use spin_lock_bh
3247 		 * here because with 6LoWPAN links this function can be called
3248 		 * from softirq context, and using a normal spin lock could
3249 		 * cause deadlocks.
3250 */
3251 spin_lock_bh(&queue->lock);
3252
3253 __skb_queue_tail(queue, skb);
3254
3255 flags &= ~ACL_START;
3256 flags |= ACL_CONT;
3257 do {
3258 skb = list; list = list->next;
3259
3260 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3261 hci_add_acl_hdr(skb, conn->handle, flags);
3262
3263 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3264
3265 __skb_queue_tail(queue, skb);
3266 } while (list);
3267
3268 spin_unlock_bh(&queue->lock);
3269 }
3270 }
3271
hci_send_acl(struct hci_chan * chan,struct sk_buff * skb,__u16 flags)3272 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3273 {
3274 struct hci_dev *hdev = chan->conn->hdev;
3275
3276 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3277
3278 hci_queue_acl(chan, &chan->data_q, skb, flags);
3279
3280 queue_work(hdev->workqueue, &hdev->tx_work);
3281 }
3282
3283 /* Send SCO data */
hci_send_sco(struct hci_conn * conn,struct sk_buff * skb)3284 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3285 {
3286 struct hci_dev *hdev = conn->hdev;
3287 struct hci_sco_hdr hdr;
3288
3289 BT_DBG("%s len %d", hdev->name, skb->len);
3290
3291 hdr.handle = cpu_to_le16(conn->handle);
3292 hdr.dlen = skb->len;
3293
3294 skb_push(skb, HCI_SCO_HDR_SIZE);
3295 skb_reset_transport_header(skb);
3296 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3297
3298 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3299
3300 skb_queue_tail(&conn->data_q, skb);
3301 queue_work(hdev->workqueue, &hdev->tx_work);
3302 }
3303
3304 /* Send ISO data */
hci_add_iso_hdr(struct sk_buff * skb,__u16 handle,__u8 flags)3305 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3306 {
3307 struct hci_iso_hdr *hdr;
3308 int len = skb->len;
3309
3310 skb_push(skb, HCI_ISO_HDR_SIZE);
3311 skb_reset_transport_header(skb);
3312 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3313 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3314 hdr->dlen = cpu_to_le16(len);
3315 }
3316
hci_queue_iso(struct hci_conn * conn,struct sk_buff_head * queue,struct sk_buff * skb)3317 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3318 struct sk_buff *skb)
3319 {
3320 struct hci_dev *hdev = conn->hdev;
3321 struct sk_buff *list;
3322 __u16 flags;
3323
3324 skb->len = skb_headlen(skb);
3325 skb->data_len = 0;
3326
3327 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3328
3329 list = skb_shinfo(skb)->frag_list;
3330
3331 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3332 hci_add_iso_hdr(skb, conn->handle, flags);
3333
3334 if (!list) {
3335 /* Non fragmented */
3336 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3337
3338 skb_queue_tail(queue, skb);
3339 } else {
3340 /* Fragmented */
3341 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3342
3343 skb_shinfo(skb)->frag_list = NULL;
3344
3345 __skb_queue_tail(queue, skb);
3346
3347 do {
3348 skb = list; list = list->next;
3349
3350 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3351 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3352 0x00);
3353 hci_add_iso_hdr(skb, conn->handle, flags);
3354
3355 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3356
3357 __skb_queue_tail(queue, skb);
3358 } while (list);
3359 }
3360 }
3361
hci_send_iso(struct hci_conn * conn,struct sk_buff * skb)3362 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3363 {
3364 struct hci_dev *hdev = conn->hdev;
3365
3366 BT_DBG("%s len %d", hdev->name, skb->len);
3367
3368 hci_queue_iso(conn, &conn->data_q, skb);
3369
3370 queue_work(hdev->workqueue, &hdev->tx_work);
3371 }
3372
3373 /* ---- HCI TX task (outgoing data) ---- */
3374
3375 /* HCI Connection scheduler */
hci_quote_sent(struct hci_conn * conn,int num,int * quote)3376 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3377 {
3378 struct hci_dev *hdev;
3379 int cnt, q;
3380
3381 if (!conn) {
3382 *quote = 0;
3383 return;
3384 }
3385
3386 hdev = conn->hdev;
3387
3388 switch (conn->type) {
3389 case ACL_LINK:
3390 cnt = hdev->acl_cnt;
3391 break;
3392 case SCO_LINK:
3393 case ESCO_LINK:
3394 cnt = hdev->sco_cnt;
3395 break;
3396 case LE_LINK:
3397 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3398 break;
3399 case CIS_LINK:
3400 case BIS_LINK:
3401 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3402 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3403 break;
3404 default:
3405 cnt = 0;
3406 bt_dev_err(hdev, "unknown link type %d", conn->type);
3407 }
3408
3409 q = cnt / num;
3410 *quote = q ? q : 1;
3411 }
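
/* Worked example (comment added for clarity, not in the original file): with
 * hdev->acl_cnt == 5 free ACL buffers and num == 3 active ACL connections,
 * each connection is given a quota of 5 / 3 = 1 packet per scheduling round;
 * the "q ? q : 1" fallback guarantees forward progress even when cnt < num.
 */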
3412
hci_low_sent(struct hci_dev * hdev,__u8 type,__u8 type2,int * quote)3413 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3414 __u8 type2, int *quote)
3415 {
3416 struct hci_conn_hash *h = &hdev->conn_hash;
3417 struct hci_conn *conn = NULL, *c;
3418 unsigned int num = 0, min = ~0;
3419
3420 	/* We don't have to lock the device here. Connections are always
3421 	 * added and removed with the TX task disabled. */
3422
3423 rcu_read_lock();
3424
3425 list_for_each_entry_rcu(c, &h->list, list) {
3426 if ((c->type != type && c->type != type2) ||
3427 skb_queue_empty(&c->data_q))
3428 continue;
3429
3430 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3431 continue;
3432
3433 num++;
3434
3435 if (c->sent < min) {
3436 min = c->sent;
3437 conn = c;
3438 }
3439
3440 if (hci_conn_num(hdev, type) == num)
3441 break;
3442 }
3443
3444 rcu_read_unlock();
3445
3446 hci_quote_sent(conn, num, quote);
3447
3448 BT_DBG("conn %p quote %d", conn, *quote);
3449 return conn;
3450 }
3451
hci_link_tx_to(struct hci_dev * hdev,__u8 type)3452 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3453 {
3454 struct hci_conn_hash *h = &hdev->conn_hash;
3455 struct hci_conn *c;
3456
3457 bt_dev_err(hdev, "link tx timeout");
3458
3459 hci_dev_lock(hdev);
3460
3461 /* Kill stalled connections */
3462 list_for_each_entry(c, &h->list, list) {
3463 if (c->type == type && c->sent) {
3464 bt_dev_err(hdev, "killing stalled connection %pMR",
3465 &c->dst);
3466 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3467 }
3468 }
3469
3470 hci_dev_unlock(hdev);
3471 }
3472
hci_chan_sent(struct hci_dev * hdev,__u8 type,int * quote)3473 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3474 int *quote)
3475 {
3476 struct hci_conn_hash *h = &hdev->conn_hash;
3477 struct hci_chan *chan = NULL;
3478 unsigned int num = 0, min = ~0, cur_prio = 0;
3479 struct hci_conn *conn;
3480 int conn_num = 0;
3481
3482 BT_DBG("%s", hdev->name);
3483
3484 rcu_read_lock();
3485
3486 list_for_each_entry_rcu(conn, &h->list, list) {
3487 struct hci_chan *tmp;
3488
3489 if (conn->type != type)
3490 continue;
3491
3492 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3493 continue;
3494
3495 conn_num++;
3496
3497 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3498 struct sk_buff *skb;
3499
3500 if (skb_queue_empty(&tmp->data_q))
3501 continue;
3502
3503 skb = skb_peek(&tmp->data_q);
3504 if (skb->priority < cur_prio)
3505 continue;
3506
3507 if (skb->priority > cur_prio) {
3508 num = 0;
3509 min = ~0;
3510 cur_prio = skb->priority;
3511 }
3512
3513 num++;
3514
3515 if (conn->sent < min) {
3516 min = conn->sent;
3517 chan = tmp;
3518 }
3519 }
3520
3521 if (hci_conn_num(hdev, type) == conn_num)
3522 break;
3523 }
3524
3525 rcu_read_unlock();
3526
3527 if (!chan)
3528 return NULL;
3529
3530 hci_quote_sent(chan->conn, num, quote);
3531
3532 BT_DBG("chan %p quote %d", chan, *quote);
3533 return chan;
3534 }
3535
hci_prio_recalculate(struct hci_dev * hdev,__u8 type)3536 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3537 {
3538 struct hci_conn_hash *h = &hdev->conn_hash;
3539 struct hci_conn *conn;
3540 int num = 0;
3541
3542 BT_DBG("%s", hdev->name);
3543
3544 rcu_read_lock();
3545
3546 list_for_each_entry_rcu(conn, &h->list, list) {
3547 struct hci_chan *chan;
3548
3549 if (conn->type != type)
3550 continue;
3551
3552 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3553 continue;
3554
3555 num++;
3556
3557 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3558 struct sk_buff *skb;
3559
3560 if (chan->sent) {
3561 chan->sent = 0;
3562 continue;
3563 }
3564
3565 if (skb_queue_empty(&chan->data_q))
3566 continue;
3567
3568 skb = skb_peek(&chan->data_q);
3569 if (skb->priority >= HCI_PRIO_MAX - 1)
3570 continue;
3571
3572 skb->priority = HCI_PRIO_MAX - 1;
3573
3574 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3575 skb->priority);
3576 }
3577
3578 if (hci_conn_num(hdev, type) == num)
3579 break;
3580 }
3581
3582 rcu_read_unlock();
3583
3584 }
3585
__check_timeout(struct hci_dev * hdev,unsigned int cnt,u8 type)3586 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3587 {
3588 unsigned long last_tx;
3589
3590 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3591 return;
3592
3593 switch (type) {
3594 case LE_LINK:
3595 last_tx = hdev->le_last_tx;
3596 break;
3597 default:
3598 last_tx = hdev->acl_last_tx;
3599 break;
3600 }
3601
3602 /* tx timeout must be longer than maximum link supervision timeout
3603 * (40.9 seconds)
3604 */
3605 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3606 hci_link_tx_to(hdev, type);
3607 }
3608
3609 /* Schedule SCO */
hci_sched_sco(struct hci_dev * hdev,__u8 type)3610 static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
3611 {
3612 struct hci_conn *conn;
3613 struct sk_buff *skb;
3614 int quote, *cnt;
3615 unsigned int pkts = hdev->sco_pkts;
3616
3617 bt_dev_dbg(hdev, "type %u", type);
3618
3619 if (!hci_conn_num(hdev, type) || !pkts)
3620 return;
3621
3622 	/* Use sco_pkts if flow control has not been enabled, which limits the
3623 	 * number of buffers sent in a row.
3624 */
3625 if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3626 cnt = &pkts;
3627 else
3628 cnt = &hdev->sco_cnt;
3629
3630 	while (*cnt && (conn = hci_low_sent(hdev, type, type, &quote))) {
3631 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3632 BT_DBG("skb %p len %d", skb, skb->len);
3633 hci_send_conn_frame(hdev, conn, skb);
3634
3635 conn->sent++;
3636 if (conn->sent == ~0)
3637 conn->sent = 0;
3638 (*cnt)--;
3639 }
3640 }
3641
3642 	/* Reschedule if all packets were sent and flow control is not enabled,
3643 	 * as there could be more packets queued that could not be sent, and
3644 * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule
3645 * needs to be forced.
3646 */
3647 if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3648 queue_work(hdev->workqueue, &hdev->tx_work);
3649 }
3650
hci_sched_acl_pkt(struct hci_dev * hdev)3651 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3652 {
3653 unsigned int cnt = hdev->acl_cnt;
3654 struct hci_chan *chan;
3655 struct sk_buff *skb;
3656 int quote;
3657
3658 __check_timeout(hdev, cnt, ACL_LINK);
3659
3660 while (hdev->acl_cnt &&
3661 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3662 u32 priority = (skb_peek(&chan->data_q))->priority;
3663 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3664 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3665 skb->len, skb->priority);
3666
3667 /* Stop if priority has changed */
3668 if (skb->priority < priority)
3669 break;
3670
3671 skb = skb_dequeue(&chan->data_q);
3672
3673 hci_conn_enter_active_mode(chan->conn,
3674 bt_cb(skb)->force_active);
3675
3676 hci_send_conn_frame(hdev, chan->conn, skb);
3677 hdev->acl_last_tx = jiffies;
3678
3679 hdev->acl_cnt--;
3680 chan->sent++;
3681 chan->conn->sent++;
3682
3683 /* Send pending SCO packets right away */
3684 hci_sched_sco(hdev, SCO_LINK);
3685 hci_sched_sco(hdev, ESCO_LINK);
3686 }
3687 }
3688
3689 if (cnt != hdev->acl_cnt)
3690 hci_prio_recalculate(hdev, ACL_LINK);
3691 }
3692
hci_sched_acl(struct hci_dev * hdev)3693 static void hci_sched_acl(struct hci_dev *hdev)
3694 {
3695 BT_DBG("%s", hdev->name);
3696
3697 	/* Nothing to schedule if there are no ACL links */
3698 if (!hci_conn_num(hdev, ACL_LINK))
3699 return;
3700
3701 hci_sched_acl_pkt(hdev);
3702 }
3703
hci_sched_le(struct hci_dev * hdev)3704 static void hci_sched_le(struct hci_dev *hdev)
3705 {
3706 struct hci_chan *chan;
3707 struct sk_buff *skb;
3708 int quote, *cnt, tmp;
3709
3710 BT_DBG("%s", hdev->name);
3711
3712 if (!hci_conn_num(hdev, LE_LINK))
3713 return;
3714
3715 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3716
3717 __check_timeout(hdev, *cnt, LE_LINK);
3718
3719 tmp = *cnt;
3720 	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3721 u32 priority = (skb_peek(&chan->data_q))->priority;
3722 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3723 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3724 skb->len, skb->priority);
3725
3726 /* Stop if priority has changed */
3727 if (skb->priority < priority)
3728 break;
3729
3730 skb = skb_dequeue(&chan->data_q);
3731
3732 hci_send_conn_frame(hdev, chan->conn, skb);
3733 hdev->le_last_tx = jiffies;
3734
3735 (*cnt)--;
3736 chan->sent++;
3737 chan->conn->sent++;
3738
3739 /* Send pending SCO packets right away */
3740 hci_sched_sco(hdev, SCO_LINK);
3741 hci_sched_sco(hdev, ESCO_LINK);
3742 }
3743 }
3744
3745 if (*cnt != tmp)
3746 hci_prio_recalculate(hdev, LE_LINK);
3747 }
3748
3749 /* Schedule CIS */
hci_sched_iso(struct hci_dev * hdev)3750 static void hci_sched_iso(struct hci_dev *hdev)
3751 {
3752 struct hci_conn *conn;
3753 struct sk_buff *skb;
3754 int quote, *cnt;
3755
3756 BT_DBG("%s", hdev->name);
3757
3758 if (!hci_conn_num(hdev, CIS_LINK) &&
3759 !hci_conn_num(hdev, BIS_LINK))
3760 return;
3761
3762 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3763 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3764 while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK,
3765 				     &quote))) {
3766 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3767 BT_DBG("skb %p len %d", skb, skb->len);
3768 hci_send_conn_frame(hdev, conn, skb);
3769
3770 conn->sent++;
3771 if (conn->sent == ~0)
3772 conn->sent = 0;
3773 (*cnt)--;
3774 }
3775 }
3776 }
3777
hci_tx_work(struct work_struct * work)3778 static void hci_tx_work(struct work_struct *work)
3779 {
3780 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3781 struct sk_buff *skb;
3782
3783 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3784 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3785
3786 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3787 /* Schedule queues and send stuff to HCI driver */
3788 hci_sched_sco(hdev, SCO_LINK);
3789 hci_sched_sco(hdev, ESCO_LINK);
3790 hci_sched_iso(hdev);
3791 hci_sched_acl(hdev);
3792 hci_sched_le(hdev);
3793 }
3794
3795 /* Send next queued raw (unknown type) packet */
3796 while ((skb = skb_dequeue(&hdev->raw_q)))
3797 hci_send_frame(hdev, skb);
3798 }
3799
3800 /* ----- HCI RX task (incoming data processing) ----- */
3801
3802 /* ACL data packet */
hci_acldata_packet(struct hci_dev * hdev,struct sk_buff * skb)3803 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3804 {
3805 struct hci_acl_hdr *hdr;
3806 struct hci_conn *conn;
3807 __u16 handle, flags;
3808
3809 hdr = skb_pull_data(skb, sizeof(*hdr));
3810 if (!hdr) {
3811 bt_dev_err(hdev, "ACL packet too small");
3812 goto drop;
3813 }
3814
3815 handle = __le16_to_cpu(hdr->handle);
3816 flags = hci_flags(handle);
3817 handle = hci_handle(handle);
3818
3819 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3820 handle, flags);
3821
3822 hdev->stat.acl_rx++;
3823
3824 hci_dev_lock(hdev);
3825 conn = hci_conn_hash_lookup_handle(hdev, handle);
3826 hci_dev_unlock(hdev);
3827
3828 if (conn) {
3829 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3830
3831 /* Send to upper protocol */
3832 l2cap_recv_acldata(conn, skb, flags);
3833 return;
3834 } else {
3835 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3836 handle);
3837 }
3838
3839 drop:
3840 kfree_skb(skb);
3841 }
3842
3843 /* SCO data packet */
hci_scodata_packet(struct hci_dev * hdev,struct sk_buff * skb)3844 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3845 {
3846 struct hci_sco_hdr *hdr;
3847 struct hci_conn *conn;
3848 __u16 handle, flags;
3849
3850 hdr = skb_pull_data(skb, sizeof(*hdr));
3851 if (!hdr) {
3852 bt_dev_err(hdev, "SCO packet too small");
3853 goto drop;
3854 }
3855
3856 handle = __le16_to_cpu(hdr->handle);
3857 flags = hci_flags(handle);
3858 handle = hci_handle(handle);
3859
3860 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3861 handle, flags);
3862
3863 hdev->stat.sco_rx++;
3864
3865 hci_dev_lock(hdev);
3866 conn = hci_conn_hash_lookup_handle(hdev, handle);
3867 hci_dev_unlock(hdev);
3868
3869 if (conn) {
3870 /* Send to upper protocol */
3871 hci_skb_pkt_status(skb) = flags & 0x03;
3872 sco_recv_scodata(conn, skb);
3873 return;
3874 } else {
3875 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3876 handle);
3877 }
3878
3879 drop:
3880 kfree_skb(skb);
3881 }
3882
hci_isodata_packet(struct hci_dev * hdev,struct sk_buff * skb)3883 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3884 {
3885 struct hci_iso_hdr *hdr;
3886 struct hci_conn *conn;
3887 __u16 handle, flags;
3888
3889 hdr = skb_pull_data(skb, sizeof(*hdr));
3890 if (!hdr) {
3891 bt_dev_err(hdev, "ISO packet too small");
3892 goto drop;
3893 }
3894
3895 handle = __le16_to_cpu(hdr->handle);
3896 flags = hci_flags(handle);
3897 handle = hci_handle(handle);
3898
3899 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3900 handle, flags);
3901
3902 hci_dev_lock(hdev);
3903 conn = hci_conn_hash_lookup_handle(hdev, handle);
3904 hci_dev_unlock(hdev);
3905
3906 if (!conn) {
3907 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3908 handle);
3909 goto drop;
3910 }
3911
3912 /* Send to upper protocol */
3913 iso_recv(conn, skb, flags);
3914 return;
3915
3916 drop:
3917 kfree_skb(skb);
3918 }
3919
hci_req_is_complete(struct hci_dev * hdev)3920 static bool hci_req_is_complete(struct hci_dev *hdev)
3921 {
3922 struct sk_buff *skb;
3923
3924 skb = skb_peek(&hdev->cmd_q);
3925 if (!skb)
3926 return true;
3927
3928 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3929 }

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
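
/* hci_req_cmd_complete() only selects a completion callback; the event
 * handling code in hci_event.c is what actually invokes it, roughly along
 * these lines (simplified sketch, not the literal caller):
 *
 *	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
 *			     &req_complete_skb);
 *	if (req_complete)
 *		req_complete(hdev, status, opcode);
 *	else if (req_complete_skb)
 *		req_complete_skb(hdev, status, opcode, skb);
 */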

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall thread which originally injected
	 * the packet. This helps when fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is still in the HCI_INIT state, however,
		 * incoming packets must still be processed so that the
		 * driver can complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
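
/* Transport drivers feed this work item by tagging each received skb with
 * its HCI packet type and handing it to hci_recv_frame(), which queues the
 * skb on hdev->rx_q and schedules rx_work. A minimal driver fragment
 * (illustrative only) looks like:
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */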

static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
		err = hci_send_frame(hdev, skb);
		if (err < 0) {
			hci_cmd_sync_cancel_sync(hdev, -err);
			return;
		}
		atomic_dec(&hdev->cmd_cnt);
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}
}
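
/* hdev->cmd_cnt mirrors the Num_HCI_Command_Packets credit advertised by the
 * controller: it is decremented here for every real command handed to the
 * driver and replenished by the event code when a Command Complete or
 * Command Status event arrives, which is what lets hci_cmd_work() below send
 * the next queued command.
 */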

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}
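
/* cmd_timer armed above acts as the command watchdog: if the controller
 * does not answer the outstanding command within HCI_CMD_TIMEOUT, the
 * timeout handler (hci_cmd_timeout()) reports the stalled opcode and
 * restarts command processing so the queue does not wedge forever.
 */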