1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
63
64 /* Get HCI device by index.
65 * Device is held on return. */
66 struct hci_dev *hci_dev_get(int index)
67 {
68 struct hci_dev *hdev = NULL, *d;
69
70 BT_DBG("%d", index);
71
72 if (index < 0)
73 return NULL;
74
75 read_lock(&hci_dev_list_lock);
76 list_for_each_entry(d, &hci_dev_list, list) {
77 if (d->id == index) {
78 hdev = hci_dev_hold(d);
79 break;
80 }
81 }
82 read_unlock(&hci_dev_list_lock);
83 return hdev;
84 }
85
86 /* ---- Inquiry support ---- */
87
88 bool hci_discovery_active(struct hci_dev *hdev)
89 {
90 struct discovery_state *discov = &hdev->discovery;
91
92 switch (discov->state) {
93 case DISCOVERY_FINDING:
94 case DISCOVERY_RESOLVING:
95 return true;
96
97 default:
98 return false;
99 }
100 }
101
102 void hci_discovery_set_state(struct hci_dev *hdev, int state)
103 {
104 int old_state = hdev->discovery.state;
105
106 if (old_state == state)
107 return;
108
109 hdev->discovery.state = state;
110
111 switch (state) {
112 case DISCOVERY_STOPPED:
113 hci_update_passive_scan(hdev);
114
115 if (old_state != DISCOVERY_STARTING)
116 mgmt_discovering(hdev, 0);
117 break;
118 case DISCOVERY_STARTING:
119 break;
120 case DISCOVERY_FINDING:
121 mgmt_discovering(hdev, 1);
122 break;
123 case DISCOVERY_RESOLVING:
124 break;
125 case DISCOVERY_STOPPING:
126 break;
127 }
128
129 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
130 }
131
132 void hci_inquiry_cache_flush(struct hci_dev *hdev)
133 {
134 struct discovery_state *cache = &hdev->discovery;
135 struct inquiry_entry *p, *n;
136
137 list_for_each_entry_safe(p, n, &cache->all, all) {
138 list_del(&p->all);
139 kfree(p);
140 }
141
142 INIT_LIST_HEAD(&cache->unknown);
143 INIT_LIST_HEAD(&cache->resolve);
144 }
145
146 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
147 bdaddr_t *bdaddr)
148 {
149 struct discovery_state *cache = &hdev->discovery;
150 struct inquiry_entry *e;
151
152 BT_DBG("cache %p, %pMR", cache, bdaddr);
153
154 list_for_each_entry(e, &cache->all, all) {
155 if (!bacmp(&e->data.bdaddr, bdaddr))
156 return e;
157 }
158
159 return NULL;
160 }
161
162 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
163 bdaddr_t *bdaddr)
164 {
165 struct discovery_state *cache = &hdev->discovery;
166 struct inquiry_entry *e;
167
168 BT_DBG("cache %p, %pMR", cache, bdaddr);
169
170 list_for_each_entry(e, &cache->unknown, list) {
171 if (!bacmp(&e->data.bdaddr, bdaddr))
172 return e;
173 }
174
175 return NULL;
176 }
177
178 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
179 bdaddr_t *bdaddr,
180 int state)
181 {
182 struct discovery_state *cache = &hdev->discovery;
183 struct inquiry_entry *e;
184
185 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
186
187 list_for_each_entry(e, &cache->resolve, list) {
188 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
189 return e;
190 if (!bacmp(&e->data.bdaddr, bdaddr))
191 return e;
192 }
193
194 return NULL;
195 }
196
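/* Re-insert @ie into the resolve list so the list stays ordered by signal
 * strength (smallest |RSSI| first), which lets name resolution be attempted
 * for the strongest devices first. Entries whose name lookup is already
 * pending keep their position.
 */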
197 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
198 struct inquiry_entry *ie)
199 {
200 struct discovery_state *cache = &hdev->discovery;
201 struct list_head *pos = &cache->resolve;
202 struct inquiry_entry *p;
203
204 list_del(&ie->list);
205
206 list_for_each_entry(p, &cache->resolve, list) {
207 if (p->name_state != NAME_PENDING &&
208 abs(p->data.rssi) >= abs(ie->data.rssi))
209 break;
210 pos = &p->list;
211 }
212
213 list_add(&ie->list, pos);
214 }
215
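/* Add or refresh an inquiry cache entry for @data->bdaddr. Returns a bitmask
 * of MGMT_DEV_FOUND_* flags for the mgmt Device Found event:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when the device does not support SSP and
 * MGMT_DEV_FOUND_CONFIRM_NAME when the remote name still needs resolving
 * (or when the entry could not be allocated).
 */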
216 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
217 bool name_known)
218 {
219 struct discovery_state *cache = &hdev->discovery;
220 struct inquiry_entry *ie;
221 u32 flags = 0;
222
223 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
224
225 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
226
227 if (!data->ssp_mode)
228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
229
230 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
231 if (ie) {
232 if (!ie->data.ssp_mode)
233 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
234
235 if (ie->name_state == NAME_NEEDED &&
236 data->rssi != ie->data.rssi) {
237 ie->data.rssi = data->rssi;
238 hci_inquiry_cache_update_resolve(hdev, ie);
239 }
240
241 goto update;
242 }
243
244 /* Entry not in the cache. Add new one. */
245 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
246 if (!ie) {
247 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
248 goto done;
249 }
250
251 list_add(&ie->all, &cache->all);
252
253 if (name_known) {
254 ie->name_state = NAME_KNOWN;
255 } else {
256 ie->name_state = NAME_NOT_KNOWN;
257 list_add(&ie->list, &cache->unknown);
258 }
259
260 update:
261 if (name_known && ie->name_state != NAME_KNOWN &&
262 ie->name_state != NAME_PENDING) {
263 ie->name_state = NAME_KNOWN;
264 list_del(&ie->list);
265 }
266
267 memcpy(&ie->data, data, sizeof(*data));
268 ie->timestamp = jiffies;
269 cache->timestamp = jiffies;
270
271 if (ie->name_state == NAME_NOT_KNOWN)
272 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
273
274 done:
275 return flags;
276 }
277
278 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
279 {
280 struct discovery_state *cache = &hdev->discovery;
281 struct inquiry_info *info = (struct inquiry_info *) buf;
282 struct inquiry_entry *e;
283 int copied = 0;
284
285 list_for_each_entry(e, &cache->all, all) {
286 struct inquiry_data *data = &e->data;
287
288 if (copied >= num)
289 break;
290
291 bacpy(&info->bdaddr, &data->bdaddr);
292 info->pscan_rep_mode = data->pscan_rep_mode;
293 info->pscan_period_mode = data->pscan_period_mode;
294 info->pscan_mode = data->pscan_mode;
295 memcpy(info->dev_class, data->dev_class, 3);
296 info->clock_offset = data->clock_offset;
297
298 info++;
299 copied++;
300 }
301
302 BT_DBG("cache %p, copied %d", cache, copied);
303 return copied;
304 }
305
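/* HCIINQUIRY ioctl handler: copies a struct hci_inquiry_req from user space,
 * flushes the inquiry cache and runs a synchronous inquiry when the cached
 * results are stale (or IREQ_CACHE_FLUSH is set), waits for the HCI_INQUIRY
 * flag to clear and then copies the cached responses back to user space.
 */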
306 int hci_inquiry(void __user *arg)
307 {
308 __u8 __user *ptr = arg;
309 struct hci_inquiry_req ir;
310 struct hci_dev *hdev;
311 int err = 0, do_inquiry = 0, max_rsp;
312 __u8 *buf;
313
314 if (copy_from_user(&ir, ptr, sizeof(ir)))
315 return -EFAULT;
316
317 hdev = hci_dev_get(ir.dev_id);
318 if (!hdev)
319 return -ENODEV;
320
321 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
322 err = -EBUSY;
323 goto done;
324 }
325
326 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
327 err = -EOPNOTSUPP;
328 goto done;
329 }
330
331 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
332 err = -EOPNOTSUPP;
333 goto done;
334 }
335
336 /* Restrict maximum inquiry length to 60 seconds */
337 if (ir.length > 60) {
338 err = -EINVAL;
339 goto done;
340 }
341
342 hci_dev_lock(hdev);
343 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
344 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
345 hci_inquiry_cache_flush(hdev);
346 do_inquiry = 1;
347 }
348 hci_dev_unlock(hdev);
349
350 if (do_inquiry) {
351 hci_req_sync_lock(hdev);
352 err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
353 hci_req_sync_unlock(hdev);
354
355 if (err < 0)
356 goto done;
357
358 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
359 * cleared). If it is interrupted by a signal, return -EINTR.
360 */
361 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
362 TASK_INTERRUPTIBLE)) {
363 err = -EINTR;
364 goto done;
365 }
366 }
367
368 /* For an unlimited number of responses we will use a buffer with
369 * 255 entries
370 */
371 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
372
373 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
374 * copy it to user space.
375 */
376 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
377 if (!buf) {
378 err = -ENOMEM;
379 goto done;
380 }
381
382 hci_dev_lock(hdev);
383 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
384 hci_dev_unlock(hdev);
385
386 BT_DBG("num_rsp %d", ir.num_rsp);
387
388 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
389 ptr += sizeof(ir);
390 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
391 ir.num_rsp))
392 err = -EFAULT;
393 } else
394 err = -EFAULT;
395
396 kfree(buf);
397
398 done:
399 hci_dev_put(hdev);
400 return err;
401 }
402
403 static int hci_dev_do_open(struct hci_dev *hdev)
404 {
405 int ret = 0;
406
407 BT_DBG("%s %p", hdev->name, hdev);
408
409 hci_req_sync_lock(hdev);
410
411 ret = hci_dev_open_sync(hdev);
412
413 hci_req_sync_unlock(hdev);
414 return ret;
415 }
416
417 /* ---- HCI ioctl helpers ---- */
418
419 int hci_dev_open(__u16 dev)
420 {
421 struct hci_dev *hdev;
422 int err;
423
424 hdev = hci_dev_get(dev);
425 if (!hdev)
426 return -ENODEV;
427
428 /* Devices that are marked as unconfigured can only be powered
429 * up as user channel. Trying to bring them up as normal devices
430 * will result in a failure. Only user channel operation is
431 * possible.
432 *
433 * When this function is called for a user channel, the flag
434 * HCI_USER_CHANNEL will be set first before attempting to
435 * open the device.
436 */
437 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
438 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
439 err = -EOPNOTSUPP;
440 goto done;
441 }
442
443 /* We need to ensure that no other power on/off work is pending
444 * before proceeding to call hci_dev_do_open. This is
445 * particularly important if the setup procedure has not yet
446 * completed.
447 */
448 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
449 cancel_delayed_work(&hdev->power_off);
450
451 /* After this call it is guaranteed that the setup procedure
452 * has finished. This means that error conditions like RFKILL
453 * or no valid public or static random address apply.
454 */
455 flush_workqueue(hdev->req_workqueue);
456
457 /* For controllers not using the management interface and that
458 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
459 * so that pairing works for them. Once the management interface
460 * is in use this bit will be cleared again and userspace has
461 * to explicitly enable it.
462 */
463 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
464 !hci_dev_test_flag(hdev, HCI_MGMT))
465 hci_dev_set_flag(hdev, HCI_BONDABLE);
466
467 err = hci_dev_do_open(hdev);
468
469 done:
470 hci_dev_put(hdev);
471 return err;
472 }
473
474 int hci_dev_do_close(struct hci_dev *hdev)
475 {
476 int err;
477
478 BT_DBG("%s %p", hdev->name, hdev);
479
480 hci_req_sync_lock(hdev);
481
482 err = hci_dev_close_sync(hdev);
483
484 hci_req_sync_unlock(hdev);
485
486 return err;
487 }
488
489 int hci_dev_close(__u16 dev)
490 {
491 struct hci_dev *hdev;
492 int err;
493
494 hdev = hci_dev_get(dev);
495 if (!hdev)
496 return -ENODEV;
497
498 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
499 err = -EBUSY;
500 goto done;
501 }
502
503 cancel_work_sync(&hdev->power_on);
504 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
505 cancel_delayed_work(&hdev->power_off);
506
507 err = hci_dev_do_close(hdev);
508
509 done:
510 hci_dev_put(hdev);
511 return err;
512 }
513
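/* Reset sequence: purge the RX and command queues, let any queued command
 * work drain, flush the inquiry cache and connection hash, reset the
 * ACL/SCO/LE/ISO flow control counters and finally issue HCI_Reset via
 * hci_reset_sync(). The request sync lock is held for the whole sequence.
 */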
514 static int hci_dev_do_reset(struct hci_dev *hdev)
515 {
516 int ret;
517
518 BT_DBG("%s %p", hdev->name, hdev);
519
520 hci_req_sync_lock(hdev);
521
522 /* Drop queues */
523 skb_queue_purge(&hdev->rx_q);
524 skb_queue_purge(&hdev->cmd_q);
525
526 /* Cancel these to avoid queueing non-chained pending work */
527 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
528 /* Wait for
529 *
530 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
531 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
532 *
533 * inside RCU section to see the flag or complete scheduling.
534 */
535 synchronize_rcu();
536 /* Explicitly cancel works in case scheduled after setting the flag. */
537 cancel_delayed_work(&hdev->cmd_timer);
538 cancel_delayed_work(&hdev->ncmd_timer);
539
540 /* Avoid potential lockdep warnings from the *_flush() calls by
541 * ensuring the workqueue is empty up front.
542 */
543 drain_workqueue(hdev->workqueue);
544
545 hci_dev_lock(hdev);
546 hci_inquiry_cache_flush(hdev);
547 hci_conn_hash_flush(hdev);
548 hci_dev_unlock(hdev);
549
550 if (hdev->flush)
551 hdev->flush(hdev);
552
553 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
554
555 atomic_set(&hdev->cmd_cnt, 1);
556 hdev->acl_cnt = 0;
557 hdev->sco_cnt = 0;
558 hdev->le_cnt = 0;
559 hdev->iso_cnt = 0;
560
561 ret = hci_reset_sync(hdev);
562
563 hci_req_sync_unlock(hdev);
564 return ret;
565 }
566
567 int hci_dev_reset(__u16 dev)
568 {
569 struct hci_dev *hdev;
570 int err;
571
572 hdev = hci_dev_get(dev);
573 if (!hdev)
574 return -ENODEV;
575
576 if (!test_bit(HCI_UP, &hdev->flags)) {
577 err = -ENETDOWN;
578 goto done;
579 }
580
581 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
582 err = -EBUSY;
583 goto done;
584 }
585
586 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
587 err = -EOPNOTSUPP;
588 goto done;
589 }
590
591 err = hci_dev_do_reset(hdev);
592
593 done:
594 hci_dev_put(hdev);
595 return err;
596 }
597
598 int hci_dev_reset_stat(__u16 dev)
599 {
600 struct hci_dev *hdev;
601 int ret = 0;
602
603 hdev = hci_dev_get(dev);
604 if (!hdev)
605 return -ENODEV;
606
607 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
608 ret = -EBUSY;
609 goto done;
610 }
611
612 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
613 ret = -EOPNOTSUPP;
614 goto done;
615 }
616
617 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
618
619 done:
620 hci_dev_put(hdev);
621 return ret;
622 }
623
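/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE device flags in sync with a
 * scan enable value written through the legacy HCISETSCAN ioctl and, if the
 * management interface is in use, notify it of the new settings.
 */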
624 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
625 {
626 bool conn_changed, discov_changed;
627
628 BT_DBG("%s scan 0x%02x", hdev->name, scan);
629
630 if ((scan & SCAN_PAGE))
631 conn_changed = !hci_dev_test_and_set_flag(hdev,
632 HCI_CONNECTABLE);
633 else
634 conn_changed = hci_dev_test_and_clear_flag(hdev,
635 HCI_CONNECTABLE);
636
637 if ((scan & SCAN_INQUIRY)) {
638 discov_changed = !hci_dev_test_and_set_flag(hdev,
639 HCI_DISCOVERABLE);
640 } else {
641 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
642 discov_changed = hci_dev_test_and_clear_flag(hdev,
643 HCI_DISCOVERABLE);
644 }
645
646 if (!hci_dev_test_flag(hdev, HCI_MGMT))
647 return;
648
649 if (conn_changed || discov_changed) {
650 /* In case this was disabled through mgmt */
651 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
652
653 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
654 hci_update_adv_data(hdev, hdev->cur_adv_instance);
655
656 mgmt_new_settings(hdev);
657 }
658 }
659
660 int hci_dev_cmd(unsigned int cmd, void __user *arg)
661 {
662 struct hci_dev *hdev;
663 struct hci_dev_req dr;
664 __le16 policy;
665 int err = 0;
666
667 if (copy_from_user(&dr, arg, sizeof(dr)))
668 return -EFAULT;
669
670 hdev = hci_dev_get(dr.dev_id);
671 if (!hdev)
672 return -ENODEV;
673
674 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
675 err = -EBUSY;
676 goto done;
677 }
678
679 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
680 err = -EOPNOTSUPP;
681 goto done;
682 }
683
684 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
685 err = -EOPNOTSUPP;
686 goto done;
687 }
688
689 switch (cmd) {
690 case HCISETAUTH:
691 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
692 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
693 break;
694
695 case HCISETENCRYPT:
696 if (!lmp_encrypt_capable(hdev)) {
697 err = -EOPNOTSUPP;
698 break;
699 }
700
701 if (!test_bit(HCI_AUTH, &hdev->flags)) {
702 /* Auth must be enabled first */
703 err = hci_cmd_sync_status(hdev,
704 HCI_OP_WRITE_AUTH_ENABLE,
705 1, &dr.dev_opt,
706 HCI_CMD_TIMEOUT);
707 if (err)
708 break;
709 }
710
711 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
712 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
713 break;
714
715 case HCISETSCAN:
716 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
717 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
718
719 /* Ensure that the connectable and discoverable states
720 * get correctly modified as this was a non-mgmt change.
721 */
722 if (!err)
723 hci_update_passive_scan_state(hdev, dr.dev_opt);
724 break;
725
726 case HCISETLINKPOL:
727 policy = cpu_to_le16(dr.dev_opt);
728
729 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
730 2, &policy, HCI_CMD_TIMEOUT);
731 break;
732
733 case HCISETLINKMODE:
734 hdev->link_mode = ((__u16) dr.dev_opt) &
735 (HCI_LM_MASTER | HCI_LM_ACCEPT);
736 break;
737
738 case HCISETPTYPE:
739 if (hdev->pkt_type == (__u16) dr.dev_opt)
740 break;
741
742 hdev->pkt_type = (__u16) dr.dev_opt;
743 mgmt_phy_configuration_changed(hdev, NULL);
744 break;
745
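	/* HCISETACLMTU and HCISETSCOMTU pack two __u16 values into dev_opt:
	 * the first is the packet count and the second the MTU, as read by
	 * the casts below.
	 */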
746 case HCISETACLMTU:
747 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
748 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
749 break;
750
751 case HCISETSCOMTU:
752 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
753 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
754 break;
755
756 default:
757 err = -EINVAL;
758 break;
759 }
760
761 done:
762 hci_dev_put(hdev);
763 return err;
764 }
765
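/* HCIGETDEVLIST ioctl handler: reads the requested number of entries, then
 * fills a struct hci_dev_list_req with one struct hci_dev_req (dev_id plus
 * flags) per registered controller, reporting HCI_UP as cleared while the
 * auto-off timer is still pending.
 */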
766 int hci_get_dev_list(void __user *arg)
767 {
768 struct hci_dev *hdev;
769 struct hci_dev_list_req *dl;
770 struct hci_dev_req *dr;
771 int n = 0, err;
772 __u16 dev_num;
773
774 if (get_user(dev_num, (__u16 __user *) arg))
775 return -EFAULT;
776
777 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
778 return -EINVAL;
779
780 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
781 if (!dl)
782 return -ENOMEM;
783
784 dl->dev_num = dev_num;
785 dr = dl->dev_req;
786
787 read_lock(&hci_dev_list_lock);
788 list_for_each_entry(hdev, &hci_dev_list, list) {
789 unsigned long flags = hdev->flags;
790
791 /* When the auto-off is configured it means the transport
792 * is running, but in that case still indicate that the
793 * device is actually down.
794 */
795 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
796 flags &= ~BIT(HCI_UP);
797
798 dr[n].dev_id = hdev->id;
799 dr[n].dev_opt = flags;
800
801 if (++n >= dev_num)
802 break;
803 }
804 read_unlock(&hci_dev_list_lock);
805
806 dl->dev_num = n;
807 err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
808 kfree(dl);
809
810 return err ? -EFAULT : 0;
811 }
812
813 int hci_get_dev_info(void __user *arg)
814 {
815 struct hci_dev *hdev;
816 struct hci_dev_info di;
817 unsigned long flags;
818 int err = 0;
819
820 if (copy_from_user(&di, arg, sizeof(di)))
821 return -EFAULT;
822
823 hdev = hci_dev_get(di.dev_id);
824 if (!hdev)
825 return -ENODEV;
826
827 /* When the auto-off is configured it means the transport
828 * is running, but in that case still indicate that the
829 * device is actually down.
830 */
831 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
832 flags = hdev->flags & ~BIT(HCI_UP);
833 else
834 flags = hdev->flags;
835
836 strscpy(di.name, hdev->name, sizeof(di.name));
837 di.bdaddr = hdev->bdaddr;
838 di.type = (hdev->bus & 0x0f);
839 di.flags = flags;
840 di.pkt_type = hdev->pkt_type;
841 if (lmp_bredr_capable(hdev)) {
842 di.acl_mtu = hdev->acl_mtu;
843 di.acl_pkts = hdev->acl_pkts;
844 di.sco_mtu = hdev->sco_mtu;
845 di.sco_pkts = hdev->sco_pkts;
846 } else {
847 di.acl_mtu = hdev->le_mtu;
848 di.acl_pkts = hdev->le_pkts;
849 di.sco_mtu = 0;
850 di.sco_pkts = 0;
851 }
852 di.link_policy = hdev->link_policy;
853 di.link_mode = hdev->link_mode;
854
855 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
856 memcpy(&di.features, &hdev->features, sizeof(di.features));
857
858 if (copy_to_user(arg, &di, sizeof(di)))
859 err = -EFAULT;
860
861 hci_dev_put(hdev);
862
863 return err;
864 }
865
866 /* ---- Interface to HCI drivers ---- */
867
868 static int hci_dev_do_poweroff(struct hci_dev *hdev)
869 {
870 int err;
871
872 BT_DBG("%s %p", hdev->name, hdev);
873
874 hci_req_sync_lock(hdev);
875
876 err = hci_set_powered_sync(hdev, false);
877
878 hci_req_sync_unlock(hdev);
879
880 return err;
881 }
882
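/* rfkill set_block callback: when the switch blocks the radio, mark the
 * controller HCI_RFKILLED and power it off (unless setup or configuration is
 * still in progress); devices in user channel mode are left to their owning
 * process and the callback returns -EBUSY instead.
 */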
883 static int hci_rfkill_set_block(void *data, bool blocked)
884 {
885 struct hci_dev *hdev = data;
886 int err;
887
888 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
889
890 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
891 return -EBUSY;
892
893 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
894 return 0;
895
896 if (blocked) {
897 hci_dev_set_flag(hdev, HCI_RFKILLED);
898
899 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
900 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
901 err = hci_dev_do_poweroff(hdev);
902 if (err) {
903 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
904 err);
905
906 /* Make sure the device is still closed even if
907 * anything during the power off sequence (e.g.
908 * disconnecting devices) failed.
909 */
910 hci_dev_do_close(hdev);
911 }
912 }
913 } else {
914 hci_dev_clear_flag(hdev, HCI_RFKILLED);
915 }
916
917 return 0;
918 }
919
920 static const struct rfkill_ops hci_rfkill_ops = {
921 .set_block = hci_rfkill_set_block,
922 };
923
924 static void hci_power_on(struct work_struct *work)
925 {
926 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
927 int err;
928
929 BT_DBG("%s", hdev->name);
930
931 if (test_bit(HCI_UP, &hdev->flags) &&
932 hci_dev_test_flag(hdev, HCI_MGMT) &&
933 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
934 cancel_delayed_work(&hdev->power_off);
935 err = hci_powered_update_sync(hdev);
936 mgmt_power_on(hdev, err);
937 return;
938 }
939
940 err = hci_dev_do_open(hdev);
941 if (err < 0) {
942 hci_dev_lock(hdev);
943 mgmt_set_powered_failed(hdev, err);
944 hci_dev_unlock(hdev);
945 return;
946 }
947
948 /* During the HCI setup phase, a few error conditions are
949 * ignored and they need to be checked now. If they are still
950 * valid, it is important to turn the device back off.
951 */
952 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
953 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
954 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
955 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
956 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
957 hci_dev_do_close(hdev);
958 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
959 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
960 HCI_AUTO_OFF_TIMEOUT);
961 }
962
963 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
964 /* For unconfigured devices, set the HCI_RAW flag
965 * so that userspace can easily identify them.
966 */
967 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
968 set_bit(HCI_RAW, &hdev->flags);
969
970 /* For fully configured devices, this will send
971 * the Index Added event. For unconfigured devices,
972 * it will send Unconfigured Index Added event.
973 *
974 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
975 * and no event will be sent.
976 */
977 mgmt_index_added(hdev);
978 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
979 /* Now that the controller is configured, it is
980 * important to clear the HCI_RAW flag.
981 */
982 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
983 clear_bit(HCI_RAW, &hdev->flags);
984
985 /* Powering on the controller with HCI_CONFIG set only
986 * happens with the transition from unconfigured to
987 * configured. This will send the Index Added event.
988 */
989 mgmt_index_added(hdev);
990 }
991 }
992
993 static void hci_power_off(struct work_struct *work)
994 {
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
996 power_off.work);
997
998 BT_DBG("%s", hdev->name);
999
1000 hci_dev_do_close(hdev);
1001 }
1002
1003 static void hci_error_reset(struct work_struct *work)
1004 {
1005 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1006
1007 hci_dev_hold(hdev);
1008 BT_DBG("%s", hdev->name);
1009
1010 if (hdev->hw_error)
1011 hdev->hw_error(hdev, hdev->hw_error_code);
1012 else
1013 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1014
1015 if (!hci_dev_do_close(hdev))
1016 hci_dev_do_open(hdev);
1017
1018 hci_dev_put(hdev);
1019 }
1020
1021 void hci_uuids_clear(struct hci_dev *hdev)
1022 {
1023 struct bt_uuid *uuid, *tmp;
1024
1025 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1026 list_del(&uuid->list);
1027 kfree(uuid);
1028 }
1029 }
1030
1031 void hci_link_keys_clear(struct hci_dev *hdev)
1032 {
1033 struct link_key *key, *tmp;
1034
1035 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1036 list_del_rcu(&key->list);
1037 kfree_rcu(key, rcu);
1038 }
1039 }
1040
1041 void hci_smp_ltks_clear(struct hci_dev *hdev)
1042 {
1043 struct smp_ltk *k, *tmp;
1044
1045 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1046 list_del_rcu(&k->list);
1047 kfree_rcu(k, rcu);
1048 }
1049 }
1050
1051 void hci_smp_irks_clear(struct hci_dev *hdev)
1052 {
1053 struct smp_irk *k, *tmp;
1054
1055 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1056 list_del_rcu(&k->list);
1057 kfree_rcu(k, rcu);
1058 }
1059 }
1060
1061 void hci_blocked_keys_clear(struct hci_dev *hdev)
1062 {
1063 struct blocked_key *b, *tmp;
1064
1065 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1066 list_del_rcu(&b->list);
1067 kfree_rcu(b, rcu);
1068 }
1069 }
1070
1071 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1072 {
1073 bool blocked = false;
1074 struct blocked_key *b;
1075
1076 rcu_read_lock();
1077 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1078 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1079 blocked = true;
1080 break;
1081 }
1082 }
1083
1084 rcu_read_unlock();
1085 return blocked;
1086 }
1087
1088 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1089 {
1090 struct link_key *k;
1091
1092 rcu_read_lock();
1093 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1094 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1095 rcu_read_unlock();
1096
1097 if (hci_is_blocked_key(hdev,
1098 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1099 k->val)) {
1100 bt_dev_warn_ratelimited(hdev,
1101 "Link key blocked for %pMR",
1102 &k->bdaddr);
1103 return NULL;
1104 }
1105
1106 return k;
1107 }
1108 }
1109 rcu_read_unlock();
1110
1111 return NULL;
1112 }
1113
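/* Decide whether a new BR/EDR link key should be stored persistently: legacy
 * keys, keys from dedicated bonding and keys derived over an LE link (SC)
 * are kept, while debug keys and changed combination keys without a previous
 * key are not; for the remaining cases the local and remote authentication
 * requirements decide.
 */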
1114 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1115 u8 key_type, u8 old_key_type)
1116 {
1117 /* Legacy key */
1118 if (key_type < 0x03)
1119 return true;
1120
1121 /* Debug keys are insecure so don't store them persistently */
1122 if (key_type == HCI_LK_DEBUG_COMBINATION)
1123 return false;
1124
1125 /* Changed combination key and there's no previous one */
1126 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1127 return false;
1128
1129 /* Security mode 3 case */
1130 if (!conn)
1131 return true;
1132
1133 /* BR/EDR key derived using SC from an LE link */
1134 if (conn->type == LE_LINK)
1135 return true;
1136
1137 /* Neither local nor remote side had no-bonding as requirement */
1138 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1139 return true;
1140
1141 /* Local side had dedicated bonding as requirement */
1142 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1143 return true;
1144
1145 /* Remote side had dedicated bonding as requirement */
1146 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1147 return true;
1148
1149 /* If none of the above criteria match, then don't store the key
1150 * persistently */
1151 return false;
1152 }
1153
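/* Map an LTK type to the connection role it is used for: SMP_LTK is the key
 * used in the central (master) role, all other types are treated as
 * peripheral (slave) keys.
 */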
1154 static u8 ltk_role(u8 type)
1155 {
1156 if (type == SMP_LTK)
1157 return HCI_ROLE_MASTER;
1158
1159 return HCI_ROLE_SLAVE;
1160 }
1161
1162 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1163 u8 addr_type, u8 role)
1164 {
1165 struct smp_ltk *k;
1166
1167 rcu_read_lock();
1168 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1169 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1170 continue;
1171
1172 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1173 rcu_read_unlock();
1174
1175 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1176 k->val)) {
1177 bt_dev_warn_ratelimited(hdev,
1178 "LTK blocked for %pMR",
1179 &k->bdaddr);
1180 return NULL;
1181 }
1182
1183 return k;
1184 }
1185 }
1186 rcu_read_unlock();
1187
1188 return NULL;
1189 }
1190
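/* Resolve a Resolvable Private Address to a stored IRK: first try a direct
 * match against RPAs that have already been resolved, then fall back to
 * running smp_irk_matches() against every known IRK, caching the RPA on
 * success. Blocked identity keys are never returned.
 */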
1191 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1192 {
1193 struct smp_irk *irk_to_return = NULL;
1194 struct smp_irk *irk;
1195
1196 rcu_read_lock();
1197 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1198 if (!bacmp(&irk->rpa, rpa)) {
1199 irk_to_return = irk;
1200 goto done;
1201 }
1202 }
1203
1204 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1205 if (smp_irk_matches(hdev, irk->val, rpa)) {
1206 bacpy(&irk->rpa, rpa);
1207 irk_to_return = irk;
1208 goto done;
1209 }
1210 }
1211
1212 done:
1213 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1214 irk_to_return->val)) {
1215 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1216 &irk_to_return->bdaddr);
1217 irk_to_return = NULL;
1218 }
1219
1220 rcu_read_unlock();
1221
1222 return irk_to_return;
1223 }
1224
1225 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1226 u8 addr_type)
1227 {
1228 struct smp_irk *irk_to_return = NULL;
1229 struct smp_irk *irk;
1230
1231 /* Identity Address must be public or static random */
1232 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1233 return NULL;
1234
1235 rcu_read_lock();
1236 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1237 if (addr_type == irk->addr_type &&
1238 bacmp(bdaddr, &irk->bdaddr) == 0) {
1239 irk_to_return = irk;
1240 goto done;
1241 }
1242 }
1243
1244 done:
1245
1246 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1247 irk_to_return->val)) {
1248 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1249 &irk_to_return->bdaddr);
1250 irk_to_return = NULL;
1251 }
1252
1253 rcu_read_unlock();
1254
1255 return irk_to_return;
1256 }
1257
1258 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1259 bdaddr_t *bdaddr, u8 *val, u8 type,
1260 u8 pin_len, bool *persistent)
1261 {
1262 struct link_key *key, *old_key;
1263 u8 old_key_type;
1264
1265 old_key = hci_find_link_key(hdev, bdaddr);
1266 if (old_key) {
1267 old_key_type = old_key->type;
1268 key = old_key;
1269 } else {
1270 old_key_type = conn ? conn->key_type : 0xff;
1271 key = kzalloc(sizeof(*key), GFP_KERNEL);
1272 if (!key)
1273 return NULL;
1274 list_add_rcu(&key->list, &hdev->link_keys);
1275 }
1276
1277 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1278
1279 /* Some buggy controller combinations generate a changed
1280 * combination key for legacy pairing even when there's no
1281 * previous key */
1282 if (type == HCI_LK_CHANGED_COMBINATION &&
1283 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1284 type = HCI_LK_COMBINATION;
1285 if (conn)
1286 conn->key_type = type;
1287 }
1288
1289 bacpy(&key->bdaddr, bdaddr);
1290 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1291 key->pin_len = pin_len;
1292
1293 if (type == HCI_LK_CHANGED_COMBINATION)
1294 key->type = old_key_type;
1295 else
1296 key->type = type;
1297
1298 if (persistent)
1299 *persistent = hci_persistent_key(hdev, conn, type,
1300 old_key_type);
1301
1302 return key;
1303 }
1304
1305 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1306 u8 addr_type, u8 type, u8 authenticated,
1307 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1308 {
1309 struct smp_ltk *key, *old_key;
1310 u8 role = ltk_role(type);
1311
1312 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1313 if (old_key)
1314 key = old_key;
1315 else {
1316 key = kzalloc(sizeof(*key), GFP_KERNEL);
1317 if (!key)
1318 return NULL;
1319 list_add_rcu(&key->list, &hdev->long_term_keys);
1320 }
1321
1322 bacpy(&key->bdaddr, bdaddr);
1323 key->bdaddr_type = addr_type;
1324 memcpy(key->val, tk, sizeof(key->val));
1325 key->authenticated = authenticated;
1326 key->ediv = ediv;
1327 key->rand = rand;
1328 key->enc_size = enc_size;
1329 key->type = type;
1330
1331 return key;
1332 }
1333
1334 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1335 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1336 {
1337 struct smp_irk *irk;
1338
1339 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1340 if (!irk) {
1341 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1342 if (!irk)
1343 return NULL;
1344
1345 bacpy(&irk->bdaddr, bdaddr);
1346 irk->addr_type = addr_type;
1347
1348 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1349 }
1350
1351 memcpy(irk->val, val, 16);
1352 bacpy(&irk->rpa, rpa);
1353
1354 return irk;
1355 }
1356
1357 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1358 {
1359 struct link_key *key;
1360
1361 key = hci_find_link_key(hdev, bdaddr);
1362 if (!key)
1363 return -ENOENT;
1364
1365 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1366
1367 list_del_rcu(&key->list);
1368 kfree_rcu(key, rcu);
1369
1370 return 0;
1371 }
1372
1373 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1374 {
1375 struct smp_ltk *k, *tmp;
1376 int removed = 0;
1377
1378 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1379 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1380 continue;
1381
1382 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1383
1384 list_del_rcu(&k->list);
1385 kfree_rcu(k, rcu);
1386 removed++;
1387 }
1388
1389 return removed ? 0 : -ENOENT;
1390 }
1391
1392 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1393 {
1394 struct smp_irk *k, *tmp;
1395
1396 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1397 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1398 continue;
1399
1400 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1401
1402 list_del_rcu(&k->list);
1403 kfree_rcu(k, rcu);
1404 }
1405 }
1406
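/* Check whether a bond exists for @bdaddr: BR/EDR addresses are matched
 * against the stored link keys, while LE addresses are first translated to
 * their identity address through the IRK list and then matched against the
 * stored long term keys.
 */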
1407 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1408 {
1409 struct smp_ltk *k;
1410 struct smp_irk *irk;
1411 u8 addr_type;
1412
1413 if (type == BDADDR_BREDR) {
1414 if (hci_find_link_key(hdev, bdaddr))
1415 return true;
1416 return false;
1417 }
1418
1419 /* Convert to HCI addr type which struct smp_ltk uses */
1420 if (type == BDADDR_LE_PUBLIC)
1421 addr_type = ADDR_LE_DEV_PUBLIC;
1422 else
1423 addr_type = ADDR_LE_DEV_RANDOM;
1424
1425 irk = hci_get_irk(hdev, bdaddr, addr_type);
1426 if (irk) {
1427 bdaddr = &irk->bdaddr;
1428 addr_type = irk->addr_type;
1429 }
1430
1431 rcu_read_lock();
1432 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1433 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1434 rcu_read_unlock();
1435 return true;
1436 }
1437 }
1438 rcu_read_unlock();
1439
1440 return false;
1441 }
1442
1443 /* HCI command timer function */
1444 static void hci_cmd_timeout(struct work_struct *work)
1445 {
1446 struct hci_dev *hdev = container_of(work, struct hci_dev,
1447 cmd_timer.work);
1448
1449 if (hdev->req_skb) {
1450 u16 opcode = hci_skb_opcode(hdev->req_skb);
1451
1452 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1453
1454 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1455 } else {
1456 bt_dev_err(hdev, "command tx timeout");
1457 }
1458
1459 if (hdev->cmd_timeout)
1460 hdev->cmd_timeout(hdev);
1461
1462 atomic_set(&hdev->cmd_cnt, 1);
1463 queue_work(hdev->workqueue, &hdev->cmd_work);
1464 }
1465
1466 /* HCI ncmd timer function */
1467 static void hci_ncmd_timeout(struct work_struct *work)
1468 {
1469 struct hci_dev *hdev = container_of(work, struct hci_dev,
1470 ncmd_timer.work);
1471
1472 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1473
1474 /* During HCI_INIT phase no events can be injected if the ncmd timer
1475 * triggers since the procedure has its own timeout handling.
1476 */
1477 if (test_bit(HCI_INIT, &hdev->flags))
1478 return;
1479
1480 /* This is an irrecoverable state, inject hardware error event */
1481 hci_reset_dev(hdev);
1482 }
1483
1484 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1485 bdaddr_t *bdaddr, u8 bdaddr_type)
1486 {
1487 struct oob_data *data;
1488
1489 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1490 if (bacmp(bdaddr, &data->bdaddr) != 0)
1491 continue;
1492 if (data->bdaddr_type != bdaddr_type)
1493 continue;
1494 return data;
1495 }
1496
1497 return NULL;
1498 }
1499
1500 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1501 u8 bdaddr_type)
1502 {
1503 struct oob_data *data;
1504
1505 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1506 if (!data)
1507 return -ENOENT;
1508
1509 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1510
1511 list_del(&data->list);
1512 kfree(data);
1513
1514 return 0;
1515 }
1516
1517 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1518 {
1519 struct oob_data *data, *n;
1520
1521 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1522 list_del(&data->list);
1523 kfree(data);
1524 }
1525 }
1526
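/* Store (or refresh) remote OOB pairing data. The present field encodes
 * which value pairs are valid: 0x01 for P-192 only, 0x02 for P-256 only and
 * 0x03 when both the 192 and 256 bit hash/randomizer pairs were provided.
 */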
1527 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1528 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1529 u8 *hash256, u8 *rand256)
1530 {
1531 struct oob_data *data;
1532
1533 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1534 if (!data) {
1535 data = kmalloc(sizeof(*data), GFP_KERNEL);
1536 if (!data)
1537 return -ENOMEM;
1538
1539 bacpy(&data->bdaddr, bdaddr);
1540 data->bdaddr_type = bdaddr_type;
1541 list_add(&data->list, &hdev->remote_oob_data);
1542 }
1543
1544 if (hash192 && rand192) {
1545 memcpy(data->hash192, hash192, sizeof(data->hash192));
1546 memcpy(data->rand192, rand192, sizeof(data->rand192));
1547 if (hash256 && rand256)
1548 data->present = 0x03;
1549 } else {
1550 memset(data->hash192, 0, sizeof(data->hash192));
1551 memset(data->rand192, 0, sizeof(data->rand192));
1552 if (hash256 && rand256)
1553 data->present = 0x02;
1554 else
1555 data->present = 0x00;
1556 }
1557
1558 if (hash256 && rand256) {
1559 memcpy(data->hash256, hash256, sizeof(data->hash256));
1560 memcpy(data->rand256, rand256, sizeof(data->rand256));
1561 } else {
1562 memset(data->hash256, 0, sizeof(data->hash256));
1563 memset(data->rand256, 0, sizeof(data->rand256));
1564 if (hash192 && rand192)
1565 data->present = 0x01;
1566 }
1567
1568 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1569
1570 return 0;
1571 }
1572
1573 /* This function requires the caller holds hdev->lock */
1574 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1575 {
1576 struct adv_info *adv_instance;
1577
1578 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1579 if (adv_instance->instance == instance)
1580 return adv_instance;
1581 }
1582
1583 return NULL;
1584 }
1585
1586 /* This function requires the caller holds hdev->lock */
1587 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1588 {
1589 struct adv_info *cur_instance;
1590
1591 cur_instance = hci_find_adv_instance(hdev, instance);
1592 if (!cur_instance)
1593 return NULL;
1594
1595 if (cur_instance == list_last_entry(&hdev->adv_instances,
1596 struct adv_info, list))
1597 return list_first_entry(&hdev->adv_instances,
1598 struct adv_info, list);
1599 else
1600 return list_next_entry(cur_instance, list);
1601 }
1602
1603 /* This function requires the caller holds hdev->lock */
1604 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1605 {
1606 struct adv_info *adv_instance;
1607
1608 adv_instance = hci_find_adv_instance(hdev, instance);
1609 if (!adv_instance)
1610 return -ENOENT;
1611
1612 BT_DBG("%s removing %dMR", hdev->name, instance);
1613
1614 if (hdev->cur_adv_instance == instance) {
1615 if (hdev->adv_instance_timeout) {
1616 cancel_delayed_work(&hdev->adv_instance_expire);
1617 hdev->adv_instance_timeout = 0;
1618 }
1619 hdev->cur_adv_instance = 0x00;
1620 }
1621
1622 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1623
1624 list_del(&adv_instance->list);
1625 kfree(adv_instance);
1626
1627 hdev->adv_instance_cnt--;
1628
1629 return 0;
1630 }
1631
1632 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1633 {
1634 struct adv_info *adv_instance, *n;
1635
1636 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1637 adv_instance->rpa_expired = rpa_expired;
1638 }
1639
1640 /* This function requires the caller holds hdev->lock */
1641 void hci_adv_instances_clear(struct hci_dev *hdev)
1642 {
1643 struct adv_info *adv_instance, *n;
1644
1645 if (hdev->adv_instance_timeout) {
1646 disable_delayed_work(&hdev->adv_instance_expire);
1647 hdev->adv_instance_timeout = 0;
1648 }
1649
1650 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1651 disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1652 list_del(&adv_instance->list);
1653 kfree(adv_instance);
1654 }
1655
1656 hdev->adv_instance_cnt = 0;
1657 hdev->cur_adv_instance = 0x00;
1658 }
1659
1660 static void adv_instance_rpa_expired(struct work_struct *work)
1661 {
1662 struct adv_info *adv_instance = container_of(work, struct adv_info,
1663 rpa_expired_cb.work);
1664
1665 BT_DBG("");
1666
1667 adv_instance->rpa_expired = true;
1668 }
1669
1670 /* This function requires the caller holds hdev->lock */
1671 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1672 u32 flags, u16 adv_data_len, u8 *adv_data,
1673 u16 scan_rsp_len, u8 *scan_rsp_data,
1674 u16 timeout, u16 duration, s8 tx_power,
1675 u32 min_interval, u32 max_interval,
1676 u8 mesh_handle)
1677 {
1678 struct adv_info *adv;
1679
1680 adv = hci_find_adv_instance(hdev, instance);
1681 if (adv) {
1682 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1683 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1684 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1685 } else {
1686 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1687 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1688 return ERR_PTR(-EOVERFLOW);
1689
1690 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1691 if (!adv)
1692 return ERR_PTR(-ENOMEM);
1693
1694 adv->pending = true;
1695 adv->instance = instance;
1696
1697 /* If the controller supports only one set and the instance is set to
1698 * 1 then there is no option other than using handle 0x00.
1699 */
1700 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1701 adv->handle = 0x00;
1702 else
1703 adv->handle = instance;
1704
1705 list_add(&adv->list, &hdev->adv_instances);
1706 hdev->adv_instance_cnt++;
1707 }
1708
1709 adv->flags = flags;
1710 adv->min_interval = min_interval;
1711 adv->max_interval = max_interval;
1712 adv->tx_power = tx_power;
1713 /* Defining a mesh_handle changes the timing units to ms,
1714 * rather than seconds, and ties the instance to the requested
1715 * mesh_tx queue.
1716 */
1717 adv->mesh = mesh_handle;
1718
1719 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1720 scan_rsp_len, scan_rsp_data);
1721
1722 adv->timeout = timeout;
1723 adv->remaining_time = timeout;
1724
1725 if (duration == 0)
1726 adv->duration = hdev->def_multi_adv_rotation_duration;
1727 else
1728 adv->duration = duration;
1729
1730 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1731
1732 BT_DBG("%s for %dMR", hdev->name, instance);
1733
1734 return adv;
1735 }
1736
1737 /* This function requires the caller holds hdev->lock */
1738 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1739 u32 flags, u8 data_len, u8 *data,
1740 u32 min_interval, u32 max_interval)
1741 {
1742 struct adv_info *adv;
1743
1744 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1745 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1746 min_interval, max_interval, 0);
1747 if (IS_ERR(adv))
1748 return adv;
1749
1750 adv->periodic = true;
1751 adv->per_adv_data_len = data_len;
1752
1753 if (data)
1754 memcpy(adv->per_adv_data, data, data_len);
1755
1756 return adv;
1757 }
1758
1759 /* This function requires the caller holds hdev->lock */
1760 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1761 u16 adv_data_len, u8 *adv_data,
1762 u16 scan_rsp_len, u8 *scan_rsp_data)
1763 {
1764 struct adv_info *adv;
1765
1766 adv = hci_find_adv_instance(hdev, instance);
1767
1768 /* If advertisement doesn't exist, we can't modify its data */
1769 if (!adv)
1770 return -ENOENT;
1771
1772 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1773 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1774 memcpy(adv->adv_data, adv_data, adv_data_len);
1775 adv->adv_data_len = adv_data_len;
1776 adv->adv_data_changed = true;
1777 }
1778
1779 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1780 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1781 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1782 adv->scan_rsp_len = scan_rsp_len;
1783 adv->scan_rsp_changed = true;
1784 }
1785
1786 /* Mark as changed if there are flags which would affect it */
1787 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1788 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1789 adv->scan_rsp_changed = true;
1790
1791 return 0;
1792 }
1793
1794 /* This function requires the caller holds hdev->lock */
1795 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1796 {
1797 u32 flags;
1798 struct adv_info *adv;
1799
1800 if (instance == 0x00) {
1801 /* Instance 0 always manages the "Tx Power" and "Flags"
1802 * fields
1803 */
1804 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1805
1806 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1807 * corresponds to the "connectable" instance flag.
1808 */
1809 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1810 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1811
1812 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1813 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1814 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1815 flags |= MGMT_ADV_FLAG_DISCOV;
1816
1817 return flags;
1818 }
1819
1820 adv = hci_find_adv_instance(hdev, instance);
1821
1822 /* Return 0 when we got an invalid instance identifier. */
1823 if (!adv)
1824 return 0;
1825
1826 return adv->flags;
1827 }
1828
1829 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1830 {
1831 struct adv_info *adv;
1832
1833 /* Instance 0x00 always sets the local name */
1834 if (instance == 0x00)
1835 return true;
1836
1837 adv = hci_find_adv_instance(hdev, instance);
1838 if (!adv)
1839 return false;
1840
1841 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1842 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1843 return true;
1844
1845 return adv->scan_rsp_len ? true : false;
1846 }
1847
1848 /* This function requires the caller holds hdev->lock */
1849 void hci_adv_monitors_clear(struct hci_dev *hdev)
1850 {
1851 struct adv_monitor *monitor;
1852 int handle;
1853
1854 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1855 hci_free_adv_monitor(hdev, monitor);
1856
1857 idr_destroy(&hdev->adv_monitors_idr);
1858 }
1859
1860 /* Frees the monitor structure and does some bookkeeping.
1861 * This function requires the caller holds hdev->lock.
1862 */
1863 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1864 {
1865 struct adv_pattern *pattern;
1866 struct adv_pattern *tmp;
1867
1868 if (!monitor)
1869 return;
1870
1871 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1872 list_del(&pattern->list);
1873 kfree(pattern);
1874 }
1875
1876 if (monitor->handle)
1877 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1878
1879 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1880 hdev->adv_monitors_cnt--;
1881 mgmt_adv_monitor_removed(hdev, monitor->handle);
1882 }
1883
1884 kfree(monitor);
1885 }
1886
1887 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1888 * also attempts to forward the request to the controller.
1889 * This function requires the caller holds hci_req_sync_lock.
1890 */
1891 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1892 {
1893 int min, max, handle;
1894 int status = 0;
1895
1896 if (!monitor)
1897 return -EINVAL;
1898
1899 hci_dev_lock(hdev);
1900
1901 min = HCI_MIN_ADV_MONITOR_HANDLE;
1902 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1903 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1904 GFP_KERNEL);
1905
1906 hci_dev_unlock(hdev);
1907
1908 if (handle < 0)
1909 return handle;
1910
1911 monitor->handle = handle;
1912
1913 if (!hdev_is_powered(hdev))
1914 return status;
1915
1916 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1917 case HCI_ADV_MONITOR_EXT_NONE:
1918 bt_dev_dbg(hdev, "add monitor %d status %d",
1919 monitor->handle, status);
1920 /* Message was not forwarded to controller - not an error */
1921 break;
1922
1923 case HCI_ADV_MONITOR_EXT_MSFT:
1924 status = msft_add_monitor_pattern(hdev, monitor);
1925 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1926 handle, status);
1927 break;
1928 }
1929
1930 return status;
1931 }
1932
1933 /* Attempts to tell the controller and free the monitor. If somehow the
1934 * controller doesn't have a corresponding handle, remove anyway.
1935 * This function requires the caller holds hci_req_sync_lock.
1936 */
1937 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1938 struct adv_monitor *monitor)
1939 {
1940 int status = 0;
1941 int handle;
1942
1943 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1944 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1945 bt_dev_dbg(hdev, "remove monitor %d status %d",
1946 monitor->handle, status);
1947 goto free_monitor;
1948
1949 case HCI_ADV_MONITOR_EXT_MSFT:
1950 handle = monitor->handle;
1951 status = msft_remove_monitor(hdev, monitor);
1952 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1953 handle, status);
1954 break;
1955 }
1956
1957 /* In case no matching handle registered, just free the monitor */
1958 if (status == -ENOENT)
1959 goto free_monitor;
1960
1961 return status;
1962
1963 free_monitor:
1964 if (status == -ENOENT)
1965 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1966 monitor->handle);
1967 hci_free_adv_monitor(hdev, monitor);
1968
1969 return status;
1970 }
1971
1972 /* This function requires the caller holds hci_req_sync_lock */
1973 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1974 {
1975 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1976
1977 if (!monitor)
1978 return -EINVAL;
1979
1980 return hci_remove_adv_monitor(hdev, monitor);
1981 }
1982
1983 /* This function requires the caller holds hci_req_sync_lock */
1984 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1985 {
1986 struct adv_monitor *monitor;
1987 int idr_next_id = 0;
1988 int status = 0;
1989
1990 while (1) {
1991 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1992 if (!monitor)
1993 break;
1994
1995 status = hci_remove_adv_monitor(hdev, monitor);
1996 if (status)
1997 return status;
1998
1999 idr_next_id++;
2000 }
2001
2002 return status;
2003 }
2004
2005 /* This function requires the caller holds hdev->lock */
2006 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2007 {
2008 return !idr_is_empty(&hdev->adv_monitors_idr);
2009 }
2010
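/* Report which advertisement monitor offload mechanism the controller
 * supports: MSFT extension offload if available, otherwise none and
 * monitoring is handled entirely by the host.
 */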
2011 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2012 {
2013 if (msft_monitor_supported(hdev))
2014 return HCI_ADV_MONITOR_EXT_MSFT;
2015
2016 return HCI_ADV_MONITOR_EXT_NONE;
2017 }
2018
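/* Look up an entry in a bdaddr list by address and address type. */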
2019 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2020 bdaddr_t *bdaddr, u8 type)
2021 {
2022 struct bdaddr_list *b;
2023
2024 list_for_each_entry(b, bdaddr_list, list) {
2025 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2026 return b;
2027 }
2028
2029 return NULL;
2030 }
2031
2032 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2033 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2034 u8 type)
2035 {
2036 struct bdaddr_list_with_irk *b;
2037
2038 list_for_each_entry(b, bdaddr_list, list) {
2039 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2040 return b;
2041 }
2042
2043 return NULL;
2044 }
2045
2046 struct bdaddr_list_with_flags *
2047 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2048 bdaddr_t *bdaddr, u8 type)
2049 {
2050 struct bdaddr_list_with_flags *b;
2051
2052 list_for_each_entry(b, bdaddr_list, list) {
2053 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2054 return b;
2055 }
2056
2057 return NULL;
2058 }
2059
2060 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2061 {
2062 struct bdaddr_list *b, *n;
2063
2064 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2065 list_del(&b->list);
2066 kfree(b);
2067 }
2068 }
2069
2070 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2071 {
2072 struct bdaddr_list *entry;
2073
2074 if (!bacmp(bdaddr, BDADDR_ANY))
2075 return -EBADF;
2076
2077 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2078 return -EEXIST;
2079
2080 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2081 if (!entry)
2082 return -ENOMEM;
2083
2084 bacpy(&entry->bdaddr, bdaddr);
2085 entry->bdaddr_type = type;
2086
2087 list_add(&entry->list, list);
2088
2089 return 0;
2090 }
2091
2092 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2093 u8 type, u8 *peer_irk, u8 *local_irk)
2094 {
2095 struct bdaddr_list_with_irk *entry;
2096
2097 if (!bacmp(bdaddr, BDADDR_ANY))
2098 return -EBADF;
2099
2100 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2101 return -EEXIST;
2102
2103 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2104 if (!entry)
2105 return -ENOMEM;
2106
2107 bacpy(&entry->bdaddr, bdaddr);
2108 entry->bdaddr_type = type;
2109
2110 if (peer_irk)
2111 memcpy(entry->peer_irk, peer_irk, 16);
2112
2113 if (local_irk)
2114 memcpy(entry->local_irk, local_irk, 16);
2115
2116 list_add(&entry->list, list);
2117
2118 return 0;
2119 }
2120
2121 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2122 u8 type, u32 flags)
2123 {
2124 struct bdaddr_list_with_flags *entry;
2125
2126 if (!bacmp(bdaddr, BDADDR_ANY))
2127 return -EBADF;
2128
2129 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2130 return -EEXIST;
2131
2132 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2133 if (!entry)
2134 return -ENOMEM;
2135
2136 bacpy(&entry->bdaddr, bdaddr);
2137 entry->bdaddr_type = type;
2138 entry->flags = flags;
2139
2140 list_add(&entry->list, list);
2141
2142 return 0;
2143 }
2144
2145 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2146 {
2147 struct bdaddr_list *entry;
2148
2149 if (!bacmp(bdaddr, BDADDR_ANY)) {
2150 hci_bdaddr_list_clear(list);
2151 return 0;
2152 }
2153
2154 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2155 if (!entry)
2156 return -ENOENT;
2157
2158 list_del(&entry->list);
2159 kfree(entry);
2160
2161 return 0;
2162 }
2163
2164 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2165 u8 type)
2166 {
2167 struct bdaddr_list_with_irk *entry;
2168
2169 if (!bacmp(bdaddr, BDADDR_ANY)) {
2170 hci_bdaddr_list_clear(list);
2171 return 0;
2172 }
2173
2174 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2175 if (!entry)
2176 return -ENOENT;
2177
2178 list_del(&entry->list);
2179 kfree(entry);
2180
2181 return 0;
2182 }
2183
2184 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2185 u8 type)
2186 {
2187 struct bdaddr_list_with_flags *entry;
2188
2189 if (!bacmp(bdaddr, BDADDR_ANY)) {
2190 hci_bdaddr_list_clear(list);
2191 return 0;
2192 }
2193
2194 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2195 if (!entry)
2196 return -ENOENT;
2197
2198 list_del(&entry->list);
2199 kfree(entry);
2200
2201 return 0;
2202 }
2203
2204 /* This function requires the caller holds hdev->lock */
2205 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2206 bdaddr_t *addr, u8 addr_type)
2207 {
2208 struct hci_conn_params *params;
2209
2210 list_for_each_entry(params, &hdev->le_conn_params, list) {
2211 if (bacmp(&params->addr, addr) == 0 &&
2212 params->addr_type == addr_type) {
2213 return params;
2214 }
2215 }
2216
2217 return NULL;
2218 }
2219
2220 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2221 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2222 bdaddr_t *addr, u8 addr_type)
2223 {
2224 struct hci_conn_params *param;
2225
2226 rcu_read_lock();
2227
2228 list_for_each_entry_rcu(param, list, action) {
2229 if (bacmp(&param->addr, addr) == 0 &&
2230 param->addr_type == addr_type) {
2231 rcu_read_unlock();
2232 return param;
2233 }
2234 }
2235
2236 rcu_read_unlock();
2237
2238 return NULL;
2239 }
2240
2241 /* This function requires the caller holds hdev->lock */
2242 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2243 {
2244 if (list_empty(&param->action))
2245 return;
2246
2247 list_del_rcu(&param->action);
2248 synchronize_rcu();
2249 INIT_LIST_HEAD(&param->action);
2250 }
2251
2252 /* This function requires the caller holds hdev->lock */
2253 void hci_pend_le_list_add(struct hci_conn_params *param,
2254 struct list_head *list)
2255 {
2256 list_add_rcu(&param->action, list);
2257 }
2258
2259 /* This function requires the caller holds hdev->lock */
2260 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2261 bdaddr_t *addr, u8 addr_type)
2262 {
2263 struct hci_conn_params *params;
2264
2265 params = hci_conn_params_lookup(hdev, addr, addr_type);
2266 if (params)
2267 return params;
2268
2269 params = kzalloc(sizeof(*params), GFP_KERNEL);
2270 if (!params) {
2271 bt_dev_err(hdev, "out of memory");
2272 return NULL;
2273 }
2274
2275 bacpy(&params->addr, addr);
2276 params->addr_type = addr_type;
2277
2278 list_add(&params->list, &hdev->le_conn_params);
2279 INIT_LIST_HEAD(&params->action);
2280
2281 params->conn_min_interval = hdev->le_conn_min_interval;
2282 params->conn_max_interval = hdev->le_conn_max_interval;
2283 params->conn_latency = hdev->le_conn_latency;
2284 params->supervision_timeout = hdev->le_supv_timeout;
2285 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2286
2287 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2288
2289 return params;
2290 }
2291
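/* Free connection parameters: remove any pending LE action entry,
 * drop the reference on an attached connection and unlink the params.
 */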
2292 void hci_conn_params_free(struct hci_conn_params *params)
2293 {
2294 hci_pend_le_list_del_init(params);
2295
2296 if (params->conn) {
2297 hci_conn_drop(params->conn);
2298 hci_conn_put(params->conn);
2299 }
2300
2301 list_del(&params->list);
2302 kfree(params);
2303 }
2304
2305 /* This function requires the caller holds hdev->lock */
2306 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2307 {
2308 struct hci_conn_params *params;
2309
2310 params = hci_conn_params_lookup(hdev, addr, addr_type);
2311 if (!params)
2312 return;
2313
2314 hci_conn_params_free(params);
2315
2316 hci_update_passive_scan(hdev);
2317
2318 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2319 }
2320
2321 /* This function requires the caller holds hdev->lock */
2322 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2323 {
2324 struct hci_conn_params *params, *tmp;
2325
2326 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2327 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2328 continue;
2329
2330 /* If trying to establish a one-time connection to a disabled
2331 * device, leave the params but mark them for explicit connect only.
2332 */
2333 if (params->explicit_connect) {
2334 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2335 continue;
2336 }
2337
2338 hci_conn_params_free(params);
2339 }
2340
2341 BT_DBG("All LE disabled connection parameters were removed");
2342 }
2343
2344 /* This function requires the caller holds hdev->lock */
2345 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2346 {
2347 struct hci_conn_params *params, *tmp;
2348
2349 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2350 hci_conn_params_free(params);
2351
2352 BT_DBG("All LE connection parameters were removed");
2353 }
2354
2355 /* Copy the Identity Address of the controller.
2356 *
2357 * If the controller has a public BD_ADDR, then by default use that one.
2358 * If this is a LE only controller without a public address, default to
2359 * the static random address.
2360 *
2361 * For debugging purposes it is possible to force controllers with a
2362 * public address to use the static random address instead.
2363 *
2364 * In case BR/EDR has been disabled on a dual-mode controller and
2365 * userspace has configured a static address, then that address
2366 * becomes the identity address instead of the public BR/EDR address.
2367 */
2368 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2369 u8 *bdaddr_type)
2370 {
2371 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2372 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2373 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2374 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2375 bacpy(bdaddr, &hdev->static_addr);
2376 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2377 } else {
2378 bacpy(bdaddr, &hdev->bdaddr);
2379 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2380 }
2381 }
2382
2383 static void hci_clear_wake_reason(struct hci_dev *hdev)
2384 {
2385 hci_dev_lock(hdev);
2386
2387 hdev->wake_reason = 0;
2388 bacpy(&hdev->wake_addr, BDADDR_ANY);
2389 hdev->wake_addr_type = 0;
2390
2391 hci_dev_unlock(hdev);
2392 }
2393
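/* PM notifier callback: translate system suspend/hibernation events
 * into hci_suspend_dev()/hci_resume_dev() calls, unless userspace has
 * taken exclusive control of the device via the HCI user channel.
 */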
2394 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2395 void *data)
2396 {
2397 struct hci_dev *hdev =
2398 container_of(nb, struct hci_dev, suspend_notifier);
2399 int ret = 0;
2400
2401 /* Userspace has full control of this device. Do nothing. */
2402 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2403 return NOTIFY_DONE;
2404
2405 /* To avoid a potential race with hci_unregister_dev. */
2406 hci_dev_hold(hdev);
2407
2408 switch (action) {
2409 case PM_HIBERNATION_PREPARE:
2410 case PM_SUSPEND_PREPARE:
2411 ret = hci_suspend_dev(hdev);
2412 break;
2413 case PM_POST_HIBERNATION:
2414 case PM_POST_SUSPEND:
2415 ret = hci_resume_dev(hdev);
2416 break;
2417 }
2418
2419 if (ret)
2420 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2421 action, ret);
2422
2423 hci_dev_put(hdev);
2424 return NOTIFY_DONE;
2425 }
2426
2427 /* Alloc HCI device */
2428 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2429 {
2430 struct hci_dev *hdev;
2431 unsigned int alloc_size;
2432
2433 alloc_size = sizeof(*hdev);
2434 if (sizeof_priv) {
2435 /* Fixme: May need ALIGN-ment? */
2436 alloc_size += sizeof_priv;
2437 }
2438
2439 hdev = kzalloc(alloc_size, GFP_KERNEL);
2440 if (!hdev)
2441 return NULL;
2442
2443 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2444 hdev->esco_type = (ESCO_HV1);
2445 hdev->link_mode = (HCI_LM_ACCEPT);
2446 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2447 hdev->io_capability = 0x03; /* No Input No Output */
2448 hdev->manufacturer = 0xffff; /* Default to internal use */
2449 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2450 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2451 hdev->adv_instance_cnt = 0;
2452 hdev->cur_adv_instance = 0x00;
2453 hdev->adv_instance_timeout = 0;
2454
2455 hdev->advmon_allowlist_duration = 300;
2456 hdev->advmon_no_filter_duration = 500;
2457 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2458
2459 hdev->sniff_max_interval = 800;
2460 hdev->sniff_min_interval = 80;
2461
2462 hdev->le_adv_channel_map = 0x07;
2463 hdev->le_adv_min_interval = 0x0800;
2464 hdev->le_adv_max_interval = 0x0800;
2465 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2466 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2467 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2468 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2469 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2470 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2471 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2472 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2473 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2474 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2475 hdev->le_conn_min_interval = 0x0018;
2476 hdev->le_conn_max_interval = 0x0028;
2477 hdev->le_conn_latency = 0x0000;
2478 hdev->le_supv_timeout = 0x002a;
2479 hdev->le_def_tx_len = 0x001b;
2480 hdev->le_def_tx_time = 0x0148;
2481 hdev->le_max_tx_len = 0x001b;
2482 hdev->le_max_tx_time = 0x0148;
2483 hdev->le_max_rx_len = 0x001b;
2484 hdev->le_max_rx_time = 0x0148;
2485 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2486 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2487 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2488 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2489 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2490 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2491 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2492 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2493 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2494
2495 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2496 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2497 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2498 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2499 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2500 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2501
2502 /* default 1.28 sec page scan */
2503 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2504 hdev->def_page_scan_int = 0x0800;
2505 hdev->def_page_scan_window = 0x0012;
2506
2507 mutex_init(&hdev->lock);
2508 mutex_init(&hdev->req_lock);
2509
2510 ida_init(&hdev->unset_handle_ida);
2511
2512 INIT_LIST_HEAD(&hdev->mesh_pending);
2513 INIT_LIST_HEAD(&hdev->mgmt_pending);
2514 INIT_LIST_HEAD(&hdev->reject_list);
2515 INIT_LIST_HEAD(&hdev->accept_list);
2516 INIT_LIST_HEAD(&hdev->uuids);
2517 INIT_LIST_HEAD(&hdev->link_keys);
2518 INIT_LIST_HEAD(&hdev->long_term_keys);
2519 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2520 INIT_LIST_HEAD(&hdev->remote_oob_data);
2521 INIT_LIST_HEAD(&hdev->le_accept_list);
2522 INIT_LIST_HEAD(&hdev->le_resolv_list);
2523 INIT_LIST_HEAD(&hdev->le_conn_params);
2524 INIT_LIST_HEAD(&hdev->pend_le_conns);
2525 INIT_LIST_HEAD(&hdev->pend_le_reports);
2526 INIT_LIST_HEAD(&hdev->conn_hash.list);
2527 INIT_LIST_HEAD(&hdev->adv_instances);
2528 INIT_LIST_HEAD(&hdev->blocked_keys);
2529 INIT_LIST_HEAD(&hdev->monitored_devices);
2530
2531 INIT_LIST_HEAD(&hdev->local_codecs);
2532 INIT_WORK(&hdev->rx_work, hci_rx_work);
2533 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2534 INIT_WORK(&hdev->tx_work, hci_tx_work);
2535 INIT_WORK(&hdev->power_on, hci_power_on);
2536 INIT_WORK(&hdev->error_reset, hci_error_reset);
2537
2538 hci_cmd_sync_init(hdev);
2539
2540 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2541
2542 skb_queue_head_init(&hdev->rx_q);
2543 skb_queue_head_init(&hdev->cmd_q);
2544 skb_queue_head_init(&hdev->raw_q);
2545
2546 init_waitqueue_head(&hdev->req_wait_q);
2547
2548 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2549 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2550
2551 hci_devcd_setup(hdev);
2552
2553 hci_init_sysfs(hdev);
2554 discovery_init(hdev);
2555
2556 return hdev;
2557 }
2558 EXPORT_SYMBOL(hci_alloc_dev_priv);
2559
2560 /* Free HCI device */
2561 void hci_free_dev(struct hci_dev *hdev)
2562 {
2563 /* will be freed via device release */
2564 put_device(&hdev->dev);
2565 }
2566 EXPORT_SYMBOL(hci_free_dev);
2567
2568 /* Register HCI device */
2569 int hci_register_dev(struct hci_dev *hdev)
2570 {
2571 int id, error;
2572
2573 if (!hdev->open || !hdev->close || !hdev->send)
2574 return -EINVAL;
2575
2576 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2577 if (id < 0)
2578 return id;
2579
2580 error = dev_set_name(&hdev->dev, "hci%u", id);
2581 if (error)
2582 return error;
2583
2584 hdev->name = dev_name(&hdev->dev);
2585 hdev->id = id;
2586
2587 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2588
2589 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2590 if (!hdev->workqueue) {
2591 error = -ENOMEM;
2592 goto err;
2593 }
2594
2595 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2596 hdev->name);
2597 if (!hdev->req_workqueue) {
2598 destroy_workqueue(hdev->workqueue);
2599 error = -ENOMEM;
2600 goto err;
2601 }
2602
2603 if (!IS_ERR_OR_NULL(bt_debugfs))
2604 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2605
2606 error = device_add(&hdev->dev);
2607 if (error < 0)
2608 goto err_wqueue;
2609
2610 hci_leds_init(hdev);
2611
2612 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2613 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2614 hdev);
2615 if (hdev->rfkill) {
2616 if (rfkill_register(hdev->rfkill) < 0) {
2617 rfkill_destroy(hdev->rfkill);
2618 hdev->rfkill = NULL;
2619 }
2620 }
2621
2622 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2623 hci_dev_set_flag(hdev, HCI_RFKILLED);
2624
2625 hci_dev_set_flag(hdev, HCI_SETUP);
2626 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2627
2628 /* Assume BR/EDR support until proven otherwise (such as
2629 * through reading supported features during init).
2630 */
2631 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2632
2633 write_lock(&hci_dev_list_lock);
2634 list_add(&hdev->list, &hci_dev_list);
2635 write_unlock(&hci_dev_list_lock);
2636
2637 /* Devices that are marked for raw-only usage are unconfigured
2638 * and should not be included in normal operation.
2639 */
2640 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2641 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2642
2643 /* Mark the Remote Wakeup connection flag as supported if the driver
2644 * has a wakeup callback.
2645 */
2646 if (hdev->wakeup)
2647 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2648
2649 hci_sock_dev_event(hdev, HCI_DEV_REG);
2650 hci_dev_hold(hdev);
2651
2652 error = hci_register_suspend_notifier(hdev);
2653 if (error)
2654 BT_WARN("register suspend notifier failed error:%d\n", error);
2655
2656 queue_work(hdev->req_workqueue, &hdev->power_on);
2657
2658 idr_init(&hdev->adv_monitors_idr);
2659 msft_register(hdev);
2660
2661 return id;
2662
2663 err_wqueue:
2664 debugfs_remove_recursive(hdev->debugfs);
2665 destroy_workqueue(hdev->workqueue);
2666 destroy_workqueue(hdev->req_workqueue);
2667 err:
2668 ida_free(&hci_index_ida, hdev->id);
2669
2670 return error;
2671 }
2672 EXPORT_SYMBOL(hci_register_dev);
2673
2674 /* Unregister HCI device */
2675 void hci_unregister_dev(struct hci_dev *hdev)
2676 {
2677 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2678
2679 mutex_lock(&hdev->unregister_lock);
2680 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2681 mutex_unlock(&hdev->unregister_lock);
2682
2683 write_lock(&hci_dev_list_lock);
2684 list_del(&hdev->list);
2685 write_unlock(&hci_dev_list_lock);
2686
2687 disable_work_sync(&hdev->rx_work);
2688 disable_work_sync(&hdev->cmd_work);
2689 disable_work_sync(&hdev->tx_work);
2690 disable_work_sync(&hdev->power_on);
2691 disable_work_sync(&hdev->error_reset);
2692
2693 hci_cmd_sync_clear(hdev);
2694
2695 hci_unregister_suspend_notifier(hdev);
2696
2697 hci_dev_do_close(hdev);
2698
2699 if (!test_bit(HCI_INIT, &hdev->flags) &&
2700 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2701 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2702 hci_dev_lock(hdev);
2703 mgmt_index_removed(hdev);
2704 hci_dev_unlock(hdev);
2705 }
2706
2707 /* mgmt_index_removed should take care of emptying the
2708 * pending list */
2709 BUG_ON(!list_empty(&hdev->mgmt_pending));
2710
2711 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2712
2713 if (hdev->rfkill) {
2714 rfkill_unregister(hdev->rfkill);
2715 rfkill_destroy(hdev->rfkill);
2716 }
2717
2718 device_del(&hdev->dev);
2719 /* Actual cleanup is deferred until hci_release_dev(). */
2720 hci_dev_put(hdev);
2721 }
2722 EXPORT_SYMBOL(hci_unregister_dev);
2723
2724 /* Release HCI device */
2725 void hci_release_dev(struct hci_dev *hdev)
2726 {
2727 debugfs_remove_recursive(hdev->debugfs);
2728 kfree_const(hdev->hw_info);
2729 kfree_const(hdev->fw_info);
2730
2731 destroy_workqueue(hdev->workqueue);
2732 destroy_workqueue(hdev->req_workqueue);
2733
2734 hci_dev_lock(hdev);
2735 hci_bdaddr_list_clear(&hdev->reject_list);
2736 hci_bdaddr_list_clear(&hdev->accept_list);
2737 hci_uuids_clear(hdev);
2738 hci_link_keys_clear(hdev);
2739 hci_smp_ltks_clear(hdev);
2740 hci_smp_irks_clear(hdev);
2741 hci_remote_oob_data_clear(hdev);
2742 hci_adv_instances_clear(hdev);
2743 hci_adv_monitors_clear(hdev);
2744 hci_bdaddr_list_clear(&hdev->le_accept_list);
2745 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2746 hci_conn_params_clear_all(hdev);
2747 hci_discovery_filter_clear(hdev);
2748 hci_blocked_keys_clear(hdev);
2749 hci_codec_list_clear(&hdev->local_codecs);
2750 msft_release(hdev);
2751 hci_dev_unlock(hdev);
2752
2753 ida_destroy(&hdev->unset_handle_ida);
2754 ida_free(&hci_index_ida, hdev->id);
2755 kfree_skb(hdev->sent_cmd);
2756 kfree_skb(hdev->req_skb);
2757 kfree_skb(hdev->recv_event);
2758 kfree(hdev);
2759 }
2760 EXPORT_SYMBOL(hci_release_dev);
2761
2762 int hci_register_suspend_notifier(struct hci_dev *hdev)
2763 {
2764 int ret = 0;
2765
2766 if (!hdev->suspend_notifier.notifier_call &&
2767 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2768 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2769 ret = register_pm_notifier(&hdev->suspend_notifier);
2770 }
2771
2772 return ret;
2773 }
2774
2775 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2776 {
2777 int ret = 0;
2778
2779 if (hdev->suspend_notifier.notifier_call) {
2780 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2781 if (!ret)
2782 hdev->suspend_notifier.notifier_call = NULL;
2783 }
2784
2785 return ret;
2786 }
2787
2788 /* Cancel ongoing command synchronously:
2789 *
2790 * - Cancel command timer
2791 * - Reset command counter
2792 * - Cancel command request
2793 */
2794 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2795 {
2796 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2797
2798 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2799 disable_delayed_work_sync(&hdev->cmd_timer);
2800 disable_delayed_work_sync(&hdev->ncmd_timer);
2801 } else {
2802 cancel_delayed_work_sync(&hdev->cmd_timer);
2803 cancel_delayed_work_sync(&hdev->ncmd_timer);
2804 }
2805
2806 atomic_set(&hdev->cmd_cnt, 1);
2807
2808 hci_cmd_sync_cancel_sync(hdev, err);
2809 }
2810
2811 /* Suspend HCI device */
2812 int hci_suspend_dev(struct hci_dev *hdev)
2813 {
2814 int ret;
2815
2816 bt_dev_dbg(hdev, "");
2817
2818 /* Suspend should only act when the device is powered. */
2819 if (!hdev_is_powered(hdev) ||
2820 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2821 return 0;
2822
2823 /* If powering down don't attempt to suspend */
2824 if (mgmt_powering_down(hdev))
2825 return 0;
2826
2827 /* Cancel potentially blocking sync operation before suspend */
2828 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2829
2830 hci_req_sync_lock(hdev);
2831 ret = hci_suspend_sync(hdev);
2832 hci_req_sync_unlock(hdev);
2833
2834 hci_clear_wake_reason(hdev);
2835 mgmt_suspending(hdev, hdev->suspend_state);
2836
2837 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2838 return ret;
2839 }
2840 EXPORT_SYMBOL(hci_suspend_dev);
2841
2842 /* Resume HCI device */
2843 int hci_resume_dev(struct hci_dev *hdev)
2844 {
2845 int ret;
2846
2847 bt_dev_dbg(hdev, "");
2848
2849 /* Resume should only act when the device is powered. */
2850 if (!hdev_is_powered(hdev) ||
2851 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2852 return 0;
2853
2854 /* If powering down don't attempt to resume */
2855 if (mgmt_powering_down(hdev))
2856 return 0;
2857
2858 hci_req_sync_lock(hdev);
2859 ret = hci_resume_sync(hdev);
2860 hci_req_sync_unlock(hdev);
2861
2862 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2863 hdev->wake_addr_type);
2864
2865 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2866 return ret;
2867 }
2868 EXPORT_SYMBOL(hci_resume_dev);
2869
2870 /* Reset HCI device */
2871 int hci_reset_dev(struct hci_dev *hdev)
2872 {
2873 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2874 struct sk_buff *skb;
2875
2876 skb = bt_skb_alloc(3, GFP_ATOMIC);
2877 if (!skb)
2878 return -ENOMEM;
2879
2880 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2881 skb_put_data(skb, hw_err, 3);
2882
2883 bt_dev_err(hdev, "Injecting HCI hardware error event");
2884
2885 /* Send Hardware Error to upper stack */
2886 return hci_recv_frame(hdev, skb);
2887 }
2888 EXPORT_SYMBOL(hci_reset_dev);
2889
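/* Let the driver override the packet type classification if it
 * provides a classify_pkt_type callback; otherwise keep the type
 * already set on the skb.
 */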
2890 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2891 {
2892 if (hdev->classify_pkt_type)
2893 return hdev->classify_pkt_type(hdev, skb);
2894
2895 return hci_skb_pkt_type(skb);
2896 }
2897
2898 /* Receive frame from HCI drivers */
2899 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2900 {
2901 u8 dev_pkt_type;
2902
2903 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2904 && !test_bit(HCI_INIT, &hdev->flags))) {
2905 kfree_skb(skb);
2906 return -ENXIO;
2907 }
2908
2909 /* Check if the driver agrees with the packet type classification */
2910 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2911 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2912 hci_skb_pkt_type(skb) = dev_pkt_type;
2913 }
2914
2915 switch (hci_skb_pkt_type(skb)) {
2916 case HCI_EVENT_PKT:
2917 break;
2918 case HCI_ACLDATA_PKT:
2919 /* Detect if ISO packet has been sent as ACL */
2920 if (hci_conn_num(hdev, ISO_LINK)) {
2921 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2922 __u8 type;
2923
2924 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2925 if (type == ISO_LINK)
2926 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2927 }
2928 break;
2929 case HCI_SCODATA_PKT:
2930 break;
2931 case HCI_ISODATA_PKT:
2932 break;
2933 default:
2934 kfree_skb(skb);
2935 return -EINVAL;
2936 }
2937
2938 /* Incoming skb */
2939 bt_cb(skb)->incoming = 1;
2940
2941 /* Time stamp */
2942 __net_timestamp(skb);
2943
2944 skb_queue_tail(&hdev->rx_q, skb);
2945 queue_work(hdev->workqueue, &hdev->rx_work);
2946
2947 return 0;
2948 }
2949 EXPORT_SYMBOL(hci_recv_frame);
2950
2951 /* Receive diagnostic message from HCI drivers */
2952 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2953 {
2954 /* Mark as diagnostic packet */
2955 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2956
2957 /* Time stamp */
2958 __net_timestamp(skb);
2959
2960 skb_queue_tail(&hdev->rx_q, skb);
2961 queue_work(hdev->workqueue, &hdev->rx_work);
2962
2963 return 0;
2964 }
2965 EXPORT_SYMBOL(hci_recv_diag);
2966
2967 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2968 {
2969 va_list vargs;
2970
2971 va_start(vargs, fmt);
2972 kfree_const(hdev->hw_info);
2973 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2974 va_end(vargs);
2975 }
2976 EXPORT_SYMBOL(hci_set_hw_info);
2977
2978 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2979 {
2980 va_list vargs;
2981
2982 va_start(vargs, fmt);
2983 kfree_const(hdev->fw_info);
2984 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2985 va_end(vargs);
2986 }
2987 EXPORT_SYMBOL(hci_set_fw_info);
2988
2989 /* ---- Interface to upper protocols ---- */
2990
2991 int hci_register_cb(struct hci_cb *cb)
2992 {
2993 BT_DBG("%p name %s", cb, cb->name);
2994
2995 list_add_tail_rcu(&cb->list, &hci_cb_list);
2996
2997 return 0;
2998 }
2999 EXPORT_SYMBOL(hci_register_cb);
3000
3001 int hci_unregister_cb(struct hci_cb *cb)
3002 {
3003 BT_DBG("%p name %s", cb, cb->name);
3004
3005 list_del_rcu(&cb->list);
3006 synchronize_rcu();
3007
3008 return 0;
3009 }
3010 EXPORT_SYMBOL(hci_unregister_cb);
3011
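/* Hand a single frame to the driver: timestamp it, mirror it to the
 * monitor (and to promiscuous sockets) and pass it to hdev->send().
 */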
3012 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3013 {
3014 int err;
3015
3016 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3017 skb->len);
3018
3019 /* Time stamp */
3020 __net_timestamp(skb);
3021
3022 /* Send copy to monitor */
3023 hci_send_to_monitor(hdev, skb);
3024
3025 if (atomic_read(&hdev->promisc)) {
3026 /* Send copy to the sockets */
3027 hci_send_to_sock(hdev, skb);
3028 }
3029
3030 /* Get rid of skb owner, prior to sending to the driver. */
3031 skb_orphan(skb);
3032
3033 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3034 kfree_skb(skb);
3035 return -EINVAL;
3036 }
3037
3038 err = hdev->send(hdev, skb);
3039 if (err < 0) {
3040 bt_dev_err(hdev, "sending frame failed (%d)", err);
3041 kfree_skb(skb);
3042 return err;
3043 }
3044
3045 return 0;
3046 }
3047
3048 /* Send HCI command */
3049 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3050 const void *param)
3051 {
3052 struct sk_buff *skb;
3053
3054 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3055
3056 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3057 if (!skb) {
3058 bt_dev_err(hdev, "no memory for command");
3059 return -ENOMEM;
3060 }
3061
3062 /* Stand-alone HCI commands must be flagged as
3063 * single-command requests.
3064 */
3065 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3066
3067 skb_queue_tail(&hdev->cmd_q, skb);
3068 queue_work(hdev->workqueue, &hdev->cmd_work);
3069
3070 return 0;
3071 }
3072
3073 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3074 const void *param)
3075 {
3076 struct sk_buff *skb;
3077
3078 if (hci_opcode_ogf(opcode) != 0x3f) {
3079 /* A controller receiving a command shall respond with either
3080 * a Command Status Event or a Command Complete Event.
3081 * Therefore, all standard HCI commands must be sent via the
3082 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3083 * Some vendors do not comply with this rule for vendor-specific
3084 * commands and do not return any event. We want to support
3085 * unresponded commands for such cases only.
3086 */
3087 bt_dev_err(hdev, "unresponded command not supported");
3088 return -EINVAL;
3089 }
3090
3091 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3092 if (!skb) {
3093 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3094 opcode);
3095 return -ENOMEM;
3096 }
3097
3098 hci_send_frame(hdev, skb);
3099
3100 return 0;
3101 }
3102 EXPORT_SYMBOL(__hci_cmd_send);
3103
3104 /* Get data from the previously sent command */
3105 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3106 {
3107 struct hci_command_hdr *hdr;
3108
3109 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3110 return NULL;
3111
3112 hdr = (void *)skb->data;
3113
3114 if (hdr->opcode != cpu_to_le16(opcode))
3115 return NULL;
3116
3117 return skb->data + HCI_COMMAND_HDR_SIZE;
3118 }
3119
3120 /* Get data from the previously sent command */
3121 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3122 {
3123 void *data;
3124
3125 /* Check if opcode matches last sent command */
3126 data = hci_cmd_data(hdev->sent_cmd, opcode);
3127 if (!data)
3128 /* Check if opcode matches last request */
3129 data = hci_cmd_data(hdev->req_skb, opcode);
3130
3131 return data;
3132 }
3133
3134 /* Get data from last received event */
3135 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3136 {
3137 struct hci_event_hdr *hdr;
3138 int offset;
3139
3140 if (!hdev->recv_event)
3141 return NULL;
3142
3143 hdr = (void *)hdev->recv_event->data;
3144 offset = sizeof(*hdr);
3145
3146 if (hdr->evt != event) {
3147 /* In case of an LE meta event, check whether the subevent matches */
3148 if (hdr->evt == HCI_EV_LE_META) {
3149 struct hci_ev_le_meta *ev;
3150
3151 ev = (void *)hdev->recv_event->data + offset;
3152 offset += sizeof(*ev);
3153 if (ev->subevent == event)
3154 goto found;
3155 }
3156 return NULL;
3157 }
3158
3159 found:
3160 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3161
3162 return hdev->recv_event->data + offset;
3163 }
3164
3165 /* Send ACL data */
3166 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3167 {
3168 struct hci_acl_hdr *hdr;
3169 int len = skb->len;
3170
3171 skb_push(skb, HCI_ACL_HDR_SIZE);
3172 skb_reset_transport_header(skb);
3173 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3174 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3175 hdr->dlen = cpu_to_le16(len);
3176 }
3177
3178 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3179 struct sk_buff *skb, __u16 flags)
3180 {
3181 struct hci_conn *conn = chan->conn;
3182 struct hci_dev *hdev = conn->hdev;
3183 struct sk_buff *list;
3184
3185 skb->len = skb_headlen(skb);
3186 skb->data_len = 0;
3187
3188 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3189
3190 hci_add_acl_hdr(skb, conn->handle, flags);
3191
3192 list = skb_shinfo(skb)->frag_list;
3193 if (!list) {
3194 /* Non fragmented */
3195 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3196
3197 skb_queue_tail(queue, skb);
3198 } else {
3199 /* Fragmented */
3200 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3201
3202 skb_shinfo(skb)->frag_list = NULL;
3203
3204 /* Queue all fragments atomically. We need to use spin_lock_bh
3205 * here because with 6LoWPAN links this function can be called
3206 * from softirq context, and using a normal spin lock could
3207 * cause deadlocks.
3208 */
3209 spin_lock_bh(&queue->lock);
3210
3211 __skb_queue_tail(queue, skb);
3212
3213 flags &= ~ACL_START;
3214 flags |= ACL_CONT;
3215 do {
3216 skb = list; list = list->next;
3217
3218 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3219 hci_add_acl_hdr(skb, conn->handle, flags);
3220
3221 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3222
3223 __skb_queue_tail(queue, skb);
3224 } while (list);
3225
3226 spin_unlock_bh(&queue->lock);
3227 }
3228 }
3229
3230 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3231 {
3232 struct hci_dev *hdev = chan->conn->hdev;
3233
3234 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3235
3236 hci_queue_acl(chan, &chan->data_q, skb, flags);
3237
3238 queue_work(hdev->workqueue, &hdev->tx_work);
3239 }
3240
3241 /* Send SCO data */
3242 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3243 {
3244 struct hci_dev *hdev = conn->hdev;
3245 struct hci_sco_hdr hdr;
3246
3247 BT_DBG("%s len %d", hdev->name, skb->len);
3248
3249 hdr.handle = cpu_to_le16(conn->handle);
3250 hdr.dlen = skb->len;
3251
3252 skb_push(skb, HCI_SCO_HDR_SIZE);
3253 skb_reset_transport_header(skb);
3254 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3255
3256 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3257
3258 skb_queue_tail(&conn->data_q, skb);
3259 queue_work(hdev->workqueue, &hdev->tx_work);
3260 }
3261
3262 /* Send ISO data */
3263 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3264 {
3265 struct hci_iso_hdr *hdr;
3266 int len = skb->len;
3267
3268 skb_push(skb, HCI_ISO_HDR_SIZE);
3269 skb_reset_transport_header(skb);
3270 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3271 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3272 hdr->dlen = cpu_to_le16(len);
3273 }
3274
3275 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3276 struct sk_buff *skb)
3277 {
3278 struct hci_dev *hdev = conn->hdev;
3279 struct sk_buff *list;
3280 __u16 flags;
3281
3282 skb->len = skb_headlen(skb);
3283 skb->data_len = 0;
3284
3285 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3286
3287 list = skb_shinfo(skb)->frag_list;
3288
3289 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3290 hci_add_iso_hdr(skb, conn->handle, flags);
3291
3292 if (!list) {
3293 /* Non fragmented */
3294 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3295
3296 skb_queue_tail(queue, skb);
3297 } else {
3298 /* Fragmented */
3299 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3300
3301 skb_shinfo(skb)->frag_list = NULL;
3302
3303 __skb_queue_tail(queue, skb);
3304
3305 do {
3306 skb = list; list = list->next;
3307
3308 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3309 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3310 0x00);
3311 hci_add_iso_hdr(skb, conn->handle, flags);
3312
3313 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3314
3315 __skb_queue_tail(queue, skb);
3316 } while (list);
3317 }
3318 }
3319
3320 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3321 {
3322 struct hci_dev *hdev = conn->hdev;
3323
3324 BT_DBG("%s len %d", hdev->name, skb->len);
3325
3326 hci_queue_iso(conn, &conn->data_q, skb);
3327
3328 queue_work(hdev->workqueue, &hdev->tx_work);
3329 }
3330
3331 /* ---- HCI TX task (outgoing data) ---- */
3332
3333 /* HCI Connection scheduler */
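/* Compute the TX quota for a connection: the available controller
 * buffer count for its link type divided by the number of connections
 * competing for it, with a minimum of one packet.
 */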
3334 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3335 {
3336 struct hci_dev *hdev;
3337 int cnt, q;
3338
3339 if (!conn) {
3340 *quote = 0;
3341 return;
3342 }
3343
3344 hdev = conn->hdev;
3345
3346 switch (conn->type) {
3347 case ACL_LINK:
3348 cnt = hdev->acl_cnt;
3349 break;
3350 case SCO_LINK:
3351 case ESCO_LINK:
3352 cnt = hdev->sco_cnt;
3353 break;
3354 case LE_LINK:
3355 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3356 break;
3357 case ISO_LINK:
3358 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3359 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3360 break;
3361 default:
3362 cnt = 0;
3363 bt_dev_err(hdev, "unknown link type %d", conn->type);
3364 }
3365
3366 q = cnt / num;
3367 *quote = q ? q : 1;
3368 }
3369
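/* Pick the connection of the given type that has pending data and the
 * fewest packets in flight, and compute its TX quota.
 */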
3370 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3371 int *quote)
3372 {
3373 struct hci_conn_hash *h = &hdev->conn_hash;
3374 struct hci_conn *conn = NULL, *c;
3375 unsigned int num = 0, min = ~0;
3376
3377 /* We don't have to lock device here. Connections are always
3378 * added and removed with TX task disabled. */
3379
3380 rcu_read_lock();
3381
3382 list_for_each_entry_rcu(c, &h->list, list) {
3383 if (c->type != type || skb_queue_empty(&c->data_q))
3384 continue;
3385
3386 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3387 continue;
3388
3389 num++;
3390
3391 if (c->sent < min) {
3392 min = c->sent;
3393 conn = c;
3394 }
3395
3396 if (hci_conn_num(hdev, type) == num)
3397 break;
3398 }
3399
3400 rcu_read_unlock();
3401
3402 hci_quote_sent(conn, num, quote);
3403
3404 BT_DBG("conn %p quote %d", conn, *quote);
3405 return conn;
3406 }
3407
3408 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3409 {
3410 struct hci_conn_hash *h = &hdev->conn_hash;
3411 struct hci_conn *c;
3412
3413 bt_dev_err(hdev, "link tx timeout");
3414
3415 rcu_read_lock();
3416
3417 /* Kill stalled connections */
3418 list_for_each_entry_rcu(c, &h->list, list) {
3419 if (c->type == type && c->sent) {
3420 bt_dev_err(hdev, "killing stalled connection %pMR",
3421 &c->dst);
3422 /* hci_disconnect might sleep, so, we have to release
3423 * the RCU read lock before calling it.
3424 */
3425 rcu_read_unlock();
3426 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3427 rcu_read_lock();
3428 }
3429 }
3430
3431 rcu_read_unlock();
3432 }
3433
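/* Pick the channel to serve next: among channels with pending data,
 * prefer the highest skb priority and, within that priority, the
 * connection with the fewest packets in flight.
 */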
3434 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3435 int *quote)
3436 {
3437 struct hci_conn_hash *h = &hdev->conn_hash;
3438 struct hci_chan *chan = NULL;
3439 unsigned int num = 0, min = ~0, cur_prio = 0;
3440 struct hci_conn *conn;
3441 int conn_num = 0;
3442
3443 BT_DBG("%s", hdev->name);
3444
3445 rcu_read_lock();
3446
3447 list_for_each_entry_rcu(conn, &h->list, list) {
3448 struct hci_chan *tmp;
3449
3450 if (conn->type != type)
3451 continue;
3452
3453 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3454 continue;
3455
3456 conn_num++;
3457
3458 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3459 struct sk_buff *skb;
3460
3461 if (skb_queue_empty(&tmp->data_q))
3462 continue;
3463
3464 skb = skb_peek(&tmp->data_q);
3465 if (skb->priority < cur_prio)
3466 continue;
3467
3468 if (skb->priority > cur_prio) {
3469 num = 0;
3470 min = ~0;
3471 cur_prio = skb->priority;
3472 }
3473
3474 num++;
3475
3476 if (conn->sent < min) {
3477 min = conn->sent;
3478 chan = tmp;
3479 }
3480 }
3481
3482 if (hci_conn_num(hdev, type) == conn_num)
3483 break;
3484 }
3485
3486 rcu_read_unlock();
3487
3488 if (!chan)
3489 return NULL;
3490
3491 hci_quote_sent(chan->conn, num, quote);
3492
3493 BT_DBG("chan %p quote %d", chan, *quote);
3494 return chan;
3495 }
3496
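/* Promote the head skb of channels that were not served in the last
 * scheduling round so that lower priority traffic is not starved forever.
 */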
3497 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3498 {
3499 struct hci_conn_hash *h = &hdev->conn_hash;
3500 struct hci_conn *conn;
3501 int num = 0;
3502
3503 BT_DBG("%s", hdev->name);
3504
3505 rcu_read_lock();
3506
3507 list_for_each_entry_rcu(conn, &h->list, list) {
3508 struct hci_chan *chan;
3509
3510 if (conn->type != type)
3511 continue;
3512
3513 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3514 continue;
3515
3516 num++;
3517
3518 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3519 struct sk_buff *skb;
3520
3521 if (chan->sent) {
3522 chan->sent = 0;
3523 continue;
3524 }
3525
3526 if (skb_queue_empty(&chan->data_q))
3527 continue;
3528
3529 skb = skb_peek(&chan->data_q);
3530 if (skb->priority >= HCI_PRIO_MAX - 1)
3531 continue;
3532
3533 skb->priority = HCI_PRIO_MAX - 1;
3534
3535 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3536 skb->priority);
3537 }
3538
3539 if (hci_conn_num(hdev, type) == num)
3540 break;
3541 }
3542
3543 rcu_read_unlock();
3544
3545 }
3546
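/* Kill stalled links when the controller has returned no buffer credits
 * since the last transmission for longer than HCI_ACL_TX_TIMEOUT.
 */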
3547 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3548 {
3549 unsigned long last_tx;
3550
3551 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3552 return;
3553
3554 switch (type) {
3555 case LE_LINK:
3556 last_tx = hdev->le_last_tx;
3557 break;
3558 default:
3559 last_tx = hdev->acl_last_tx;
3560 break;
3561 }
3562
3563 /* tx timeout must be longer than maximum link supervision timeout
3564 * (40.9 seconds)
3565 */
3566 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3567 hci_link_tx_to(hdev, type);
3568 }
3569
3570 /* Schedule SCO */
3571 static void hci_sched_sco(struct hci_dev *hdev)
3572 {
3573 struct hci_conn *conn;
3574 struct sk_buff *skb;
3575 int quote;
3576
3577 BT_DBG("%s", hdev->name);
3578
3579 if (!hci_conn_num(hdev, SCO_LINK))
3580 return;
3581
3582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3584 BT_DBG("skb %p len %d", skb, skb->len);
3585 hci_send_frame(hdev, skb);
3586
3587 conn->sent++;
3588 if (conn->sent == ~0)
3589 conn->sent = 0;
3590 }
3591 }
3592 }
3593
3594 static void hci_sched_esco(struct hci_dev *hdev)
3595 {
3596 struct hci_conn *conn;
3597 struct sk_buff *skb;
3598 int quote;
3599
3600 BT_DBG("%s", hdev->name);
3601
3602 if (!hci_conn_num(hdev, ESCO_LINK))
3603 return;
3604
3605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3606 "e))) {
3607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3608 BT_DBG("skb %p len %d", skb, skb->len);
3609 hci_send_frame(hdev, skb);
3610
3611 conn->sent++;
3612 if (conn->sent == ~0)
3613 conn->sent = 0;
3614 }
3615 }
3616 }
3617
3618 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3619 {
3620 unsigned int cnt = hdev->acl_cnt;
3621 struct hci_chan *chan;
3622 struct sk_buff *skb;
3623 int quote;
3624
3625 __check_timeout(hdev, cnt, ACL_LINK);
3626
3627 while (hdev->acl_cnt &&
3628 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3629 u32 priority = (skb_peek(&chan->data_q))->priority;
3630 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3632 skb->len, skb->priority);
3633
3634 /* Stop if priority has changed */
3635 if (skb->priority < priority)
3636 break;
3637
3638 skb = skb_dequeue(&chan->data_q);
3639
3640 hci_conn_enter_active_mode(chan->conn,
3641 bt_cb(skb)->force_active);
3642
3643 hci_send_frame(hdev, skb);
3644 hdev->acl_last_tx = jiffies;
3645
3646 hdev->acl_cnt--;
3647 chan->sent++;
3648 chan->conn->sent++;
3649
3650 /* Send pending SCO packets right away */
3651 hci_sched_sco(hdev);
3652 hci_sched_esco(hdev);
3653 }
3654 }
3655
3656 if (cnt != hdev->acl_cnt)
3657 hci_prio_recalculate(hdev, ACL_LINK);
3658 }
3659
3660 static void hci_sched_acl(struct hci_dev *hdev)
3661 {
3662 BT_DBG("%s", hdev->name);
3663
3664 /* No ACL link over BR/EDR controller */
3665 if (!hci_conn_num(hdev, ACL_LINK))
3666 return;
3667
3668 hci_sched_acl_pkt(hdev);
3669 }
3670
3671 static void hci_sched_le(struct hci_dev *hdev)
3672 {
3673 struct hci_chan *chan;
3674 struct sk_buff *skb;
3675 int quote, *cnt, tmp;
3676
3677 BT_DBG("%s", hdev->name);
3678
3679 if (!hci_conn_num(hdev, LE_LINK))
3680 return;
3681
3682 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3683
3684 __check_timeout(hdev, *cnt, LE_LINK);
3685
3686 tmp = *cnt;
3687 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3688 u32 priority = (skb_peek(&chan->data_q))->priority;
3689 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3690 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3691 skb->len, skb->priority);
3692
3693 /* Stop if priority has changed */
3694 if (skb->priority < priority)
3695 break;
3696
3697 skb = skb_dequeue(&chan->data_q);
3698
3699 hci_send_frame(hdev, skb);
3700 hdev->le_last_tx = jiffies;
3701
3702 (*cnt)--;
3703 chan->sent++;
3704 chan->conn->sent++;
3705
3706 /* Send pending SCO packets right away */
3707 hci_sched_sco(hdev);
3708 hci_sched_esco(hdev);
3709 }
3710 }
3711
3712 if (*cnt != tmp)
3713 hci_prio_recalculate(hdev, LE_LINK);
3714 }
3715
3716 /* Schedule CIS */
3717 static void hci_sched_iso(struct hci_dev *hdev)
3718 {
3719 struct hci_conn *conn;
3720 struct sk_buff *skb;
3721 int quote, *cnt;
3722
3723 BT_DBG("%s", hdev->name);
3724
3725 if (!hci_conn_num(hdev, ISO_LINK))
3726 return;
3727
3728 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3729 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3730 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3731 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3732 BT_DBG("skb %p len %d", skb, skb->len);
3733 hci_send_frame(hdev, skb);
3734
3735 conn->sent++;
3736 if (conn->sent == ~0)
3737 conn->sent = 0;
3738 (*cnt)--;
3739 }
3740 }
3741 }
3742
3743 static void hci_tx_work(struct work_struct *work)
3744 {
3745 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3746 struct sk_buff *skb;
3747
3748 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3749 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3750
3751 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3752 /* Schedule queues and send stuff to HCI driver */
3753 hci_sched_sco(hdev);
3754 hci_sched_esco(hdev);
3755 hci_sched_iso(hdev);
3756 hci_sched_acl(hdev);
3757 hci_sched_le(hdev);
3758 }
3759
3760 /* Send next queued raw (unknown type) packet */
3761 while ((skb = skb_dequeue(&hdev->raw_q)))
3762 hci_send_frame(hdev, skb);
3763 }
3764
3765 /* ----- HCI RX task (incoming data processing) ----- */
3766
3767 /* ACL data packet */
3768 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3769 {
3770 struct hci_acl_hdr *hdr;
3771 struct hci_conn *conn;
3772 __u16 handle, flags;
3773
3774 hdr = skb_pull_data(skb, sizeof(*hdr));
3775 if (!hdr) {
3776 bt_dev_err(hdev, "ACL packet too small");
3777 goto drop;
3778 }
3779
3780 handle = __le16_to_cpu(hdr->handle);
3781 flags = hci_flags(handle);
3782 handle = hci_handle(handle);
3783
3784 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3785 handle, flags);
3786
3787 hdev->stat.acl_rx++;
3788
3789 hci_dev_lock(hdev);
3790 conn = hci_conn_hash_lookup_handle(hdev, handle);
3791 hci_dev_unlock(hdev);
3792
3793 if (conn) {
3794 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3795
3796 /* Send to upper protocol */
3797 l2cap_recv_acldata(conn, skb, flags);
3798 return;
3799 } else {
3800 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3801 handle);
3802 }
3803
3804 drop:
3805 kfree_skb(skb);
3806 }
3807
3808 /* SCO data packet */
3809 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3810 {
3811 struct hci_sco_hdr *hdr;
3812 struct hci_conn *conn;
3813 __u16 handle, flags;
3814
3815 hdr = skb_pull_data(skb, sizeof(*hdr));
3816 if (!hdr) {
3817 bt_dev_err(hdev, "SCO packet too small");
3818 goto drop;
3819 }
3820
3821 handle = __le16_to_cpu(hdr->handle);
3822 flags = hci_flags(handle);
3823 handle = hci_handle(handle);
3824
3825 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3826 handle, flags);
3827
3828 hdev->stat.sco_rx++;
3829
3830 hci_dev_lock(hdev);
3831 conn = hci_conn_hash_lookup_handle(hdev, handle);
3832 hci_dev_unlock(hdev);
3833
3834 if (conn) {
3835 /* Send to upper protocol */
3836 hci_skb_pkt_status(skb) = flags & 0x03;
3837 sco_recv_scodata(conn, skb);
3838 return;
3839 } else {
3840 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3841 handle);
3842 }
3843
3844 drop:
3845 kfree_skb(skb);
3846 }
3847
3848 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3849 {
3850 struct hci_iso_hdr *hdr;
3851 struct hci_conn *conn;
3852 __u16 handle, flags;
3853
3854 hdr = skb_pull_data(skb, sizeof(*hdr));
3855 if (!hdr) {
3856 bt_dev_err(hdev, "ISO packet too small");
3857 goto drop;
3858 }
3859
3860 handle = __le16_to_cpu(hdr->handle);
3861 flags = hci_flags(handle);
3862 handle = hci_handle(handle);
3863
3864 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3865 handle, flags);
3866
3867 hci_dev_lock(hdev);
3868 conn = hci_conn_hash_lookup_handle(hdev, handle);
3869 hci_dev_unlock(hdev);
3870
3871 if (!conn) {
3872 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3873 handle);
3874 goto drop;
3875 }
3876
3877 /* Send to upper protocol */
3878 iso_recv(conn, skb, flags);
3879 return;
3880
3881 drop:
3882 kfree_skb(skb);
3883 }
3884
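/* A request is complete when the command queue is empty or the next
 * queued command starts a new request (HCI_REQ_START).
 */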
3885 static bool hci_req_is_complete(struct hci_dev *hdev)
3886 {
3887 struct sk_buff *skb;
3888
3889 skb = skb_peek(&hdev->cmd_q);
3890 if (!skb)
3891 return true;
3892
3893 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3894 }
3895
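/* Re-queue a clone of the last sent command (unless it was a reset) so
 * that it gets transmitted again after a spontaneous controller reset.
 */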
3896 static void hci_resend_last(struct hci_dev *hdev)
3897 {
3898 struct hci_command_hdr *sent;
3899 struct sk_buff *skb;
3900 u16 opcode;
3901
3902 if (!hdev->sent_cmd)
3903 return;
3904
3905 sent = (void *) hdev->sent_cmd->data;
3906 opcode = __le16_to_cpu(sent->opcode);
3907 if (opcode == HCI_OP_RESET)
3908 return;
3909
3910 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3911 if (!skb)
3912 return;
3913
3914 skb_queue_head(&hdev->cmd_q, skb);
3915 queue_work(hdev->workqueue, &hdev->cmd_work);
3916 }
3917
3918 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3919 hci_req_complete_t *req_complete,
3920 hci_req_complete_skb_t *req_complete_skb)
3921 {
3922 struct sk_buff *skb;
3923 unsigned long flags;
3924
3925 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3926
3927 /* If the completed command doesn't match the last one that was
3928 * sent we need to do special handling of it.
3929 */
3930 if (!hci_sent_cmd_data(hdev, opcode)) {
3931 /* Some CSR based controllers generate a spontaneous
3932 * reset complete event during init and any pending
3933 * command will never be completed. In such a case we
3934 * need to resend whatever was the last sent
3935 * command.
3936 */
3937 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3938 hci_resend_last(hdev);
3939
3940 return;
3941 }
3942
3943 /* If we reach this point this event matches the last command sent */
3944 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3945
3946 /* If the command succeeded and there's still more commands in
3947 * this request the request is not yet complete.
3948 */
3949 if (!status && !hci_req_is_complete(hdev))
3950 return;
3951
3952 skb = hdev->req_skb;
3953
3954 /* If this was the last command in a request the complete
3955 * callback would be found in hdev->req_skb instead of the
3956 * command queue (hdev->cmd_q).
3957 */
3958 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3959 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3960 return;
3961 }
3962
3963 if (skb && bt_cb(skb)->hci.req_complete) {
3964 *req_complete = bt_cb(skb)->hci.req_complete;
3965 return;
3966 }
3967
3968 /* Remove all pending commands belonging to this request */
3969 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3970 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3971 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3972 __skb_queue_head(&hdev->cmd_q, skb);
3973 break;
3974 }
3975
3976 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3977 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3978 else
3979 *req_complete = bt_cb(skb)->hci.req_complete;
3980 dev_kfree_skb_irq(skb);
3981 }
3982 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3983 }
3984
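/* RX work: drain hdev->rx_q and dispatch each packet to the matching
 * handler (event, ACL, SCO or ISO), after mirroring it to the monitor
 * and, in promiscuous mode, to the HCI sockets.
 */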
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet-parsing
	 * coverage from this background thread and associate it with the
	 * syscall thread that originally injected the packet. This helps
	 * with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to
		 * process the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

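/* Send a single queued command to the driver. A clone is kept in
 * hdev->sent_cmd (and, for a pending synchronous request, in
 * hdev->req_skb) so the command can be matched against its completion
 * event or resent later.
 */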
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	atomic_dec(&hdev->cmd_cnt);
}

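/* Command work: if the controller still has command credits, dequeue the
 * next HCI command and send it, then (re)arm the command timeout unless a
 * reset or a workqueue drain is in progress.
 */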
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}
