xref: /linux/net/bluetooth/hci_core.c (revision cea465a96a294e7bc2537f27a737cfa7c6234b3d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <linux/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49 
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53 
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57 
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61 
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64 
65 /* Get HCI device by index.
66  * Device is held on return. */
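/* Typical calling pattern (illustrative sketch; the ioctl helpers below
 * follow the same shape): the reference taken here must be released with
 * hci_dev_put() once the caller is done with the device:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);
 */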
67 struct hci_dev *hci_dev_get(int index)
68 {
69 	struct hci_dev *hdev = NULL, *d;
70 
71 	BT_DBG("%d", index);
72 
73 	if (index < 0)
74 		return NULL;
75 
76 	read_lock(&hci_dev_list_lock);
77 	list_for_each_entry(d, &hci_dev_list, list) {
78 		if (d->id == index) {
79 			hdev = hci_dev_hold(d);
80 			break;
81 		}
82 	}
83 	read_unlock(&hci_dev_list_lock);
84 	return hdev;
85 }
86 
87 /* ---- Inquiry support ---- */
88 
89 bool hci_discovery_active(struct hci_dev *hdev)
90 {
91 	struct discovery_state *discov = &hdev->discovery;
92 
93 	switch (discov->state) {
94 	case DISCOVERY_FINDING:
95 	case DISCOVERY_RESOLVING:
96 		return true;
97 
98 	default:
99 		return false;
100 	}
101 }
102 
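/* Move the discovery state machine to @state and notify the management
 * interface: entering DISCOVERY_STOPPED re-evaluates passive scanning and
 * (unless discovery was only starting) reports discovering as stopped,
 * while entering DISCOVERY_FINDING reports it as started.
 */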
103 void hci_discovery_set_state(struct hci_dev *hdev, int state)
104 {
105 	int old_state = hdev->discovery.state;
106 
107 	if (old_state == state)
108 		return;
109 
110 	hdev->discovery.state = state;
111 
112 	switch (state) {
113 	case DISCOVERY_STOPPED:
114 		hci_update_passive_scan(hdev);
115 
116 		if (old_state != DISCOVERY_STARTING)
117 			mgmt_discovering(hdev, 0);
118 		break;
119 	case DISCOVERY_STARTING:
120 		break;
121 	case DISCOVERY_FINDING:
122 		mgmt_discovering(hdev, 1);
123 		break;
124 	case DISCOVERY_RESOLVING:
125 		break;
126 	case DISCOVERY_STOPPING:
127 		break;
128 	}
129 
130 	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
131 }
132 
133 void hci_inquiry_cache_flush(struct hci_dev *hdev)
134 {
135 	struct discovery_state *cache = &hdev->discovery;
136 	struct inquiry_entry *p, *n;
137 
138 	list_for_each_entry_safe(p, n, &cache->all, all) {
139 		list_del(&p->all);
140 		kfree(p);
141 	}
142 
143 	INIT_LIST_HEAD(&cache->unknown);
144 	INIT_LIST_HEAD(&cache->resolve);
145 }
146 
147 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
148 					       bdaddr_t *bdaddr)
149 {
150 	struct discovery_state *cache = &hdev->discovery;
151 	struct inquiry_entry *e;
152 
153 	BT_DBG("cache %p, %pMR", cache, bdaddr);
154 
155 	list_for_each_entry(e, &cache->all, all) {
156 		if (!bacmp(&e->data.bdaddr, bdaddr))
157 			return e;
158 	}
159 
160 	return NULL;
161 }
162 
163 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
164 						       bdaddr_t *bdaddr)
165 {
166 	struct discovery_state *cache = &hdev->discovery;
167 	struct inquiry_entry *e;
168 
169 	BT_DBG("cache %p, %pMR", cache, bdaddr);
170 
171 	list_for_each_entry(e, &cache->unknown, list) {
172 		if (!bacmp(&e->data.bdaddr, bdaddr))
173 			return e;
174 	}
175 
176 	return NULL;
177 }
178 
179 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
180 						       bdaddr_t *bdaddr,
181 						       int state)
182 {
183 	struct discovery_state *cache = &hdev->discovery;
184 	struct inquiry_entry *e;
185 
186 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
187 
188 	list_for_each_entry(e, &cache->resolve, list) {
189 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
190 			return e;
191 		if (!bacmp(&e->data.bdaddr, bdaddr))
192 			return e;
193 	}
194 
195 	return NULL;
196 }
197 
198 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
199 				      struct inquiry_entry *ie)
200 {
201 	struct discovery_state *cache = &hdev->discovery;
202 	struct list_head *pos = &cache->resolve;
203 	struct inquiry_entry *p;
204 
205 	list_del(&ie->list);
206 
207 	list_for_each_entry(p, &cache->resolve, list) {
208 		if (p->name_state != NAME_PENDING &&
209 		    abs(p->data.rssi) >= abs(ie->data.rssi))
210 			break;
211 		pos = &p->list;
212 	}
213 
214 	list_add(&ie->list, pos);
215 }
216 
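/* Add a newly discovered device to the inquiry cache, or refresh the
 * existing entry, and return the MGMT_DEV_FOUND_* flags that describe how
 * the result should be reported.
 */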
217 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
218 			     bool name_known)
219 {
220 	struct discovery_state *cache = &hdev->discovery;
221 	struct inquiry_entry *ie;
222 	u32 flags = 0;
223 
224 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
225 
226 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
227 
228 	if (!data->ssp_mode)
229 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
230 
231 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
232 	if (ie) {
233 		if (!ie->data.ssp_mode)
234 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
235 
236 		if (ie->name_state == NAME_NEEDED &&
237 		    data->rssi != ie->data.rssi) {
238 			ie->data.rssi = data->rssi;
239 			hci_inquiry_cache_update_resolve(hdev, ie);
240 		}
241 
242 		goto update;
243 	}
244 
245 	/* Entry not in the cache. Add new one. */
246 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
247 	if (!ie) {
248 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
249 		goto done;
250 	}
251 
252 	list_add(&ie->all, &cache->all);
253 
254 	if (name_known) {
255 		ie->name_state = NAME_KNOWN;
256 	} else {
257 		ie->name_state = NAME_NOT_KNOWN;
258 		list_add(&ie->list, &cache->unknown);
259 	}
260 
261 update:
262 	if (name_known && ie->name_state != NAME_KNOWN &&
263 	    ie->name_state != NAME_PENDING) {
264 		ie->name_state = NAME_KNOWN;
265 		list_del(&ie->list);
266 	}
267 
268 	memcpy(&ie->data, data, sizeof(*data));
269 	ie->timestamp = jiffies;
270 	cache->timestamp = jiffies;
271 
272 	if (ie->name_state == NAME_NOT_KNOWN)
273 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
274 
275 done:
276 	return flags;
277 }
278 
279 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
280 {
281 	struct discovery_state *cache = &hdev->discovery;
282 	struct inquiry_info *info = (struct inquiry_info *) buf;
283 	struct inquiry_entry *e;
284 	int copied = 0;
285 
286 	list_for_each_entry(e, &cache->all, all) {
287 		struct inquiry_data *data = &e->data;
288 
289 		if (copied >= num)
290 			break;
291 
292 		bacpy(&info->bdaddr, &data->bdaddr);
293 		info->pscan_rep_mode	= data->pscan_rep_mode;
294 		info->pscan_period_mode	= data->pscan_period_mode;
295 		info->pscan_mode	= data->pscan_mode;
296 		memcpy(info->dev_class, data->dev_class, 3);
297 		info->clock_offset	= data->clock_offset;
298 
299 		info++;
300 		copied++;
301 	}
302 
303 	BT_DBG("cache %p, copied %d", cache, copied);
304 	return copied;
305 }
306 
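/* HCIINQUIRY ioctl helper: run a fresh inquiry when the cache is stale,
 * empty or a flush was requested, then copy the cached results back to
 * userspace.
 */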
307 int hci_inquiry(void __user *arg)
308 {
309 	__u8 __user *ptr = arg;
310 	struct hci_inquiry_req ir;
311 	struct hci_dev *hdev;
312 	int err = 0, do_inquiry = 0, max_rsp;
313 	__u8 *buf;
314 
315 	if (copy_from_user(&ir, ptr, sizeof(ir)))
316 		return -EFAULT;
317 
318 	hdev = hci_dev_get(ir.dev_id);
319 	if (!hdev)
320 		return -ENODEV;
321 
322 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
323 		err = -EBUSY;
324 		goto done;
325 	}
326 
327 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
328 		err = -EOPNOTSUPP;
329 		goto done;
330 	}
331 
332 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
333 		err = -EOPNOTSUPP;
334 		goto done;
335 	}
336 
337 	/* Restrict maximum inquiry length to 60 seconds */
338 	if (ir.length > 60) {
339 		err = -EINVAL;
340 		goto done;
341 	}
342 
343 	hci_dev_lock(hdev);
344 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
345 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
346 		hci_inquiry_cache_flush(hdev);
347 		do_inquiry = 1;
348 	}
349 	hci_dev_unlock(hdev);
350 
351 	if (do_inquiry) {
352 		hci_req_sync_lock(hdev);
353 		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
354 		hci_req_sync_unlock(hdev);
355 
356 		if (err < 0)
357 			goto done;
358 
359 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
360 		 * cleared). If it is interrupted by a signal, return -EINTR.
361 		 */
362 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
363 				TASK_INTERRUPTIBLE)) {
364 			err = -EINTR;
365 			goto done;
366 		}
367 	}
368 
369 	/* For an unlimited number of responses we will use a buffer with
370 	 * 255 entries
371 	 */
372 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
373 
374 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
375 	 * and then copy it to user space.
376 	 */
377 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
378 	if (!buf) {
379 		err = -ENOMEM;
380 		goto done;
381 	}
382 
383 	hci_dev_lock(hdev);
384 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
385 	hci_dev_unlock(hdev);
386 
387 	BT_DBG("num_rsp %d", ir.num_rsp);
388 
389 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
390 		ptr += sizeof(ir);
391 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
392 				 ir.num_rsp))
393 			err = -EFAULT;
394 	} else
395 		err = -EFAULT;
396 
397 	kfree(buf);
398 
399 done:
400 	hci_dev_put(hdev);
401 	return err;
402 }
403 
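/* Bring the device up with the request sync lock held. Shared by the
 * ioctl path, the power-on work and the error reset handler.
 */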
404 static int hci_dev_do_open(struct hci_dev *hdev)
405 {
406 	int ret = 0;
407 
408 	BT_DBG("%s %p", hdev->name, hdev);
409 
410 	hci_req_sync_lock(hdev);
411 
412 	ret = hci_dev_open_sync(hdev);
413 
414 	hci_req_sync_unlock(hdev);
415 	return ret;
416 }
417 
418 /* ---- HCI ioctl helpers ---- */
419 
420 int hci_dev_open(__u16 dev)
421 {
422 	struct hci_dev *hdev;
423 	int err;
424 
425 	hdev = hci_dev_get(dev);
426 	if (!hdev)
427 		return -ENODEV;
428 
429 	/* Devices that are marked as unconfigured can only be powered
430 	 * up as a user channel. Trying to bring them up as normal devices
431 	 * will result in a failure. Only user channel operation is
432 	 * possible.
433 	 *
434 	 * When this function is called for a user channel, the flag
435 	 * HCI_USER_CHANNEL will be set first before attempting to
436 	 * open the device.
437 	 */
438 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
439 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
440 		err = -EOPNOTSUPP;
441 		goto done;
442 	}
443 
444 	/* We need to ensure that no other power on/off work is pending
445 	 * before proceeding to call hci_dev_do_open. This is
446 	 * particularly important if the setup procedure has not yet
447 	 * completed.
448 	 */
449 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
450 		cancel_delayed_work(&hdev->power_off);
451 
452 	/* After this call it is guaranteed that the setup procedure
453 	 * has finished. This means that error conditions like RFKILL
454 	 * or no valid public or static random address apply.
455 	 */
456 	flush_workqueue(hdev->req_workqueue);
457 
458 	/* For controllers not using the management interface and that
459 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
460 	 * so that pairing works for them. Once the management interface
461 	 * is in use this bit will be cleared again and userspace has
462 	 * to explicitly enable it.
463 	 */
464 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
465 	    !hci_dev_test_flag(hdev, HCI_MGMT))
466 		hci_dev_set_flag(hdev, HCI_BONDABLE);
467 
468 	err = hci_dev_do_open(hdev);
469 
470 done:
471 	hci_dev_put(hdev);
472 	return err;
473 }
474 
475 int hci_dev_do_close(struct hci_dev *hdev)
476 {
477 	int err;
478 
479 	BT_DBG("%s %p", hdev->name, hdev);
480 
481 	hci_req_sync_lock(hdev);
482 
483 	err = hci_dev_close_sync(hdev);
484 
485 	hci_req_sync_unlock(hdev);
486 
487 	return err;
488 }
489 
490 int hci_dev_close(__u16 dev)
491 {
492 	struct hci_dev *hdev;
493 	int err;
494 
495 	hdev = hci_dev_get(dev);
496 	if (!hdev)
497 		return -ENODEV;
498 
499 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
500 		err = -EBUSY;
501 		goto done;
502 	}
503 
504 	cancel_work_sync(&hdev->power_on);
505 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
506 		cancel_delayed_work(&hdev->power_off);
507 
508 	err = hci_dev_do_close(hdev);
509 
510 done:
511 	hci_dev_put(hdev);
512 	return err;
513 }
514 
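/* Reset the controller with the request sync lock held: drop the pending
 * RX and command queues, flush the inquiry cache and connection hash,
 * reset the flow control counters and finally send HCI Reset.
 */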
515 static int hci_dev_do_reset(struct hci_dev *hdev)
516 {
517 	int ret;
518 
519 	BT_DBG("%s %p", hdev->name, hdev);
520 
521 	hci_req_sync_lock(hdev);
522 
523 	/* Drop queues */
524 	skb_queue_purge(&hdev->rx_q);
525 	skb_queue_purge(&hdev->cmd_q);
526 
527 	/* Cancel these to avoid queueing non-chained pending work */
528 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
529 	/* Wait for
530 	 *
531 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
532 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
533 	 *
534 	 * inside RCU section to see the flag or complete scheduling.
535 	 */
536 	synchronize_rcu();
537 	/* Explicitly cancel works in case scheduled after setting the flag. */
538 	cancel_delayed_work(&hdev->cmd_timer);
539 	cancel_delayed_work(&hdev->ncmd_timer);
540 
541 	/* Avoid potential lockdep warnings from the *_flush() calls by
542 	 * ensuring the workqueue is empty up front.
543 	 */
544 	drain_workqueue(hdev->workqueue);
545 
546 	hci_dev_lock(hdev);
547 	hci_inquiry_cache_flush(hdev);
548 	hci_conn_hash_flush(hdev);
549 	hci_dev_unlock(hdev);
550 
551 	if (hdev->flush)
552 		hdev->flush(hdev);
553 
554 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
555 
556 	atomic_set(&hdev->cmd_cnt, 1);
557 	hdev->acl_cnt = 0;
558 	hdev->sco_cnt = 0;
559 	hdev->le_cnt = 0;
560 	hdev->iso_cnt = 0;
561 
562 	ret = hci_reset_sync(hdev);
563 
564 	hci_req_sync_unlock(hdev);
565 	return ret;
566 }
567 
568 int hci_dev_reset(__u16 dev)
569 {
570 	struct hci_dev *hdev;
571 	int err;
572 
573 	hdev = hci_dev_get(dev);
574 	if (!hdev)
575 		return -ENODEV;
576 
577 	if (!test_bit(HCI_UP, &hdev->flags)) {
578 		err = -ENETDOWN;
579 		goto done;
580 	}
581 
582 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
583 		err = -EBUSY;
584 		goto done;
585 	}
586 
587 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
588 		err = -EOPNOTSUPP;
589 		goto done;
590 	}
591 
592 	err = hci_dev_do_reset(hdev);
593 
594 done:
595 	hci_dev_put(hdev);
596 	return err;
597 }
598 
599 int hci_dev_reset_stat(__u16 dev)
600 {
601 	struct hci_dev *hdev;
602 	int ret = 0;
603 
604 	hdev = hci_dev_get(dev);
605 	if (!hdev)
606 		return -ENODEV;
607 
608 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
609 		ret = -EBUSY;
610 		goto done;
611 	}
612 
613 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
614 		ret = -EOPNOTSUPP;
615 		goto done;
616 	}
617 
618 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
619 
620 done:
621 	hci_dev_put(hdev);
622 	return ret;
623 }
624 
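/* Keep the CONNECTABLE/DISCOVERABLE flags in sync with a scan enable
 * value written through the legacy HCISETSCAN ioctl and, if anything
 * changed, announce the new settings over the management interface.
 */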
625 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
626 {
627 	bool conn_changed, discov_changed;
628 
629 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
630 
631 	if ((scan & SCAN_PAGE))
632 		conn_changed = !hci_dev_test_and_set_flag(hdev,
633 							  HCI_CONNECTABLE);
634 	else
635 		conn_changed = hci_dev_test_and_clear_flag(hdev,
636 							   HCI_CONNECTABLE);
637 
638 	if ((scan & SCAN_INQUIRY)) {
639 		discov_changed = !hci_dev_test_and_set_flag(hdev,
640 							    HCI_DISCOVERABLE);
641 	} else {
642 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
643 		discov_changed = hci_dev_test_and_clear_flag(hdev,
644 							     HCI_DISCOVERABLE);
645 	}
646 
647 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
648 		return;
649 
650 	if (conn_changed || discov_changed) {
651 		/* In case this was disabled through mgmt */
652 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
653 
654 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
655 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
656 
657 		mgmt_new_settings(hdev);
658 	}
659 }
660 
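/* Handle the legacy HCISET* ioctls that tweak controller parameters
 * (historically issued by tools such as hciconfig over a raw HCI socket).
 */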
661 int hci_dev_cmd(unsigned int cmd, void __user *arg)
662 {
663 	struct hci_dev *hdev;
664 	struct hci_dev_req dr;
665 	__le16 policy;
666 	int err = 0;
667 
668 	if (copy_from_user(&dr, arg, sizeof(dr)))
669 		return -EFAULT;
670 
671 	hdev = hci_dev_get(dr.dev_id);
672 	if (!hdev)
673 		return -ENODEV;
674 
675 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
676 		err = -EBUSY;
677 		goto done;
678 	}
679 
680 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
681 		err = -EOPNOTSUPP;
682 		goto done;
683 	}
684 
685 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
686 		err = -EOPNOTSUPP;
687 		goto done;
688 	}
689 
690 	switch (cmd) {
691 	case HCISETAUTH:
692 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
693 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
694 		break;
695 
696 	case HCISETENCRYPT:
697 		if (!lmp_encrypt_capable(hdev)) {
698 			err = -EOPNOTSUPP;
699 			break;
700 		}
701 
702 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
703 			/* Auth must be enabled first */
704 			err = hci_cmd_sync_status(hdev,
705 						  HCI_OP_WRITE_AUTH_ENABLE,
706 						  1, &dr.dev_opt,
707 						  HCI_CMD_TIMEOUT);
708 			if (err)
709 				break;
710 		}
711 
712 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
713 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
714 		break;
715 
716 	case HCISETSCAN:
717 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
718 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
719 
720 		/* Ensure that the connectable and discoverable states
721 		 * get correctly modified as this was a non-mgmt change.
722 		 */
723 		if (!err)
724 			hci_update_passive_scan_state(hdev, dr.dev_opt);
725 		break;
726 
727 	case HCISETLINKPOL:
728 		policy = cpu_to_le16(dr.dev_opt);
729 
730 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
731 					  2, &policy, HCI_CMD_TIMEOUT);
732 		break;
733 
734 	case HCISETLINKMODE:
735 		hdev->link_mode = ((__u16) dr.dev_opt) &
736 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
737 		break;
738 
739 	case HCISETPTYPE:
740 		if (hdev->pkt_type == (__u16) dr.dev_opt)
741 			break;
742 
743 		hdev->pkt_type = (__u16) dr.dev_opt;
744 		mgmt_phy_configuration_changed(hdev, NULL);
745 		break;
746 
747 	case HCISETACLMTU:
748 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
749 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
750 		break;
751 
752 	case HCISETSCOMTU:
753 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
754 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
755 		break;
756 
757 	default:
758 		err = -EINVAL;
759 		break;
760 	}
761 
762 done:
763 	hci_dev_put(hdev);
764 	return err;
765 }
766 
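/* HCIGETDEVLIST ioctl helper: report the id and flags of every registered
 * controller to userspace.
 */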
767 int hci_get_dev_list(void __user *arg)
768 {
769 	struct hci_dev *hdev;
770 	struct hci_dev_list_req *dl;
771 	struct hci_dev_req *dr;
772 	int n = 0, err;
773 	__u16 dev_num;
774 
775 	if (get_user(dev_num, (__u16 __user *) arg))
776 		return -EFAULT;
777 
778 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
779 		return -EINVAL;
780 
781 	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
782 	if (!dl)
783 		return -ENOMEM;
784 
785 	dl->dev_num = dev_num;
786 	dr = dl->dev_req;
787 
788 	read_lock(&hci_dev_list_lock);
789 	list_for_each_entry(hdev, &hci_dev_list, list) {
790 		unsigned long flags = hdev->flags;
791 
792 		/* When the auto-off is configured, it means the transport
793 		 * is running, but in that case still indicate that the
794 		 * device is actually down.
795 		 */
796 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
797 			flags &= ~BIT(HCI_UP);
798 
799 		dr[n].dev_id  = hdev->id;
800 		dr[n].dev_opt = flags;
801 
802 		if (++n >= dev_num)
803 			break;
804 	}
805 	read_unlock(&hci_dev_list_lock);
806 
807 	dl->dev_num = n;
808 	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
809 	kfree(dl);
810 
811 	return err ? -EFAULT : 0;
812 }
813 
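/* HCIGETDEVINFO ioctl helper: copy a snapshot of one controller's address,
 * flags, packet types, MTUs and statistics to userspace.
 */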
814 int hci_get_dev_info(void __user *arg)
815 {
816 	struct hci_dev *hdev;
817 	struct hci_dev_info di;
818 	unsigned long flags;
819 	int err = 0;
820 
821 	if (copy_from_user(&di, arg, sizeof(di)))
822 		return -EFAULT;
823 
824 	hdev = hci_dev_get(di.dev_id);
825 	if (!hdev)
826 		return -ENODEV;
827 
828 	/* When the auto-off is configured, it means the transport
829 	 * is running, but in that case still indicate that the
830 	 * device is actually down.
831 	 */
832 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
833 		flags = hdev->flags & ~BIT(HCI_UP);
834 	else
835 		flags = hdev->flags;
836 
837 	strscpy(di.name, hdev->name, sizeof(di.name));
838 	di.bdaddr   = hdev->bdaddr;
839 	di.type     = (hdev->bus & 0x0f);
840 	di.flags    = flags;
841 	di.pkt_type = hdev->pkt_type;
842 	if (lmp_bredr_capable(hdev)) {
843 		di.acl_mtu  = hdev->acl_mtu;
844 		di.acl_pkts = hdev->acl_pkts;
845 		di.sco_mtu  = hdev->sco_mtu;
846 		di.sco_pkts = hdev->sco_pkts;
847 	} else {
848 		di.acl_mtu  = hdev->le_mtu;
849 		di.acl_pkts = hdev->le_pkts;
850 		di.sco_mtu  = 0;
851 		di.sco_pkts = 0;
852 	}
853 	di.link_policy = hdev->link_policy;
854 	di.link_mode   = hdev->link_mode;
855 
856 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
857 	memcpy(&di.features, &hdev->features, sizeof(di.features));
858 
859 	if (copy_to_user(arg, &di, sizeof(di)))
860 		err = -EFAULT;
861 
862 	hci_dev_put(hdev);
863 
864 	return err;
865 }
866 
867 /* ---- Interface to HCI drivers ---- */
868 
869 static int hci_dev_do_poweroff(struct hci_dev *hdev)
870 {
871 	int err;
872 
873 	BT_DBG("%s %p", hdev->name, hdev);
874 
875 	hci_req_sync_lock(hdev);
876 
877 	err = hci_set_powered_sync(hdev, false);
878 
879 	hci_req_sync_unlock(hdev);
880 
881 	return err;
882 }
883 
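/* rfkill callback: power the controller off when the switch blocks it
 * (unless setup or config is still in progress) and clear HCI_RFKILLED
 * again once it is unblocked.
 */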
884 static int hci_rfkill_set_block(void *data, bool blocked)
885 {
886 	struct hci_dev *hdev = data;
887 	int err;
888 
889 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
890 
891 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
892 		return -EBUSY;
893 
894 	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
895 		return 0;
896 
897 	if (blocked) {
898 		hci_dev_set_flag(hdev, HCI_RFKILLED);
899 
900 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
901 		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
902 			err = hci_dev_do_poweroff(hdev);
903 			if (err) {
904 				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
905 					   err);
906 
907 				/* Make sure the device is still closed even if
908 				 * anything during the power off sequence (e.g.
909 				 * disconnecting devices) failed.
910 				 */
911 				hci_dev_do_close(hdev);
912 			}
913 		}
914 	} else {
915 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
916 	}
917 
918 	return 0;
919 }
920 
921 static const struct rfkill_ops hci_rfkill_ops = {
922 	.set_block = hci_rfkill_set_block,
923 };
924 
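/* Deferred power-on work: bring the device up and, depending on how setup
 * went, either announce the index over the management interface or turn
 * the device back off / schedule the auto-off timeout.
 */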
925 static void hci_power_on(struct work_struct *work)
926 {
927 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
928 	int err;
929 
930 	BT_DBG("%s", hdev->name);
931 
932 	if (test_bit(HCI_UP, &hdev->flags) &&
933 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
934 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
935 		cancel_delayed_work(&hdev->power_off);
936 		err = hci_powered_update_sync(hdev);
937 		mgmt_power_on(hdev, err);
938 		return;
939 	}
940 
941 	err = hci_dev_do_open(hdev);
942 	if (err < 0) {
943 		hci_dev_lock(hdev);
944 		mgmt_set_powered_failed(hdev, err);
945 		hci_dev_unlock(hdev);
946 		return;
947 	}
948 
949 	/* During the HCI setup phase, a few error conditions are
950 	 * ignored and they need to be checked now. If they are still
951 	 * valid, it is important to turn the device back off.
952 	 */
953 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
954 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
955 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
956 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
957 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
958 		hci_dev_do_close(hdev);
959 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
960 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
961 				   HCI_AUTO_OFF_TIMEOUT);
962 	}
963 
964 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
965 		/* For unconfigured devices, set the HCI_RAW flag
966 		 * so that userspace can easily identify them.
967 		 */
968 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
969 			set_bit(HCI_RAW, &hdev->flags);
970 
971 		/* For fully configured devices, this will send
972 		 * the Index Added event. For unconfigured devices,
973 		 * it will send the Unconfigured Index Added event.
974 		 *
975 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
976 		 * and no event will be sent.
977 		 */
978 		mgmt_index_added(hdev);
979 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
980 		/* Now that the controller is configured, it is
981 		 * important to clear the HCI_RAW flag.
982 		 */
983 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
984 			clear_bit(HCI_RAW, &hdev->flags);
985 
986 		/* Powering on the controller with HCI_CONFIG set only
987 		 * happens with the transition from unconfigured to
988 		 * configured. This will send the Index Added event.
989 		 */
990 		mgmt_index_added(hdev);
991 	}
992 }
993 
994 static void hci_power_off(struct work_struct *work)
995 {
996 	struct hci_dev *hdev = container_of(work, struct hci_dev,
997 					    power_off.work);
998 
999 	BT_DBG("%s", hdev->name);
1000 
1001 	hci_dev_do_close(hdev);
1002 }
1003 
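/* Hardware error work: give the driver a chance to handle the error, then
 * recover by closing and reopening the device.
 */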
1004 static void hci_error_reset(struct work_struct *work)
1005 {
1006 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1007 
1008 	hci_dev_hold(hdev);
1009 	BT_DBG("%s", hdev->name);
1010 
1011 	if (hdev->hw_error)
1012 		hdev->hw_error(hdev, hdev->hw_error_code);
1013 	else
1014 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1015 
1016 	if (!hci_dev_do_close(hdev))
1017 		hci_dev_do_open(hdev);
1018 
1019 	hci_dev_put(hdev);
1020 }
1021 
1022 void hci_uuids_clear(struct hci_dev *hdev)
1023 {
1024 	struct bt_uuid *uuid, *tmp;
1025 
1026 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1027 		list_del(&uuid->list);
1028 		kfree(uuid);
1029 	}
1030 }
1031 
1032 void hci_link_keys_clear(struct hci_dev *hdev)
1033 {
1034 	struct link_key *key, *tmp;
1035 
1036 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1037 		list_del_rcu(&key->list);
1038 		kfree_rcu(key, rcu);
1039 	}
1040 }
1041 
1042 void hci_smp_ltks_clear(struct hci_dev *hdev)
1043 {
1044 	struct smp_ltk *k, *tmp;
1045 
1046 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1047 		list_del_rcu(&k->list);
1048 		kfree_rcu(k, rcu);
1049 	}
1050 }
1051 
1052 void hci_smp_irks_clear(struct hci_dev *hdev)
1053 {
1054 	struct smp_irk *k, *tmp;
1055 
1056 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1057 		list_del_rcu(&k->list);
1058 		kfree_rcu(k, rcu);
1059 	}
1060 }
1061 
1062 void hci_blocked_keys_clear(struct hci_dev *hdev)
1063 {
1064 	struct blocked_key *b, *tmp;
1065 
1066 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1067 		list_del_rcu(&b->list);
1068 		kfree_rcu(b, rcu);
1069 	}
1070 }
1071 
1072 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1073 {
1074 	bool blocked = false;
1075 	struct blocked_key *b;
1076 
1077 	rcu_read_lock();
1078 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1079 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1080 			blocked = true;
1081 			break;
1082 		}
1083 	}
1084 
1085 	rcu_read_unlock();
1086 	return blocked;
1087 }
1088 
1089 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1090 {
1091 	struct link_key *k;
1092 
1093 	rcu_read_lock();
1094 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1095 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1096 			rcu_read_unlock();
1097 
1098 			if (hci_is_blocked_key(hdev,
1099 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1100 					       k->val)) {
1101 				bt_dev_warn_ratelimited(hdev,
1102 							"Link key blocked for %pMR",
1103 							&k->bdaddr);
1104 				return NULL;
1105 			}
1106 
1107 			return k;
1108 		}
1109 	}
1110 	rcu_read_unlock();
1111 
1112 	return NULL;
1113 }
1114 
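/* Decide whether a BR/EDR link key should be stored persistently, based on
 * the key type and the bonding requirements negotiated on @conn.
 */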
1115 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1116 			       u8 key_type, u8 old_key_type)
1117 {
1118 	/* Legacy key */
1119 	if (key_type < 0x03)
1120 		return true;
1121 
1122 	/* Debug keys are insecure so don't store them persistently */
1123 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1124 		return false;
1125 
1126 	/* Changed combination key and there's no previous one */
1127 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1128 		return false;
1129 
1130 	/* Security mode 3 case */
1131 	if (!conn)
1132 		return true;
1133 
1134 	/* BR/EDR key derived using SC from an LE link */
1135 	if (conn->type == LE_LINK)
1136 		return true;
1137 
1138 	/* Neither the local nor the remote side had no-bonding as a requirement */
1139 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140 		return true;
1141 
1142 	/* Local side had dedicated bonding as requirement */
1143 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144 		return true;
1145 
1146 	/* Remote side had dedicated bonding as requirement */
1147 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148 		return true;
1149 
1150 	/* If none of the above criteria match, then don't store the key
1151 	 * persistently */
1152 	return false;
1153 }
1154 
1155 static u8 ltk_role(u8 type)
1156 {
1157 	if (type == SMP_LTK)
1158 		return HCI_ROLE_MASTER;
1159 
1160 	return HCI_ROLE_SLAVE;
1161 }
1162 
1163 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1164 			     u8 addr_type, u8 role)
1165 {
1166 	struct smp_ltk *k;
1167 
1168 	rcu_read_lock();
1169 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1170 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1171 			continue;
1172 
1173 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1174 			rcu_read_unlock();
1175 
1176 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1177 					       k->val)) {
1178 				bt_dev_warn_ratelimited(hdev,
1179 							"LTK blocked for %pMR",
1180 							&k->bdaddr);
1181 				return NULL;
1182 			}
1183 
1184 			return k;
1185 		}
1186 	}
1187 	rcu_read_unlock();
1188 
1189 	return NULL;
1190 }
1191 
1192 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1193 {
1194 	struct smp_irk *irk_to_return = NULL;
1195 	struct smp_irk *irk;
1196 
1197 	rcu_read_lock();
1198 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1199 		if (!bacmp(&irk->rpa, rpa)) {
1200 			irk_to_return = irk;
1201 			goto done;
1202 		}
1203 	}
1204 
1205 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1206 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1207 			bacpy(&irk->rpa, rpa);
1208 			irk_to_return = irk;
1209 			goto done;
1210 		}
1211 	}
1212 
1213 done:
1214 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1215 						irk_to_return->val)) {
1216 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1217 					&irk_to_return->bdaddr);
1218 		irk_to_return = NULL;
1219 	}
1220 
1221 	rcu_read_unlock();
1222 
1223 	return irk_to_return;
1224 }
1225 
1226 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 				     u8 addr_type)
1228 {
1229 	struct smp_irk *irk_to_return = NULL;
1230 	struct smp_irk *irk;
1231 
1232 	/* Identity Address must be public or static random */
1233 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1234 		return NULL;
1235 
1236 	rcu_read_lock();
1237 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1238 		if (addr_type == irk->addr_type &&
1239 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1240 			irk_to_return = irk;
1241 			goto done;
1242 		}
1243 	}
1244 
1245 done:
1246 
1247 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1248 						irk_to_return->val)) {
1249 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1250 					&irk_to_return->bdaddr);
1251 		irk_to_return = NULL;
1252 	}
1253 
1254 	rcu_read_unlock();
1255 
1256 	return irk_to_return;
1257 }
1258 
1259 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1260 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1261 				  u8 pin_len, bool *persistent)
1262 {
1263 	struct link_key *key, *old_key;
1264 	u8 old_key_type;
1265 
1266 	old_key = hci_find_link_key(hdev, bdaddr);
1267 	if (old_key) {
1268 		old_key_type = old_key->type;
1269 		key = old_key;
1270 	} else {
1271 		old_key_type = conn ? conn->key_type : 0xff;
1272 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1273 		if (!key)
1274 			return NULL;
1275 		list_add_rcu(&key->list, &hdev->link_keys);
1276 	}
1277 
1278 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1279 
1280 	/* Some buggy controller combinations generate a changed
1281 	 * combination key for legacy pairing even when there's no
1282 	 * previous key */
1283 	if (type == HCI_LK_CHANGED_COMBINATION &&
1284 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1285 		type = HCI_LK_COMBINATION;
1286 		if (conn)
1287 			conn->key_type = type;
1288 	}
1289 
1290 	bacpy(&key->bdaddr, bdaddr);
1291 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1292 	key->pin_len = pin_len;
1293 
1294 	if (type == HCI_LK_CHANGED_COMBINATION)
1295 		key->type = old_key_type;
1296 	else
1297 		key->type = type;
1298 
1299 	if (persistent)
1300 		*persistent = hci_persistent_key(hdev, conn, type,
1301 						 old_key_type);
1302 
1303 	return key;
1304 }
1305 
1306 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1307 			    u8 addr_type, u8 type, u8 authenticated,
1308 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1309 {
1310 	struct smp_ltk *key, *old_key;
1311 	u8 role = ltk_role(type);
1312 
1313 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1314 	if (old_key)
1315 		key = old_key;
1316 	else {
1317 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1318 		if (!key)
1319 			return NULL;
1320 		list_add_rcu(&key->list, &hdev->long_term_keys);
1321 	}
1322 
1323 	bacpy(&key->bdaddr, bdaddr);
1324 	key->bdaddr_type = addr_type;
1325 	memcpy(key->val, tk, sizeof(key->val));
1326 	key->authenticated = authenticated;
1327 	key->ediv = ediv;
1328 	key->rand = rand;
1329 	key->enc_size = enc_size;
1330 	key->type = type;
1331 
1332 	return key;
1333 }
1334 
1335 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1336 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1337 {
1338 	struct smp_irk *irk;
1339 
1340 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1341 	if (!irk) {
1342 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1343 		if (!irk)
1344 			return NULL;
1345 
1346 		bacpy(&irk->bdaddr, bdaddr);
1347 		irk->addr_type = addr_type;
1348 
1349 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1350 	}
1351 
1352 	memcpy(irk->val, val, 16);
1353 	bacpy(&irk->rpa, rpa);
1354 
1355 	return irk;
1356 }
1357 
1358 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359 {
1360 	struct link_key *key;
1361 
1362 	key = hci_find_link_key(hdev, bdaddr);
1363 	if (!key)
1364 		return -ENOENT;
1365 
1366 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1367 
1368 	list_del_rcu(&key->list);
1369 	kfree_rcu(key, rcu);
1370 
1371 	return 0;
1372 }
1373 
1374 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1375 {
1376 	struct smp_ltk *k, *tmp;
1377 	int removed = 0;
1378 
1379 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1380 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1381 			continue;
1382 
1383 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1384 
1385 		list_del_rcu(&k->list);
1386 		kfree_rcu(k, rcu);
1387 		removed++;
1388 	}
1389 
1390 	return removed ? 0 : -ENOENT;
1391 }
1392 
1393 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1394 {
1395 	struct smp_irk *k, *tmp;
1396 
1397 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1398 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1399 			continue;
1400 
1401 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402 
1403 		list_del_rcu(&k->list);
1404 		kfree_rcu(k, rcu);
1405 	}
1406 }
1407 
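/* Return true if pairing data (a link key or an LTK) exists for @bdaddr,
 * resolving the address through a stored IRK first where possible.
 */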
1408 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1409 {
1410 	struct smp_ltk *k;
1411 	struct smp_irk *irk;
1412 	u8 addr_type;
1413 
1414 	if (type == BDADDR_BREDR) {
1415 		if (hci_find_link_key(hdev, bdaddr))
1416 			return true;
1417 		return false;
1418 	}
1419 
1420 	/* Convert to HCI addr type which struct smp_ltk uses */
1421 	if (type == BDADDR_LE_PUBLIC)
1422 		addr_type = ADDR_LE_DEV_PUBLIC;
1423 	else
1424 		addr_type = ADDR_LE_DEV_RANDOM;
1425 
1426 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1427 	if (irk) {
1428 		bdaddr = &irk->bdaddr;
1429 		addr_type = irk->addr_type;
1430 	}
1431 
1432 	rcu_read_lock();
1433 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1434 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1435 			rcu_read_unlock();
1436 			return true;
1437 		}
1438 	}
1439 	rcu_read_unlock();
1440 
1441 	return false;
1442 }
1443 
1444 /* HCI command timer function */
1445 static void hci_cmd_timeout(struct work_struct *work)
1446 {
1447 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1448 					    cmd_timer.work);
1449 
1450 	if (hdev->req_skb) {
1451 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1452 
1453 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1454 
1455 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1456 	} else {
1457 		bt_dev_err(hdev, "command tx timeout");
1458 	}
1459 
1460 	if (hdev->reset)
1461 		hdev->reset(hdev);
1462 
1463 	atomic_set(&hdev->cmd_cnt, 1);
1464 	queue_work(hdev->workqueue, &hdev->cmd_work);
1465 }
1466 
1467 /* HCI ncmd timer function */
1468 static void hci_ncmd_timeout(struct work_struct *work)
1469 {
1470 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1471 					    ncmd_timer.work);
1472 
1473 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1474 
1475 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1476 	 * triggers since the procedure has its own timeout handling.
1477 	 */
1478 	if (test_bit(HCI_INIT, &hdev->flags))
1479 		return;
1480 
1481 	/* This is an irrecoverable state, inject hardware error event */
1482 	hci_reset_dev(hdev);
1483 }
1484 
1485 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1486 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1487 {
1488 	struct oob_data *data;
1489 
1490 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1491 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1492 			continue;
1493 		if (data->bdaddr_type != bdaddr_type)
1494 			continue;
1495 		return data;
1496 	}
1497 
1498 	return NULL;
1499 }
1500 
1501 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1502 			       u8 bdaddr_type)
1503 {
1504 	struct oob_data *data;
1505 
1506 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1507 	if (!data)
1508 		return -ENOENT;
1509 
1510 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1511 
1512 	list_del(&data->list);
1513 	kfree(data);
1514 
1515 	return 0;
1516 }
1517 
1518 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1519 {
1520 	struct oob_data *data, *n;
1521 
1522 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1523 		list_del(&data->list);
1524 		kfree(data);
1525 	}
1526 }
1527 
1528 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1529 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1530 			    u8 *hash256, u8 *rand256)
1531 {
1532 	struct oob_data *data;
1533 
1534 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1535 	if (!data) {
1536 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1537 		if (!data)
1538 			return -ENOMEM;
1539 
1540 		bacpy(&data->bdaddr, bdaddr);
1541 		data->bdaddr_type = bdaddr_type;
1542 		list_add(&data->list, &hdev->remote_oob_data);
1543 	}
1544 
1545 	if (hash192 && rand192) {
1546 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1547 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1548 		if (hash256 && rand256)
1549 			data->present = 0x03;
1550 	} else {
1551 		memset(data->hash192, 0, sizeof(data->hash192));
1552 		memset(data->rand192, 0, sizeof(data->rand192));
1553 		if (hash256 && rand256)
1554 			data->present = 0x02;
1555 		else
1556 			data->present = 0x00;
1557 	}
1558 
1559 	if (hash256 && rand256) {
1560 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1561 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1562 	} else {
1563 		memset(data->hash256, 0, sizeof(data->hash256));
1564 		memset(data->rand256, 0, sizeof(data->rand256));
1565 		if (hash192 && rand192)
1566 			data->present = 0x01;
1567 	}
1568 
1569 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1570 
1571 	return 0;
1572 }
1573 
1574 /* This function requires the caller holds hdev->lock */
1575 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1576 {
1577 	struct adv_info *adv_instance;
1578 
1579 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1580 		if (adv_instance->instance == instance)
1581 			return adv_instance;
1582 	}
1583 
1584 	return NULL;
1585 }
1586 
1587 /* This function requires the caller holds hdev->lock */
1588 struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid)
1589 {
1590 	struct adv_info *adv;
1591 
1592 	list_for_each_entry(adv, &hdev->adv_instances, list) {
1593 		if (adv->sid == sid)
1594 			return adv;
1595 	}
1596 
1597 	return NULL;
1598 }
1599 
1600 /* This function requires the caller holds hdev->lock */
1601 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1602 {
1603 	struct adv_info *cur_instance;
1604 
1605 	cur_instance = hci_find_adv_instance(hdev, instance);
1606 	if (!cur_instance)
1607 		return NULL;
1608 
1609 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1610 					    struct adv_info, list))
1611 		return list_first_entry(&hdev->adv_instances,
1612 						 struct adv_info, list);
1613 	else
1614 		return list_next_entry(cur_instance, list);
1615 }
1616 
1617 /* This function requires the caller holds hdev->lock */
1618 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1619 {
1620 	struct adv_info *adv_instance;
1621 
1622 	adv_instance = hci_find_adv_instance(hdev, instance);
1623 	if (!adv_instance)
1624 		return -ENOENT;
1625 
1626 	BT_DBG("%s removing instance %d", hdev->name, instance);
1627 
1628 	if (hdev->cur_adv_instance == instance) {
1629 		if (hdev->adv_instance_timeout) {
1630 			cancel_delayed_work(&hdev->adv_instance_expire);
1631 			hdev->adv_instance_timeout = 0;
1632 		}
1633 		hdev->cur_adv_instance = 0x00;
1634 	}
1635 
1636 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1637 
1638 	list_del(&adv_instance->list);
1639 	kfree(adv_instance);
1640 
1641 	hdev->adv_instance_cnt--;
1642 
1643 	return 0;
1644 }
1645 
1646 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1647 {
1648 	struct adv_info *adv_instance, *n;
1649 
1650 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1651 		adv_instance->rpa_expired = rpa_expired;
1652 }
1653 
1654 /* This function requires the caller holds hdev->lock */
1655 void hci_adv_instances_clear(struct hci_dev *hdev)
1656 {
1657 	struct adv_info *adv_instance, *n;
1658 
1659 	if (hdev->adv_instance_timeout) {
1660 		disable_delayed_work(&hdev->adv_instance_expire);
1661 		hdev->adv_instance_timeout = 0;
1662 	}
1663 
1664 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1665 		disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1666 		list_del(&adv_instance->list);
1667 		kfree(adv_instance);
1668 	}
1669 
1670 	hdev->adv_instance_cnt = 0;
1671 	hdev->cur_adv_instance = 0x00;
1672 }
1673 
1674 static void adv_instance_rpa_expired(struct work_struct *work)
1675 {
1676 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1677 						     rpa_expired_cb.work);
1678 
1679 	BT_DBG("");
1680 
1681 	adv_instance->rpa_expired = true;
1682 }
1683 
1684 /* This function requires the caller holds hdev->lock */
1685 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1686 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1687 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1688 				      u16 timeout, u16 duration, s8 tx_power,
1689 				      u32 min_interval, u32 max_interval,
1690 				      u8 mesh_handle)
1691 {
1692 	struct adv_info *adv;
1693 
1694 	adv = hci_find_adv_instance(hdev, instance);
1695 	if (adv) {
1696 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1697 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1698 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1699 	} else {
1700 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1701 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1702 			return ERR_PTR(-EOVERFLOW);
1703 
1704 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1705 		if (!adv)
1706 			return ERR_PTR(-ENOMEM);
1707 
1708 		adv->pending = true;
1709 		adv->instance = instance;
1710 
1711 		/* If the controller supports only one set and the instance is set to
1712 		 * 1 then there is no option other than using handle 0x00.
1713 		 */
1714 		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1715 			adv->handle = 0x00;
1716 		else
1717 			adv->handle = instance;
1718 
1719 		list_add(&adv->list, &hdev->adv_instances);
1720 		hdev->adv_instance_cnt++;
1721 	}
1722 
1723 	adv->flags = flags;
1724 	adv->min_interval = min_interval;
1725 	adv->max_interval = max_interval;
1726 	adv->tx_power = tx_power;
1727 	/* Defining a mesh_handle changes the timing units to ms,
1728 	 * rather than seconds, and ties the instance to the requested
1729 	 * mesh_tx queue.
1730 	 */
1731 	adv->mesh = mesh_handle;
1732 
1733 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1734 				  scan_rsp_len, scan_rsp_data);
1735 
1736 	adv->timeout = timeout;
1737 	adv->remaining_time = timeout;
1738 
1739 	if (duration == 0)
1740 		adv->duration = hdev->def_multi_adv_rotation_duration;
1741 	else
1742 		adv->duration = duration;
1743 
1744 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1745 
1746 	BT_DBG("%s for instance %d", hdev->name, instance);
1747 
1748 	return adv;
1749 }
1750 
1751 /* This function requires the caller holds hdev->lock */
1752 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
1753 				      u32 flags, u8 data_len, u8 *data,
1754 				      u32 min_interval, u32 max_interval)
1755 {
1756 	struct adv_info *adv;
1757 
1758 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1759 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1760 				   min_interval, max_interval, 0);
1761 	if (IS_ERR(adv))
1762 		return adv;
1763 
1764 	adv->sid = sid;
1765 	adv->periodic = true;
1766 	adv->per_adv_data_len = data_len;
1767 
1768 	if (data)
1769 		memcpy(adv->per_adv_data, data, data_len);
1770 
1771 	return adv;
1772 }
1773 
1774 /* This function requires the caller holds hdev->lock */
1775 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1776 			      u16 adv_data_len, u8 *adv_data,
1777 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1778 {
1779 	struct adv_info *adv;
1780 
1781 	adv = hci_find_adv_instance(hdev, instance);
1782 
1783 	/* If advertisement doesn't exist, we can't modify its data */
1784 	if (!adv)
1785 		return -ENOENT;
1786 
1787 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1788 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1789 		memcpy(adv->adv_data, adv_data, adv_data_len);
1790 		adv->adv_data_len = adv_data_len;
1791 		adv->adv_data_changed = true;
1792 	}
1793 
1794 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1795 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1796 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1797 		adv->scan_rsp_len = scan_rsp_len;
1798 		adv->scan_rsp_changed = true;
1799 	}
1800 
1801 	/* Mark as changed if there are flags which would affect it */
1802 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1803 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1804 		adv->scan_rsp_changed = true;
1805 
1806 	return 0;
1807 }
1808 
1809 /* This function requires the caller holds hdev->lock */
1810 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1811 {
1812 	u32 flags;
1813 	struct adv_info *adv;
1814 
1815 	if (instance == 0x00) {
1816 		/* Instance 0 always manages the "Tx Power" and "Flags"
1817 		 * fields
1818 		 */
1819 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1820 
1821 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1822 		 * corresponds to the "connectable" instance flag.
1823 		 */
1824 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1825 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1826 
1827 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1828 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1829 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1830 			flags |= MGMT_ADV_FLAG_DISCOV;
1831 
1832 		return flags;
1833 	}
1834 
1835 	adv = hci_find_adv_instance(hdev, instance);
1836 
1837 	/* Return 0 when we get an invalid instance identifier. */
1838 	if (!adv)
1839 		return 0;
1840 
1841 	return adv->flags;
1842 }
1843 
1844 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1845 {
1846 	struct adv_info *adv;
1847 
1848 	/* Instance 0x00 always sets the local name */
1849 	if (instance == 0x00)
1850 		return true;
1851 
1852 	adv = hci_find_adv_instance(hdev, instance);
1853 	if (!adv)
1854 		return false;
1855 
1856 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1857 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1858 		return true;
1859 
1860 	return adv->scan_rsp_len ? true : false;
1861 }
1862 
1863 /* This function requires the caller holds hdev->lock */
1864 void hci_adv_monitors_clear(struct hci_dev *hdev)
1865 {
1866 	struct adv_monitor *monitor;
1867 	int handle;
1868 
1869 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1870 		hci_free_adv_monitor(hdev, monitor);
1871 
1872 	idr_destroy(&hdev->adv_monitors_idr);
1873 }
1874 
1875 /* Frees the monitor structure and does some bookkeeping.
1876  * This function requires the caller holds hdev->lock.
1877  */
1878 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1879 {
1880 	struct adv_pattern *pattern;
1881 	struct adv_pattern *tmp;
1882 
1883 	if (!monitor)
1884 		return;
1885 
1886 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1887 		list_del(&pattern->list);
1888 		kfree(pattern);
1889 	}
1890 
1891 	if (monitor->handle)
1892 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1893 
1894 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1895 		hdev->adv_monitors_cnt--;
1896 
1897 	kfree(monitor);
1898 }
1899 
1900 /* Assigns a handle to a monitor, and if offloading is supported and power is on,
1901  * also attempts to forward the request to the controller.
1902  * This function requires the caller holds hci_req_sync_lock.
1903  */
1904 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1905 {
1906 	int min, max, handle;
1907 	int status = 0;
1908 
1909 	if (!monitor)
1910 		return -EINVAL;
1911 
1912 	hci_dev_lock(hdev);
1913 
1914 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1915 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1916 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1917 			   GFP_KERNEL);
1918 
1919 	hci_dev_unlock(hdev);
1920 
1921 	if (handle < 0)
1922 		return handle;
1923 
1924 	monitor->handle = handle;
1925 
1926 	if (!hdev_is_powered(hdev))
1927 		return status;
1928 
1929 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1930 	case HCI_ADV_MONITOR_EXT_NONE:
1931 		bt_dev_dbg(hdev, "add monitor %d status %d",
1932 			   monitor->handle, status);
1933 		/* Message was not forwarded to controller - not an error */
1934 		break;
1935 
1936 	case HCI_ADV_MONITOR_EXT_MSFT:
1937 		status = msft_add_monitor_pattern(hdev, monitor);
1938 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1939 			   handle, status);
1940 		break;
1941 	}
1942 
1943 	return status;
1944 }
1945 
1946 /* Attempts to tell the controller and free the monitor. If somehow the
1947  * controller doesn't have a corresponding handle, remove anyway.
1948  * This function requires the caller holds hci_req_sync_lock.
1949  */
1950 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1951 				  struct adv_monitor *monitor)
1952 {
1953 	int status = 0;
1954 	int handle;
1955 
1956 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1957 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1958 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1959 			   monitor->handle, status);
1960 		goto free_monitor;
1961 
1962 	case HCI_ADV_MONITOR_EXT_MSFT:
1963 		handle = monitor->handle;
1964 		status = msft_remove_monitor(hdev, monitor);
1965 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1966 			   handle, status);
1967 		break;
1968 	}
1969 
1970 	/* In case no matching handle registered, just free the monitor */
1971 	if (status == -ENOENT)
1972 		goto free_monitor;
1973 
1974 	return status;
1975 
1976 free_monitor:
1977 	if (status == -ENOENT)
1978 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1979 			    monitor->handle);
1980 	hci_free_adv_monitor(hdev, monitor);
1981 
1982 	return status;
1983 }
1984 
1985 /* This function requires the caller holds hci_req_sync_lock */
1986 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1987 {
1988 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1989 
1990 	if (!monitor)
1991 		return -EINVAL;
1992 
1993 	return hci_remove_adv_monitor(hdev, monitor);
1994 }
1995 
1996 /* This function requires the caller holds hci_req_sync_lock */
1997 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1998 {
1999 	struct adv_monitor *monitor;
2000 	int idr_next_id = 0;
2001 	int status = 0;
2002 
2003 	while (1) {
2004 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2005 		if (!monitor)
2006 			break;
2007 
2008 		status = hci_remove_adv_monitor(hdev, monitor);
2009 		if (status)
2010 			return status;
2011 
2012 		idr_next_id++;
2013 	}
2014 
2015 	return status;
2016 }
2017 
2018 /* This function requires the caller holds hdev->lock */
2019 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2020 {
2021 	return !idr_is_empty(&hdev->adv_monitors_idr);
2022 }
2023 
2024 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2025 {
2026 	if (msft_monitor_supported(hdev))
2027 		return HCI_ADV_MONITOR_EXT_MSFT;
2028 
2029 	return HCI_ADV_MONITOR_EXT_NONE;
2030 }
2031 
2032 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2033 					 bdaddr_t *bdaddr, u8 type)
2034 {
2035 	struct bdaddr_list *b;
2036 
2037 	list_for_each_entry(b, bdaddr_list, list) {
2038 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2039 			return b;
2040 	}
2041 
2042 	return NULL;
2043 }
2044 
2045 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2046 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2047 				u8 type)
2048 {
2049 	struct bdaddr_list_with_irk *b;
2050 
2051 	list_for_each_entry(b, bdaddr_list, list) {
2052 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2053 			return b;
2054 	}
2055 
2056 	return NULL;
2057 }
2058 
2059 struct bdaddr_list_with_flags *
2060 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2061 				  bdaddr_t *bdaddr, u8 type)
2062 {
2063 	struct bdaddr_list_with_flags *b;
2064 
2065 	list_for_each_entry(b, bdaddr_list, list) {
2066 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2067 			return b;
2068 	}
2069 
2070 	return NULL;
2071 }
2072 
2073 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2074 {
2075 	struct bdaddr_list *b, *n;
2076 
2077 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2078 		list_del(&b->list);
2079 		kfree(b);
2080 	}
2081 }
2082 
2083 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2084 {
2085 	struct bdaddr_list *entry;
2086 
2087 	if (!bacmp(bdaddr, BDADDR_ANY))
2088 		return -EBADF;
2089 
2090 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2091 		return -EEXIST;
2092 
2093 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2094 	if (!entry)
2095 		return -ENOMEM;
2096 
2097 	bacpy(&entry->bdaddr, bdaddr);
2098 	entry->bdaddr_type = type;
2099 
2100 	list_add(&entry->list, list);
2101 
2102 	return 0;
2103 }
2104 
2105 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2106 					u8 type, u8 *peer_irk, u8 *local_irk)
2107 {
2108 	struct bdaddr_list_with_irk *entry;
2109 
2110 	if (!bacmp(bdaddr, BDADDR_ANY))
2111 		return -EBADF;
2112 
2113 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2114 		return -EEXIST;
2115 
2116 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2117 	if (!entry)
2118 		return -ENOMEM;
2119 
2120 	bacpy(&entry->bdaddr, bdaddr);
2121 	entry->bdaddr_type = type;
2122 
2123 	if (peer_irk)
2124 		memcpy(entry->peer_irk, peer_irk, 16);
2125 
2126 	if (local_irk)
2127 		memcpy(entry->local_irk, local_irk, 16);
2128 
2129 	list_add(&entry->list, list);
2130 
2131 	return 0;
2132 }
2133 
2134 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2135 				   u8 type, u32 flags)
2136 {
2137 	struct bdaddr_list_with_flags *entry;
2138 
2139 	if (!bacmp(bdaddr, BDADDR_ANY))
2140 		return -EBADF;
2141 
2142 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2143 		return -EEXIST;
2144 
2145 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2146 	if (!entry)
2147 		return -ENOMEM;
2148 
2149 	bacpy(&entry->bdaddr, bdaddr);
2150 	entry->bdaddr_type = type;
2151 	entry->flags = flags;
2152 
2153 	list_add(&entry->list, list);
2154 
2155 	return 0;
2156 }
2157 
2158 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2159 {
2160 	struct bdaddr_list *entry;
2161 
2162 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2163 		hci_bdaddr_list_clear(list);
2164 		return 0;
2165 	}
2166 
2167 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2168 	if (!entry)
2169 		return -ENOENT;
2170 
2171 	list_del(&entry->list);
2172 	kfree(entry);
2173 
2174 	return 0;
2175 }
2176 
2177 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2178 							u8 type)
2179 {
2180 	struct bdaddr_list_with_irk *entry;
2181 
2182 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2183 		hci_bdaddr_list_clear(list);
2184 		return 0;
2185 	}
2186 
2187 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2188 	if (!entry)
2189 		return -ENOENT;
2190 
2191 	list_del(&entry->list);
2192 	kfree(entry);
2193 
2194 	return 0;
2195 }
2196 
2197 /* This function requires the caller holds hdev->lock */
2198 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2199 					       bdaddr_t *addr, u8 addr_type)
2200 {
2201 	struct hci_conn_params *params;
2202 
2203 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2204 		if (bacmp(&params->addr, addr) == 0 &&
2205 		    params->addr_type == addr_type) {
2206 			return params;
2207 		}
2208 	}
2209 
2210 	return NULL;
2211 }
2212 
2213 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2214 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2215 						  bdaddr_t *addr, u8 addr_type)
2216 {
2217 	struct hci_conn_params *param;
2218 
2219 	rcu_read_lock();
2220 
2221 	list_for_each_entry_rcu(param, list, action) {
2222 		if (bacmp(&param->addr, addr) == 0 &&
2223 		    param->addr_type == addr_type) {
2224 			rcu_read_unlock();
2225 			return param;
2226 		}
2227 	}
2228 
2229 	rcu_read_unlock();
2230 
2231 	return NULL;
2232 }
2233 
2234 /* This function requires the caller holds hdev->lock */
2235 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2236 {
2237 	if (list_empty(&param->action))
2238 		return;
2239 
2240 	list_del_rcu(&param->action);
2241 	synchronize_rcu();
2242 	INIT_LIST_HEAD(&param->action);
2243 }
2244 
2245 /* This function requires the caller holds hdev->lock */
2246 void hci_pend_le_list_add(struct hci_conn_params *param,
2247 			  struct list_head *list)
2248 {
2249 	list_add_rcu(&param->action, list);
2250 }
2251 
2252 /* This function requires the caller holds hdev->lock */
2253 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2254 					    bdaddr_t *addr, u8 addr_type)
2255 {
2256 	struct hci_conn_params *params;
2257 
2258 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2259 	if (params)
2260 		return params;
2261 
2262 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2263 	if (!params) {
2264 		bt_dev_err(hdev, "out of memory");
2265 		return NULL;
2266 	}
2267 
2268 	bacpy(&params->addr, addr);
2269 	params->addr_type = addr_type;
2270 
2271 	list_add(&params->list, &hdev->le_conn_params);
2272 	INIT_LIST_HEAD(&params->action);
2273 
2274 	params->conn_min_interval = hdev->le_conn_min_interval;
2275 	params->conn_max_interval = hdev->le_conn_max_interval;
2276 	params->conn_latency = hdev->le_conn_latency;
2277 	params->supervision_timeout = hdev->le_supv_timeout;
2278 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2279 
2280 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2281 
2282 	return params;
2283 }
2284 
2285 void hci_conn_params_free(struct hci_conn_params *params)
2286 {
2287 	hci_pend_le_list_del_init(params);
2288 
2289 	if (params->conn) {
2290 		hci_conn_drop(params->conn);
2291 		hci_conn_put(params->conn);
2292 	}
2293 
2294 	list_del(&params->list);
2295 	kfree(params);
2296 }
2297 
2298 /* This function requires the caller holds hdev->lock */
2299 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2300 {
2301 	struct hci_conn_params *params;
2302 
2303 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2304 	if (!params)
2305 		return;
2306 
2307 	hci_conn_params_free(params);
2308 
2309 	hci_update_passive_scan(hdev);
2310 
2311 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2312 }
2313 
2314 /* This function requires the caller holds hdev->lock */
2315 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2316 {
2317 	struct hci_conn_params *params, *tmp;
2318 
2319 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2320 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2321 			continue;
2322 
2323 		/* If trying to establish a one-time connection to a disabled
2324 		 * device, leave the params but mark them as just once.
2325 		 */
2326 		if (params->explicit_connect) {
2327 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2328 			continue;
2329 		}
2330 
2331 		hci_conn_params_free(params);
2332 	}
2333 
2334 	BT_DBG("All LE disabled connection parameters were removed");
2335 }
2336 
2337 /* This function requires the caller holds hdev->lock */
2338 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2339 {
2340 	struct hci_conn_params *params, *tmp;
2341 
2342 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2343 		hci_conn_params_free(params);
2344 
2345 	BT_DBG("All LE connection parameters were removed");
2346 }
2347 
2348 /* Copy the Identity Address of the controller.
2349  *
2350  * If the controller has a public BD_ADDR, then by default use that one.
2351  * If this is a LE only controller without a public address, default to
2352  * the static random address.
2353  *
2354  * For debugging purposes it is possible to force controllers with a
2355  * public address to use the static random address instead.
2356  *
2357  * In case BR/EDR has been disabled on a dual-mode controller and
2358  * userspace has configured a static address, then that address
2359  * becomes the identity address instead of the public BR/EDR address.
2360  */
2361 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362 			       u8 *bdaddr_type)
2363 {
2364 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2365 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2366 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2367 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2368 		bacpy(bdaddr, &hdev->static_addr);
2369 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2370 	} else {
2371 		bacpy(bdaddr, &hdev->bdaddr);
2372 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2373 	}
2374 }
2375 
2376 static void hci_clear_wake_reason(struct hci_dev *hdev)
2377 {
2378 	hci_dev_lock(hdev);
2379 
2380 	hdev->wake_reason = 0;
2381 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2382 	hdev->wake_addr_type = 0;
2383 
2384 	hci_dev_unlock(hdev);
2385 }
2386 
2387 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2388 				void *data)
2389 {
2390 	struct hci_dev *hdev =
2391 		container_of(nb, struct hci_dev, suspend_notifier);
2392 	int ret = 0;
2393 
2394 	/* Userspace has full control of this device. Do nothing. */
2395 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2396 		return NOTIFY_DONE;
2397 
2398 	/* To avoid a potential race with hci_unregister_dev. */
2399 	hci_dev_hold(hdev);
2400 
2401 	switch (action) {
2402 	case PM_HIBERNATION_PREPARE:
2403 	case PM_SUSPEND_PREPARE:
2404 		ret = hci_suspend_dev(hdev);
2405 		break;
2406 	case PM_POST_HIBERNATION:
2407 	case PM_POST_SUSPEND:
2408 		ret = hci_resume_dev(hdev);
2409 		break;
2410 	}
2411 
2412 	if (ret)
2413 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2414 			   action, ret);
2415 
2416 	hci_dev_put(hdev);
2417 	return NOTIFY_DONE;
2418 }
2419 
2420 /* Alloc HCI device */
2421 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2422 {
2423 	struct hci_dev *hdev;
2424 	unsigned int alloc_size;
2425 
2426 	alloc_size = sizeof(*hdev);
2427 	if (sizeof_priv) {
2428 		/* FIXME: May need ALIGN-ment? */
2429 		alloc_size += sizeof_priv;
2430 	}
2431 
2432 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2433 	if (!hdev)
2434 		return NULL;
2435 
2436 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2437 	hdev->esco_type = (ESCO_HV1);
2438 	hdev->link_mode = (HCI_LM_ACCEPT);
2439 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2440 	hdev->io_capability = 0x03;	/* No Input No Output */
2441 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2442 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2443 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2444 	hdev->adv_instance_cnt = 0;
2445 	hdev->cur_adv_instance = 0x00;
2446 	hdev->adv_instance_timeout = 0;
2447 
2448 	hdev->advmon_allowlist_duration = 300;
2449 	hdev->advmon_no_filter_duration = 500;
2450 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2451 
2452 	hdev->sniff_max_interval = 800;
2453 	hdev->sniff_min_interval = 80;
2454 
2455 	hdev->le_adv_channel_map = 0x07;
2456 	hdev->le_adv_min_interval = 0x0800;
2457 	hdev->le_adv_max_interval = 0x0800;
2458 	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2459 	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2460 	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2461 	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2462 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2463 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2464 	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2465 	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2466 	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2467 	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2468 	hdev->le_conn_min_interval = 0x0018;
2469 	hdev->le_conn_max_interval = 0x0028;
2470 	hdev->le_conn_latency = 0x0000;
2471 	hdev->le_supv_timeout = 0x002a;
2472 	hdev->le_def_tx_len = 0x001b;
2473 	hdev->le_def_tx_time = 0x0148;
2474 	hdev->le_max_tx_len = 0x001b;
2475 	hdev->le_max_tx_time = 0x0148;
2476 	hdev->le_max_rx_len = 0x001b;
2477 	hdev->le_max_rx_time = 0x0148;
2478 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2479 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2480 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2481 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2482 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2483 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2484 	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2485 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2486 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2487 
2488 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2489 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2490 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2491 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2492 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2493 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2494 
2495 	/* default 1.28 sec page scan */
2496 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2497 	hdev->def_page_scan_int = 0x0800;
2498 	hdev->def_page_scan_window = 0x0012;
2499 
2500 	mutex_init(&hdev->lock);
2501 	mutex_init(&hdev->req_lock);
2502 	mutex_init(&hdev->mgmt_pending_lock);
2503 
2504 	ida_init(&hdev->unset_handle_ida);
2505 
2506 	INIT_LIST_HEAD(&hdev->mesh_pending);
2507 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2508 	INIT_LIST_HEAD(&hdev->reject_list);
2509 	INIT_LIST_HEAD(&hdev->accept_list);
2510 	INIT_LIST_HEAD(&hdev->uuids);
2511 	INIT_LIST_HEAD(&hdev->link_keys);
2512 	INIT_LIST_HEAD(&hdev->long_term_keys);
2513 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2514 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2515 	INIT_LIST_HEAD(&hdev->le_accept_list);
2516 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2517 	INIT_LIST_HEAD(&hdev->le_conn_params);
2518 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2519 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2520 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2521 	INIT_LIST_HEAD(&hdev->adv_instances);
2522 	INIT_LIST_HEAD(&hdev->blocked_keys);
2523 	INIT_LIST_HEAD(&hdev->monitored_devices);
2524 
2525 	INIT_LIST_HEAD(&hdev->local_codecs);
2526 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2527 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2528 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2529 	INIT_WORK(&hdev->power_on, hci_power_on);
2530 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2531 
2532 	hci_cmd_sync_init(hdev);
2533 
2534 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2535 
2536 	skb_queue_head_init(&hdev->rx_q);
2537 	skb_queue_head_init(&hdev->cmd_q);
2538 	skb_queue_head_init(&hdev->raw_q);
2539 
2540 	init_waitqueue_head(&hdev->req_wait_q);
2541 
2542 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2543 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2544 
2545 	hci_devcd_setup(hdev);
2546 
2547 	hci_init_sysfs(hdev);
2548 	discovery_init(hdev);
2549 
2550 	return hdev;
2551 }
2552 EXPORT_SYMBOL(hci_alloc_dev_priv);
2553 
2554 /* Free HCI device */
2555 void hci_free_dev(struct hci_dev *hdev)
2556 {
2557 	/* will free via device release */
2558 	put_device(&hdev->dev);
2559 }
2560 EXPORT_SYMBOL(hci_free_dev);
2561 
2562 /* Register HCI device */
2563 int hci_register_dev(struct hci_dev *hdev)
2564 {
2565 	int id, error;
2566 
2567 	if (!hdev->open || !hdev->close || !hdev->send)
2568 		return -EINVAL;
2569 
2570 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2571 	if (id < 0)
2572 		return id;
2573 
2574 	error = dev_set_name(&hdev->dev, "hci%u", id);
2575 	if (error)
2576 		return error;
2577 
2578 	hdev->name = dev_name(&hdev->dev);
2579 	hdev->id = id;
2580 
2581 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2582 
2583 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2584 	if (!hdev->workqueue) {
2585 		error = -ENOMEM;
2586 		goto err;
2587 	}
2588 
2589 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2590 						      hdev->name);
2591 	if (!hdev->req_workqueue) {
2592 		destroy_workqueue(hdev->workqueue);
2593 		error = -ENOMEM;
2594 		goto err;
2595 	}
2596 
2597 	if (!IS_ERR_OR_NULL(bt_debugfs))
2598 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2599 
2600 	error = device_add(&hdev->dev);
2601 	if (error < 0)
2602 		goto err_wqueue;
2603 
2604 	hci_leds_init(hdev);
2605 
2606 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2607 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2608 				    hdev);
2609 	if (hdev->rfkill) {
2610 		if (rfkill_register(hdev->rfkill) < 0) {
2611 			rfkill_destroy(hdev->rfkill);
2612 			hdev->rfkill = NULL;
2613 		}
2614 	}
2615 
2616 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2617 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2618 
2619 	hci_dev_set_flag(hdev, HCI_SETUP);
2620 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2621 
2622 	/* Assume BR/EDR support until proven otherwise (such as
2623 	 * through reading supported features during init).
2624 	 */
2625 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2626 
2627 	write_lock(&hci_dev_list_lock);
2628 	list_add(&hdev->list, &hci_dev_list);
2629 	write_unlock(&hci_dev_list_lock);
2630 
2631 	/* Devices that are marked for raw-only usage are unconfigured
2632 	 * and should not be included in normal operation.
2633 	 */
2634 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2635 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2636 
2637 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2638 	 * callback.
2639 	 */
2640 	if (hdev->wakeup)
2641 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2642 
2643 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2644 	hci_dev_hold(hdev);
2645 
2646 	error = hci_register_suspend_notifier(hdev);
2647 	if (error)
2648 		BT_WARN("register suspend notifier failed error:%d\n", error);
2649 
2650 	queue_work(hdev->req_workqueue, &hdev->power_on);
2651 
2652 	idr_init(&hdev->adv_monitors_idr);
2653 	msft_register(hdev);
2654 
2655 	return id;
2656 
2657 err_wqueue:
2658 	debugfs_remove_recursive(hdev->debugfs);
2659 	destroy_workqueue(hdev->workqueue);
2660 	destroy_workqueue(hdev->req_workqueue);
2661 err:
2662 	ida_free(&hci_index_ida, hdev->id);
2663 
2664 	return error;
2665 }
2666 EXPORT_SYMBOL(hci_register_dev);
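
/* Illustrative sketch (not part of the original source): a minimal driver
 * registration flow. The my_probe/my_open/my_close/my_send names below are
 * hypothetical; the core rejects registration unless open, close and send
 * callbacks are all provided.
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev;
 *
 *		hdev = hci_alloc_dev_priv(0);
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_VIRTUAL;
 *		hdev->open  = my_open;
 *		hdev->close = my_close;
 *		hdev->send  = my_send;
 *
 *		if (hci_register_dev(hdev) < 0) {
 *			hci_free_dev(hdev);
 *			return -EBUSY;
 *		}
 *
 *		return 0;
 *	}
 */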
2667 
2668 /* Unregister HCI device */
2669 void hci_unregister_dev(struct hci_dev *hdev)
2670 {
2671 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2672 
2673 	mutex_lock(&hdev->unregister_lock);
2674 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2675 	mutex_unlock(&hdev->unregister_lock);
2676 
2677 	write_lock(&hci_dev_list_lock);
2678 	list_del(&hdev->list);
2679 	write_unlock(&hci_dev_list_lock);
2680 
2681 	disable_work_sync(&hdev->rx_work);
2682 	disable_work_sync(&hdev->cmd_work);
2683 	disable_work_sync(&hdev->tx_work);
2684 	disable_work_sync(&hdev->power_on);
2685 	disable_work_sync(&hdev->error_reset);
2686 
2687 	hci_cmd_sync_clear(hdev);
2688 
2689 	hci_unregister_suspend_notifier(hdev);
2690 
2691 	hci_dev_do_close(hdev);
2692 
2693 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2694 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2695 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2696 		hci_dev_lock(hdev);
2697 		mgmt_index_removed(hdev);
2698 		hci_dev_unlock(hdev);
2699 	}
2700 
2701 	/* mgmt_index_removed should take care of emptying the
2702 	 * pending list */
2703 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2704 
2705 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2706 
2707 	if (hdev->rfkill) {
2708 		rfkill_unregister(hdev->rfkill);
2709 		rfkill_destroy(hdev->rfkill);
2710 	}
2711 
2712 	device_del(&hdev->dev);
2713 	/* Actual cleanup is deferred until hci_release_dev(). */
2714 	hci_dev_put(hdev);
2715 }
2716 EXPORT_SYMBOL(hci_unregister_dev);
2717 
2718 /* Release HCI device */
2719 void hci_release_dev(struct hci_dev *hdev)
2720 {
2721 	debugfs_remove_recursive(hdev->debugfs);
2722 	kfree_const(hdev->hw_info);
2723 	kfree_const(hdev->fw_info);
2724 
2725 	destroy_workqueue(hdev->workqueue);
2726 	destroy_workqueue(hdev->req_workqueue);
2727 
2728 	hci_dev_lock(hdev);
2729 	hci_bdaddr_list_clear(&hdev->reject_list);
2730 	hci_bdaddr_list_clear(&hdev->accept_list);
2731 	hci_uuids_clear(hdev);
2732 	hci_link_keys_clear(hdev);
2733 	hci_smp_ltks_clear(hdev);
2734 	hci_smp_irks_clear(hdev);
2735 	hci_remote_oob_data_clear(hdev);
2736 	hci_adv_instances_clear(hdev);
2737 	hci_adv_monitors_clear(hdev);
2738 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2739 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2740 	hci_conn_params_clear_all(hdev);
2741 	hci_discovery_filter_clear(hdev);
2742 	hci_blocked_keys_clear(hdev);
2743 	hci_codec_list_clear(&hdev->local_codecs);
2744 	msft_release(hdev);
2745 	hci_dev_unlock(hdev);
2746 
2747 	ida_destroy(&hdev->unset_handle_ida);
2748 	ida_free(&hci_index_ida, hdev->id);
2749 	kfree_skb(hdev->sent_cmd);
2750 	kfree_skb(hdev->req_skb);
2751 	kfree_skb(hdev->recv_event);
2752 	kfree(hdev);
2753 }
2754 EXPORT_SYMBOL(hci_release_dev);
2755 
2756 int hci_register_suspend_notifier(struct hci_dev *hdev)
2757 {
2758 	int ret = 0;
2759 
2760 	if (!hdev->suspend_notifier.notifier_call &&
2761 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2762 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2763 		ret = register_pm_notifier(&hdev->suspend_notifier);
2764 	}
2765 
2766 	return ret;
2767 }
2768 
2769 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2770 {
2771 	int ret = 0;
2772 
2773 	if (hdev->suspend_notifier.notifier_call) {
2774 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2775 		if (!ret)
2776 			hdev->suspend_notifier.notifier_call = NULL;
2777 	}
2778 
2779 	return ret;
2780 }
2781 
2782 /* Cancel ongoing command synchronously:
2783  *
2784  * - Cancel command timer
2785  * - Reset command counter
2786  * - Cancel command request
2787  */
2788 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2789 {
2790 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2791 
2792 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2793 		disable_delayed_work_sync(&hdev->cmd_timer);
2794 		disable_delayed_work_sync(&hdev->ncmd_timer);
2795 	} else  {
2796 		cancel_delayed_work_sync(&hdev->cmd_timer);
2797 		cancel_delayed_work_sync(&hdev->ncmd_timer);
2798 	}
2799 
2800 	atomic_set(&hdev->cmd_cnt, 1);
2801 
2802 	hci_cmd_sync_cancel_sync(hdev, err);
2803 }
2804 
2805 /* Suspend HCI device */
2806 int hci_suspend_dev(struct hci_dev *hdev)
2807 {
2808 	int ret;
2809 
2810 	bt_dev_dbg(hdev, "");
2811 
2812 	/* Suspend should only act when the device is powered. */
2813 	if (!hdev_is_powered(hdev) ||
2814 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2815 		return 0;
2816 
2817 	/* If powering down don't attempt to suspend */
2818 	if (mgmt_powering_down(hdev))
2819 		return 0;
2820 
2821 	/* Cancel potentially blocking sync operation before suspend */
2822 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2823 
2824 	hci_req_sync_lock(hdev);
2825 	ret = hci_suspend_sync(hdev);
2826 	hci_req_sync_unlock(hdev);
2827 
2828 	hci_clear_wake_reason(hdev);
2829 	mgmt_suspending(hdev, hdev->suspend_state);
2830 
2831 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2832 	return ret;
2833 }
2834 EXPORT_SYMBOL(hci_suspend_dev);
2835 
2836 /* Resume HCI device */
2837 int hci_resume_dev(struct hci_dev *hdev)
2838 {
2839 	int ret;
2840 
2841 	bt_dev_dbg(hdev, "");
2842 
2843 	/* Resume should only act when the device is powered. */
2844 	if (!hdev_is_powered(hdev) ||
2845 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2846 		return 0;
2847 
2848 	/* If powering down don't attempt to resume */
2849 	if (mgmt_powering_down(hdev))
2850 		return 0;
2851 
2852 	hci_req_sync_lock(hdev);
2853 	ret = hci_resume_sync(hdev);
2854 	hci_req_sync_unlock(hdev);
2855 
2856 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2857 		      hdev->wake_addr_type);
2858 
2859 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2860 	return ret;
2861 }
2862 EXPORT_SYMBOL(hci_resume_dev);
2863 
2864 /* Reset HCI device */
2865 int hci_reset_dev(struct hci_dev *hdev)
2866 {
2867 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2868 	struct sk_buff *skb;
2869 
2870 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2871 	if (!skb)
2872 		return -ENOMEM;
2873 
2874 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2875 	skb_put_data(skb, hw_err, 3);
2876 
2877 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2878 
2879 	/* Send Hardware Error to upper stack */
2880 	return hci_recv_frame(hdev, skb);
2881 }
2882 EXPORT_SYMBOL(hci_reset_dev);
2883 
2884 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2885 {
2886 	if (hdev->classify_pkt_type)
2887 		return hdev->classify_pkt_type(hdev, skb);
2888 
2889 	return hci_skb_pkt_type(skb);
2890 }
2891 
2892 /* Receive frame from HCI drivers */
2893 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2894 {
2895 	u8 dev_pkt_type;
2896 
2897 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2898 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2899 		kfree_skb(skb);
2900 		return -ENXIO;
2901 	}
2902 
2903 	/* Check if the driver agrees with the packet type classification */
2904 	dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2905 	if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2906 		hci_skb_pkt_type(skb) = dev_pkt_type;
2907 	}
2908 
2909 	switch (hci_skb_pkt_type(skb)) {
2910 	case HCI_EVENT_PKT:
2911 		break;
2912 	case HCI_ACLDATA_PKT:
2913 		/* Detect if ISO packet has been sent as ACL */
2914 		if (hci_conn_num(hdev, CIS_LINK) ||
2915 		    hci_conn_num(hdev, BIS_LINK)) {
2916 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2917 			__u8 type;
2918 
2919 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2920 			if (type == CIS_LINK || type == BIS_LINK)
2921 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2922 		}
2923 		break;
2924 	case HCI_SCODATA_PKT:
2925 		break;
2926 	case HCI_ISODATA_PKT:
2927 		break;
2928 	case HCI_DRV_PKT:
2929 		break;
2930 	default:
2931 		kfree_skb(skb);
2932 		return -EINVAL;
2933 	}
2934 
2935 	/* Incoming skb */
2936 	bt_cb(skb)->incoming = 1;
2937 
2938 	/* Time stamp */
2939 	__net_timestamp(skb);
2940 
2941 	skb_queue_tail(&hdev->rx_q, skb);
2942 	queue_work(hdev->workqueue, &hdev->rx_work);
2943 
2944 	return 0;
2945 }
2946 EXPORT_SYMBOL(hci_recv_frame);
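
/* Illustrative sketch (not part of the original source): drivers tag the skb
 * with its HCI packet type before handing it to the core. The my_recv_event
 * helper and its buf/len parameters are hypothetical.
 *
 *	static int my_recv_event(struct hci_dev *hdev, const void *buf, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_KERNEL);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *		skb_put_data(skb, buf, len);
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */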
2947 
2948 /* Receive diagnostic message from HCI drivers */
2949 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2950 {
2951 	/* Mark as diagnostic packet */
2952 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2953 
2954 	/* Time stamp */
2955 	__net_timestamp(skb);
2956 
2957 	skb_queue_tail(&hdev->rx_q, skb);
2958 	queue_work(hdev->workqueue, &hdev->rx_work);
2959 
2960 	return 0;
2961 }
2962 EXPORT_SYMBOL(hci_recv_diag);
2963 
2964 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2965 {
2966 	va_list vargs;
2967 
2968 	va_start(vargs, fmt);
2969 	kfree_const(hdev->hw_info);
2970 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2971 	va_end(vargs);
2972 }
2973 EXPORT_SYMBOL(hci_set_hw_info);
2974 
2975 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2976 {
2977 	va_list vargs;
2978 
2979 	va_start(vargs, fmt);
2980 	kfree_const(hdev->fw_info);
2981 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2982 	va_end(vargs);
2983 }
2984 EXPORT_SYMBOL(hci_set_fw_info);
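
/* Illustrative sketch (not part of the original source): a driver typically
 * records hardware and firmware details once they are known, e.g. after
 * reading a version from the controller. The variable names are hypothetical.
 *
 *	hci_set_hw_info(hdev, "hw_platform=%u hw_variant=%u",
 *			hw_platform, hw_variant);
 *	hci_set_fw_info(hdev, "fw_build=%u", fw_build);
 */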
2985 
2986 /* ---- Interface to upper protocols ---- */
2987 
2988 int hci_register_cb(struct hci_cb *cb)
2989 {
2990 	BT_DBG("%p name %s", cb, cb->name);
2991 
2992 	mutex_lock(&hci_cb_list_lock);
2993 	list_add_tail(&cb->list, &hci_cb_list);
2994 	mutex_unlock(&hci_cb_list_lock);
2995 
2996 	return 0;
2997 }
2998 EXPORT_SYMBOL(hci_register_cb);
2999 
3000 int hci_unregister_cb(struct hci_cb *cb)
3001 {
3002 	BT_DBG("%p name %s", cb, cb->name);
3003 
3004 	mutex_lock(&hci_cb_list_lock);
3005 	list_del(&cb->list);
3006 	mutex_unlock(&hci_cb_list_lock);
3007 
3008 	return 0;
3009 }
3010 EXPORT_SYMBOL(hci_unregister_cb);
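
/* Illustrative sketch (not part of the original source): an upper protocol
 * registers a callback block once at module init and removes it at exit.
 * The my_proto/my_connect_cfm names are hypothetical; only the hooks the
 * protocol actually needs have to be filled in.
 *
 *	static void my_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		// react to a new connection
 *	}
 *
 *	static struct hci_cb my_cb = {
 *		.name        = "my_proto",
 *		.connect_cfm = my_connect_cfm,
 *	};
 *
 *	// module init:  hci_register_cb(&my_cb);
 *	// module exit:  hci_unregister_cb(&my_cb);
 */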
3011 
3012 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3013 {
3014 	int err;
3015 
3016 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3017 	       skb->len);
3018 
3019 	/* Time stamp */
3020 	__net_timestamp(skb);
3021 
3022 	/* Send copy to monitor */
3023 	hci_send_to_monitor(hdev, skb);
3024 
3025 	if (atomic_read(&hdev->promisc)) {
3026 		/* Send copy to the sockets */
3027 		hci_send_to_sock(hdev, skb);
3028 	}
3029 
3030 	/* Get rid of the skb owner prior to sending to the driver. */
3031 	skb_orphan(skb);
3032 
3033 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3034 		kfree_skb(skb);
3035 		return -EINVAL;
3036 	}
3037 
3038 	if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
3039 		/* Intercept HCI Drv packet here and don't go with hdev->send
3040 		 * callback.
3041 		 */
3042 		err = hci_drv_process_cmd(hdev, skb);
3043 		kfree_skb(skb);
3044 		return err;
3045 	}
3046 
3047 	err = hdev->send(hdev, skb);
3048 	if (err < 0) {
3049 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3050 		kfree_skb(skb);
3051 		return err;
3052 	}
3053 
3054 	return 0;
3055 }
3056 
3057 static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn,
3058 			       struct sk_buff *skb)
3059 {
3060 	hci_conn_tx_queue(conn, skb);
3061 	return hci_send_frame(hdev, skb);
3062 }
3063 
3064 /* Send HCI command */
3065 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3066 		 const void *param)
3067 {
3068 	struct sk_buff *skb;
3069 
3070 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3071 
3072 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3073 	if (!skb) {
3074 		bt_dev_err(hdev, "no memory for command");
3075 		return -ENOMEM;
3076 	}
3077 
3078 	/* Stand-alone HCI commands must be flagged as
3079 	 * single-command requests.
3080 	 */
3081 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3082 
3083 	skb_queue_tail(&hdev->cmd_q, skb);
3084 	queue_work(hdev->workqueue, &hdev->cmd_work);
3085 
3086 	return 0;
3087 }
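
/* Illustrative sketch (not part of the original source): queueing a simple
 * parameterless command. The call only queues the command; completion is
 * reported asynchronously through the event path.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	if (err)
 *		bt_dev_err(hdev, "failed to queue command (%d)", err);
 */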
3088 
3089 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3090 		   const void *param)
3091 {
3092 	struct sk_buff *skb;
3093 
3094 	if (hci_opcode_ogf(opcode) != 0x3f) {
3095 		/* A controller receiving a command shall respond with either
3096 		 * a Command Status Event or a Command Complete Event.
3097 		 * Therefore, all standard HCI commands must be sent via the
3098 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3099 		 * Some vendors do not comply with this rule for vendor-specific
3100 		 * commands and do not return any event. We want to support
3101 		 * unresponded commands for such cases only.
3102 		 */
3103 		bt_dev_err(hdev, "unresponded command not supported");
3104 		return -EINVAL;
3105 	}
3106 
3107 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3108 	if (!skb) {
3109 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3110 			   opcode);
3111 		return -ENOMEM;
3112 	}
3113 
3114 	hci_send_frame(hdev, skb);
3115 
3116 	return 0;
3117 }
3118 EXPORT_SYMBOL(__hci_cmd_send);
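
/* Illustrative sketch (not part of the original source): sending a
 * vendor-specific (OGF 0x3f) command for which the controller never returns
 * an event. The OCF 0x0001 and the parameter byte are hypothetical.
 *
 *	u8 param = 0x01;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */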
3119 
3120 /* Get data from the previously sent command */
3121 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3122 {
3123 	struct hci_command_hdr *hdr;
3124 
3125 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3126 		return NULL;
3127 
3128 	hdr = (void *)skb->data;
3129 
3130 	if (hdr->opcode != cpu_to_le16(opcode))
3131 		return NULL;
3132 
3133 	return skb->data + HCI_COMMAND_HDR_SIZE;
3134 }
3135 
3136 /* Get data from the previously sent command */
3137 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3138 {
3139 	void *data;
3140 
3141 	/* Check if opcode matches last sent command */
3142 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3143 	if (!data)
3144 		/* Check if opcode matches last request */
3145 		data = hci_cmd_data(hdev->req_skb, opcode);
3146 
3147 	return data;
3148 }
3149 
3150 /* Get data from last received event */
3151 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3152 {
3153 	struct hci_event_hdr *hdr;
3154 	int offset;
3155 
3156 	if (!hdev->recv_event)
3157 		return NULL;
3158 
3159 	hdr = (void *)hdev->recv_event->data;
3160 	offset = sizeof(*hdr);
3161 
3162 	if (hdr->evt != event) {
3163 		/* In case of an LE meta event, check whether the subevent matches */
3164 		if (hdr->evt == HCI_EV_LE_META) {
3165 			struct hci_ev_le_meta *ev;
3166 
3167 			ev = (void *)hdev->recv_event->data + offset;
3168 			offset += sizeof(*ev);
3169 			if (ev->subevent == event)
3170 				goto found;
3171 		}
3172 		return NULL;
3173 	}
3174 
3175 found:
3176 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3177 
3178 	return hdev->recv_event->data + offset;
3179 }
3180 
3181 /* Send ACL data */
3182 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3183 {
3184 	struct hci_acl_hdr *hdr;
3185 	int len = skb->len;
3186 
3187 	skb_push(skb, HCI_ACL_HDR_SIZE);
3188 	skb_reset_transport_header(skb);
3189 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3190 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3191 	hdr->dlen   = cpu_to_le16(len);
3192 }
3193 
3194 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3195 			  struct sk_buff *skb, __u16 flags)
3196 {
3197 	struct hci_conn *conn = chan->conn;
3198 	struct hci_dev *hdev = conn->hdev;
3199 	struct sk_buff *list;
3200 
3201 	skb->len = skb_headlen(skb);
3202 	skb->data_len = 0;
3203 
3204 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3205 
3206 	hci_add_acl_hdr(skb, conn->handle, flags);
3207 
3208 	list = skb_shinfo(skb)->frag_list;
3209 	if (!list) {
3210 		/* Non fragmented */
3211 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3212 
3213 		skb_queue_tail(queue, skb);
3214 	} else {
3215 		/* Fragmented */
3216 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3217 
3218 		skb_shinfo(skb)->frag_list = NULL;
3219 
3220 		/* Queue all fragments atomically. We need to use spin_lock_bh
3221 		 * here because with 6LoWPAN links this function can be called
3222 		 * from softirq context, and using a normal spin lock could
3223 		 * cause deadlocks.
3224 		 */
3225 		spin_lock_bh(&queue->lock);
3226 
3227 		__skb_queue_tail(queue, skb);
3228 
3229 		flags &= ~ACL_START;
3230 		flags |= ACL_CONT;
3231 		do {
3232 			skb = list; list = list->next;
3233 
3234 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3235 			hci_add_acl_hdr(skb, conn->handle, flags);
3236 
3237 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3238 
3239 			__skb_queue_tail(queue, skb);
3240 		} while (list);
3241 
3242 		spin_unlock_bh(&queue->lock);
3243 	}
3244 }
3245 
3246 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3247 {
3248 	struct hci_dev *hdev = chan->conn->hdev;
3249 
3250 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3251 
3252 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3253 
3254 	queue_work(hdev->workqueue, &hdev->tx_work);
3255 }
3256 
3257 /* Send SCO data */
3258 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3259 {
3260 	struct hci_dev *hdev = conn->hdev;
3261 	struct hci_sco_hdr hdr;
3262 
3263 	BT_DBG("%s len %d", hdev->name, skb->len);
3264 
3265 	hdr.handle = cpu_to_le16(conn->handle);
3266 	hdr.dlen   = skb->len;
3267 
3268 	skb_push(skb, HCI_SCO_HDR_SIZE);
3269 	skb_reset_transport_header(skb);
3270 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3271 
3272 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3273 
3274 	skb_queue_tail(&conn->data_q, skb);
3275 	queue_work(hdev->workqueue, &hdev->tx_work);
3276 }
3277 
3278 /* Send ISO data */
3279 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3280 {
3281 	struct hci_iso_hdr *hdr;
3282 	int len = skb->len;
3283 
3284 	skb_push(skb, HCI_ISO_HDR_SIZE);
3285 	skb_reset_transport_header(skb);
3286 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3287 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3288 	hdr->dlen   = cpu_to_le16(len);
3289 }
3290 
3291 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3292 			  struct sk_buff *skb)
3293 {
3294 	struct hci_dev *hdev = conn->hdev;
3295 	struct sk_buff *list;
3296 	__u16 flags;
3297 
3298 	skb->len = skb_headlen(skb);
3299 	skb->data_len = 0;
3300 
3301 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3302 
3303 	list = skb_shinfo(skb)->frag_list;
3304 
3305 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3306 	hci_add_iso_hdr(skb, conn->handle, flags);
3307 
3308 	if (!list) {
3309 		/* Non fragmented */
3310 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3311 
3312 		skb_queue_tail(queue, skb);
3313 	} else {
3314 		/* Fragmented */
3315 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3316 
3317 		skb_shinfo(skb)->frag_list = NULL;
3318 
3319 		__skb_queue_tail(queue, skb);
3320 
3321 		do {
3322 			skb = list; list = list->next;
3323 
3324 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3325 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3326 						   0x00);
3327 			hci_add_iso_hdr(skb, conn->handle, flags);
3328 
3329 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3330 
3331 			__skb_queue_tail(queue, skb);
3332 		} while (list);
3333 	}
3334 }
3335 
3336 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3337 {
3338 	struct hci_dev *hdev = conn->hdev;
3339 
3340 	BT_DBG("%s len %d", hdev->name, skb->len);
3341 
3342 	hci_queue_iso(conn, &conn->data_q, skb);
3343 
3344 	queue_work(hdev->workqueue, &hdev->tx_work);
3345 }
3346 
3347 /* ---- HCI TX task (outgoing data) ---- */
3348 
3349 /* HCI Connection scheduler */
3350 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3351 {
3352 	struct hci_dev *hdev;
3353 	int cnt, q;
3354 
3355 	if (!conn) {
3356 		*quote = 0;
3357 		return;
3358 	}
3359 
3360 	hdev = conn->hdev;
3361 
3362 	switch (conn->type) {
3363 	case ACL_LINK:
3364 		cnt = hdev->acl_cnt;
3365 		break;
3366 	case SCO_LINK:
3367 	case ESCO_LINK:
3368 		cnt = hdev->sco_cnt;
3369 		break;
3370 	case LE_LINK:
3371 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3372 		break;
3373 	case CIS_LINK:
3374 	case BIS_LINK:
3375 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3376 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3377 		break;
3378 	default:
3379 		cnt = 0;
3380 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3381 	}
3382 
3383 	q = cnt / num;
3384 	*quote = q ? q : 1;
3385 }
3386 
3387 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3388 				     __u8 type2, int *quote)
3389 {
3390 	struct hci_conn_hash *h = &hdev->conn_hash;
3391 	struct hci_conn *conn = NULL, *c;
3392 	unsigned int num = 0, min = ~0;
3393 
3394 	/* We don't have to lock the device here. Connections are always
3395 	 * added and removed with the TX task disabled. */
3396 
3397 	rcu_read_lock();
3398 
3399 	list_for_each_entry_rcu(c, &h->list, list) {
3400 		if ((c->type != type && c->type != type2) ||
3401 		    skb_queue_empty(&c->data_q))
3402 			continue;
3403 
3404 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3405 			continue;
3406 
3407 		num++;
3408 
3409 		if (c->sent < min) {
3410 			min  = c->sent;
3411 			conn = c;
3412 		}
3413 
3414 		if (hci_conn_num(hdev, type) == num)
3415 			break;
3416 	}
3417 
3418 	rcu_read_unlock();
3419 
3420 	hci_quote_sent(conn, num, quote);
3421 
3422 	BT_DBG("conn %p quote %d", conn, *quote);
3423 	return conn;
3424 }
3425 
3426 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3427 {
3428 	struct hci_conn_hash *h = &hdev->conn_hash;
3429 	struct hci_conn *c;
3430 
3431 	bt_dev_err(hdev, "link tx timeout");
3432 
3433 	hci_dev_lock(hdev);
3434 
3435 	/* Kill stalled connections */
3436 	list_for_each_entry(c, &h->list, list) {
3437 		if (c->type == type && c->sent) {
3438 			bt_dev_err(hdev, "killing stalled connection %pMR",
3439 				   &c->dst);
3440 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3441 		}
3442 	}
3443 
3444 	hci_dev_unlock(hdev);
3445 }
3446 
3447 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3448 				      int *quote)
3449 {
3450 	struct hci_conn_hash *h = &hdev->conn_hash;
3451 	struct hci_chan *chan = NULL;
3452 	unsigned int num = 0, min = ~0, cur_prio = 0;
3453 	struct hci_conn *conn;
3454 	int conn_num = 0;
3455 
3456 	BT_DBG("%s", hdev->name);
3457 
3458 	rcu_read_lock();
3459 
3460 	list_for_each_entry_rcu(conn, &h->list, list) {
3461 		struct hci_chan *tmp;
3462 
3463 		if (conn->type != type)
3464 			continue;
3465 
3466 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3467 			continue;
3468 
3469 		conn_num++;
3470 
3471 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3472 			struct sk_buff *skb;
3473 
3474 			if (skb_queue_empty(&tmp->data_q))
3475 				continue;
3476 
3477 			skb = skb_peek(&tmp->data_q);
3478 			if (skb->priority < cur_prio)
3479 				continue;
3480 
3481 			if (skb->priority > cur_prio) {
3482 				num = 0;
3483 				min = ~0;
3484 				cur_prio = skb->priority;
3485 			}
3486 
3487 			num++;
3488 
3489 			if (conn->sent < min) {
3490 				min  = conn->sent;
3491 				chan = tmp;
3492 			}
3493 		}
3494 
3495 		if (hci_conn_num(hdev, type) == conn_num)
3496 			break;
3497 	}
3498 
3499 	rcu_read_unlock();
3500 
3501 	if (!chan)
3502 		return NULL;
3503 
3504 	hci_quote_sent(chan->conn, num, quote);
3505 
3506 	BT_DBG("chan %p quote %d", chan, *quote);
3507 	return chan;
3508 }
3509 
3510 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3511 {
3512 	struct hci_conn_hash *h = &hdev->conn_hash;
3513 	struct hci_conn *conn;
3514 	int num = 0;
3515 
3516 	BT_DBG("%s", hdev->name);
3517 
3518 	rcu_read_lock();
3519 
3520 	list_for_each_entry_rcu(conn, &h->list, list) {
3521 		struct hci_chan *chan;
3522 
3523 		if (conn->type != type)
3524 			continue;
3525 
3526 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3527 			continue;
3528 
3529 		num++;
3530 
3531 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3532 			struct sk_buff *skb;
3533 
3534 			if (chan->sent) {
3535 				chan->sent = 0;
3536 				continue;
3537 			}
3538 
3539 			if (skb_queue_empty(&chan->data_q))
3540 				continue;
3541 
3542 			skb = skb_peek(&chan->data_q);
3543 			if (skb->priority >= HCI_PRIO_MAX - 1)
3544 				continue;
3545 
3546 			skb->priority = HCI_PRIO_MAX - 1;
3547 
3548 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3549 			       skb->priority);
3550 		}
3551 
3552 		if (hci_conn_num(hdev, type) == num)
3553 			break;
3554 	}
3555 
3556 	rcu_read_unlock();
3557 
3558 }
3559 
3560 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3561 {
3562 	unsigned long last_tx;
3563 
3564 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3565 		return;
3566 
3567 	switch (type) {
3568 	case LE_LINK:
3569 		last_tx = hdev->le_last_tx;
3570 		break;
3571 	default:
3572 		last_tx = hdev->acl_last_tx;
3573 		break;
3574 	}
3575 
3576 	/* tx timeout must be longer than maximum link supervision timeout
3577 	 * (40.9 seconds)
3578 	 */
3579 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3580 		hci_link_tx_to(hdev, type);
3581 }
3582 
3583 /* Schedule SCO */
3584 static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
3585 {
3586 	struct hci_conn *conn;
3587 	struct sk_buff *skb;
3588 	int quote, *cnt;
3589 	unsigned int pkts = hdev->sco_pkts;
3590 
3591 	bt_dev_dbg(hdev, "type %u", type);
3592 
3593 	if (!hci_conn_num(hdev, type) || !pkts)
3594 		return;
3595 
3596 	/* Use sco_pkts if flow control has not been enabled, which limits
3597 	 * the number of buffers sent in a row.
3598 	 */
3599 	if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3600 		cnt = &pkts;
3601 	else
3602 		cnt = &hdev->sco_cnt;
3603 
3604 	while (*cnt && (conn = hci_low_sent(hdev, type, type, &quote))) {
3605 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3606 			BT_DBG("skb %p len %d", skb, skb->len);
3607 			hci_send_conn_frame(hdev, conn, skb);
3608 
3609 			conn->sent++;
3610 			if (conn->sent == ~0)
3611 				conn->sent = 0;
3612 			(*cnt)--;
3613 		}
3614 	}
3615 
3616 	/* Reschedule if all packets were sent and flow control is not enabled,
3617 	 * as there could be more packets queued that could not be sent, and
3618 	 * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule
3619 	 * needs to be forced.
3620 	 */
3621 	if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3622 		queue_work(hdev->workqueue, &hdev->tx_work);
3623 }
3624 
3625 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3626 {
3627 	unsigned int cnt = hdev->acl_cnt;
3628 	struct hci_chan *chan;
3629 	struct sk_buff *skb;
3630 	int quote;
3631 
3632 	__check_timeout(hdev, cnt, ACL_LINK);
3633 
3634 	while (hdev->acl_cnt &&
3635 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3636 		u32 priority = (skb_peek(&chan->data_q))->priority;
3637 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3638 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3639 			       skb->len, skb->priority);
3640 
3641 			/* Stop if priority has changed */
3642 			if (skb->priority < priority)
3643 				break;
3644 
3645 			skb = skb_dequeue(&chan->data_q);
3646 
3647 			hci_conn_enter_active_mode(chan->conn,
3648 						   bt_cb(skb)->force_active);
3649 
3650 			hci_send_conn_frame(hdev, chan->conn, skb);
3651 			hdev->acl_last_tx = jiffies;
3652 
3653 			hdev->acl_cnt--;
3654 			chan->sent++;
3655 			chan->conn->sent++;
3656 
3657 			/* Send pending SCO packets right away */
3658 			hci_sched_sco(hdev, SCO_LINK);
3659 			hci_sched_sco(hdev, ESCO_LINK);
3660 		}
3661 	}
3662 
3663 	if (cnt != hdev->acl_cnt)
3664 		hci_prio_recalculate(hdev, ACL_LINK);
3665 }
3666 
3667 static void hci_sched_acl(struct hci_dev *hdev)
3668 {
3669 	BT_DBG("%s", hdev->name);
3670 
3671 	/* No ACL links on this controller, nothing to schedule */
3672 	if (!hci_conn_num(hdev, ACL_LINK))
3673 		return;
3674 
3675 	hci_sched_acl_pkt(hdev);
3676 }
3677 
3678 static void hci_sched_le(struct hci_dev *hdev)
3679 {
3680 	struct hci_chan *chan;
3681 	struct sk_buff *skb;
3682 	int quote, *cnt, tmp;
3683 
3684 	BT_DBG("%s", hdev->name);
3685 
3686 	if (!hci_conn_num(hdev, LE_LINK))
3687 		return;
3688 
3689 	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3690 
3691 	__check_timeout(hdev, *cnt, LE_LINK);
3692 
3693 	tmp = *cnt;
3694 	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3695 		u32 priority = (skb_peek(&chan->data_q))->priority;
3696 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3697 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3698 			       skb->len, skb->priority);
3699 
3700 			/* Stop if priority has changed */
3701 			if (skb->priority < priority)
3702 				break;
3703 
3704 			skb = skb_dequeue(&chan->data_q);
3705 
3706 			hci_send_conn_frame(hdev, chan->conn, skb);
3707 			hdev->le_last_tx = jiffies;
3708 
3709 			(*cnt)--;
3710 			chan->sent++;
3711 			chan->conn->sent++;
3712 
3713 			/* Send pending SCO packets right away */
3714 			hci_sched_sco(hdev, SCO_LINK);
3715 			hci_sched_sco(hdev, ESCO_LINK);
3716 		}
3717 	}
3718 
3719 	if (*cnt != tmp)
3720 		hci_prio_recalculate(hdev, LE_LINK);
3721 }
3722 
3723 /* Schedule ISO (CIS/BIS) */
3724 static void hci_sched_iso(struct hci_dev *hdev)
3725 {
3726 	struct hci_conn *conn;
3727 	struct sk_buff *skb;
3728 	int quote, *cnt;
3729 
3730 	BT_DBG("%s", hdev->name);
3731 
3732 	if (!hci_conn_num(hdev, CIS_LINK) &&
3733 	    !hci_conn_num(hdev, BIS_LINK))
3734 		return;
3735 
3736 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3737 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3738 	while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK,
3739 					    &quote))) {
3740 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3741 			BT_DBG("skb %p len %d", skb, skb->len);
3742 			hci_send_conn_frame(hdev, conn, skb);
3743 
3744 			conn->sent++;
3745 			if (conn->sent == ~0)
3746 				conn->sent = 0;
3747 			(*cnt)--;
3748 		}
3749 	}
3750 }
3751 
3752 static void hci_tx_work(struct work_struct *work)
3753 {
3754 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3755 	struct sk_buff *skb;
3756 
3757 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3758 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3759 
3760 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3761 		/* Schedule queues and send stuff to HCI driver */
3762 		hci_sched_sco(hdev, SCO_LINK);
3763 		hci_sched_sco(hdev, ESCO_LINK);
3764 		hci_sched_iso(hdev);
3765 		hci_sched_acl(hdev);
3766 		hci_sched_le(hdev);
3767 	}
3768 
3769 	/* Send next queued raw (unknown type) packet */
3770 	while ((skb = skb_dequeue(&hdev->raw_q)))
3771 		hci_send_frame(hdev, skb);
3772 }
3773 
3774 /* ----- HCI RX task (incoming data processing) ----- */
3775 
3776 /* ACL data packet */
3777 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3778 {
3779 	struct hci_acl_hdr *hdr;
3780 	struct hci_conn *conn;
3781 	__u16 handle, flags;
3782 
3783 	hdr = skb_pull_data(skb, sizeof(*hdr));
3784 	if (!hdr) {
3785 		bt_dev_err(hdev, "ACL packet too small");
3786 		goto drop;
3787 	}
3788 
3789 	handle = __le16_to_cpu(hdr->handle);
3790 	flags  = hci_flags(handle);
3791 	handle = hci_handle(handle);
3792 
3793 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3794 		   handle, flags);
3795 
3796 	hdev->stat.acl_rx++;
3797 
3798 	hci_dev_lock(hdev);
3799 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3800 	hci_dev_unlock(hdev);
3801 
3802 	if (conn) {
3803 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3804 
3805 		/* Send to upper protocol */
3806 		l2cap_recv_acldata(conn, skb, flags);
3807 		return;
3808 	} else {
3809 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3810 			   handle);
3811 	}
3812 
3813 drop:
3814 	kfree_skb(skb);
3815 }
3816 
3817 /* SCO data packet */
3818 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3819 {
3820 	struct hci_sco_hdr *hdr;
3821 	struct hci_conn *conn;
3822 	__u16 handle, flags;
3823 
3824 	hdr = skb_pull_data(skb, sizeof(*hdr));
3825 	if (!hdr) {
3826 		bt_dev_err(hdev, "SCO packet too small");
3827 		goto drop;
3828 	}
3829 
3830 	handle = __le16_to_cpu(hdr->handle);
3831 	flags  = hci_flags(handle);
3832 	handle = hci_handle(handle);
3833 
3834 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3835 		   handle, flags);
3836 
3837 	hdev->stat.sco_rx++;
3838 
3839 	hci_dev_lock(hdev);
3840 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3841 	hci_dev_unlock(hdev);
3842 
3843 	if (conn) {
3844 		/* Send to upper protocol */
3845 		hci_skb_pkt_status(skb) = flags & 0x03;
3846 		sco_recv_scodata(conn, skb);
3847 		return;
3848 	} else {
3849 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3850 				       handle);
3851 	}
3852 
3853 drop:
3854 	kfree_skb(skb);
3855 }
3856 
3857 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3858 {
3859 	struct hci_iso_hdr *hdr;
3860 	struct hci_conn *conn;
3861 	__u16 handle, flags;
3862 
3863 	hdr = skb_pull_data(skb, sizeof(*hdr));
3864 	if (!hdr) {
3865 		bt_dev_err(hdev, "ISO packet too small");
3866 		goto drop;
3867 	}
3868 
3869 	handle = __le16_to_cpu(hdr->handle);
3870 	flags  = hci_flags(handle);
3871 	handle = hci_handle(handle);
3872 
3873 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3874 		   handle, flags);
3875 
3876 	hci_dev_lock(hdev);
3877 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3878 	hci_dev_unlock(hdev);
3879 
3880 	if (!conn) {
3881 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3882 			   handle);
3883 		goto drop;
3884 	}
3885 
3886 	/* Send to upper protocol */
3887 	iso_recv(conn, skb, flags);
3888 	return;
3889 
3890 drop:
3891 	kfree_skb(skb);
3892 }
3893 
3894 static bool hci_req_is_complete(struct hci_dev *hdev)
3895 {
3896 	struct sk_buff *skb;
3897 
3898 	skb = skb_peek(&hdev->cmd_q);
3899 	if (!skb)
3900 		return true;
3901 
3902 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3903 }
3904 
3905 static void hci_resend_last(struct hci_dev *hdev)
3906 {
3907 	struct hci_command_hdr *sent;
3908 	struct sk_buff *skb;
3909 	u16 opcode;
3910 
3911 	if (!hdev->sent_cmd)
3912 		return;
3913 
3914 	sent = (void *) hdev->sent_cmd->data;
3915 	opcode = __le16_to_cpu(sent->opcode);
3916 	if (opcode == HCI_OP_RESET)
3917 		return;
3918 
3919 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3920 	if (!skb)
3921 		return;
3922 
3923 	skb_queue_head(&hdev->cmd_q, skb);
3924 	queue_work(hdev->workqueue, &hdev->cmd_work);
3925 }
3926 
3927 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3928 			  hci_req_complete_t *req_complete,
3929 			  hci_req_complete_skb_t *req_complete_skb)
3930 {
3931 	struct sk_buff *skb;
3932 	unsigned long flags;
3933 
3934 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3935 
3936 	/* If the completed command doesn't match the last one that was
3937 	 * sent we need to do special handling of it.
3938 	 */
3939 	if (!hci_sent_cmd_data(hdev, opcode)) {
3940 		/* Some CSR based controllers generate a spontaneous
3941 		 * reset complete event during init and any pending
3942 		 * command will never be completed. In such a case we
3943 		 * need to resend whatever was the last sent
3944 		 * command.
3945 		 */
3946 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3947 			hci_resend_last(hdev);
3948 
3949 		return;
3950 	}
3951 
3952 	/* If we reach this point this event matches the last command sent */
3953 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3954 
3955 	/* If the command succeeded and there's still more commands in
3956 	 * this request the request is not yet complete.
3957 	 */
3958 	if (!status && !hci_req_is_complete(hdev))
3959 		return;
3960 
3961 	skb = hdev->req_skb;
3962 
3963 	/* If this was the last command in a request the complete
3964 	 * callback would be found in hdev->req_skb instead of the
3965 	 * command queue (hdev->cmd_q).
3966 	 */
3967 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3968 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3969 		return;
3970 	}
3971 
3972 	if (skb && bt_cb(skb)->hci.req_complete) {
3973 		*req_complete = bt_cb(skb)->hci.req_complete;
3974 		return;
3975 	}
3976 
3977 	/* Remove all pending commands belonging to this request */
3978 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3979 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3980 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3981 			__skb_queue_head(&hdev->cmd_q, skb);
3982 			break;
3983 		}
3984 
3985 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3986 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3987 		else
3988 			*req_complete = bt_cb(skb)->hci.req_complete;
3989 		dev_kfree_skb_irq(skb);
3990 	}
3991 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3992 }
3993 
3994 static void hci_rx_work(struct work_struct *work)
3995 {
3996 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3997 	struct sk_buff *skb;
3998 
3999 	BT_DBG("%s", hdev->name);
4000 
4001 	/* The kcov_remote functions are used to collect packet parsing
4002 	 * coverage information from this background thread and to associate
4003 	 * the coverage with the thread of the syscall that originally
4004 	 * injected the packet. This helps with fuzzing the kernel.
4005 	 */
4006 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4007 		kcov_remote_start_common(skb_get_kcov_handle(skb));
4008 
4009 		/* Send copy to monitor */
4010 		hci_send_to_monitor(hdev, skb);
4011 
4012 		if (atomic_read(&hdev->promisc)) {
4013 			/* Send copy to the sockets */
4014 			hci_send_to_sock(hdev, skb);
4015 		}
4016 
4017 		/* If the device has been opened in HCI_USER_CHANNEL,
4018 		 * userspace has exclusive access to the device.
4019 		 * When the device is in HCI_INIT, we still need to
4020 		 * process the data packets so that the driver can
4021 		 * complete its setup().
4022 		 */
4023 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4024 		    !test_bit(HCI_INIT, &hdev->flags)) {
4025 			kfree_skb(skb);
4026 			continue;
4027 		}
4028 
4029 		if (test_bit(HCI_INIT, &hdev->flags)) {
4030 			/* Don't process data packets in this state. */
4031 			switch (hci_skb_pkt_type(skb)) {
4032 			case HCI_ACLDATA_PKT:
4033 			case HCI_SCODATA_PKT:
4034 			case HCI_ISODATA_PKT:
4035 				kfree_skb(skb);
4036 				continue;
4037 			}
4038 		}
4039 
4040 		/* Process frame */
4041 		switch (hci_skb_pkt_type(skb)) {
4042 		case HCI_EVENT_PKT:
4043 			BT_DBG("%s Event packet", hdev->name);
4044 			hci_event_packet(hdev, skb);
4045 			break;
4046 
4047 		case HCI_ACLDATA_PKT:
4048 			BT_DBG("%s ACL data packet", hdev->name);
4049 			hci_acldata_packet(hdev, skb);
4050 			break;
4051 
4052 		case HCI_SCODATA_PKT:
4053 			BT_DBG("%s SCO data packet", hdev->name);
4054 			hci_scodata_packet(hdev, skb);
4055 			break;
4056 
4057 		case HCI_ISODATA_PKT:
4058 			BT_DBG("%s ISO data packet", hdev->name);
4059 			hci_isodata_packet(hdev, skb);
4060 			break;
4061 
4062 		default:
4063 			kfree_skb(skb);
4064 			break;
4065 		}
4066 	}
4067 }
4068 
4069 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4070 {
4071 	int err;
4072 
4073 	bt_dev_dbg(hdev, "skb %p", skb);
4074 
4075 	kfree_skb(hdev->sent_cmd);
4076 
4077 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4078 	if (!hdev->sent_cmd) {
4079 		skb_queue_head(&hdev->cmd_q, skb);
4080 		queue_work(hdev->workqueue, &hdev->cmd_work);
4081 		return;
4082 	}
4083 
4084 	if (hci_skb_opcode(skb) != HCI_OP_NOP) {
4085 		err = hci_send_frame(hdev, skb);
4086 		if (err < 0) {
4087 			hci_cmd_sync_cancel_sync(hdev, -err);
4088 			return;
4089 		}
4090 		atomic_dec(&hdev->cmd_cnt);
4091 	}
4092 
4093 	if (hdev->req_status == HCI_REQ_PEND &&
4094 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4095 		kfree_skb(hdev->req_skb);
4096 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4097 	}
4098 }
4099 
4100 static void hci_cmd_work(struct work_struct *work)
4101 {
4102 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4103 	struct sk_buff *skb;
4104 
4105 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4106 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4107 
4108 	/* Send queued commands */
4109 	if (atomic_read(&hdev->cmd_cnt)) {
4110 		skb = skb_dequeue(&hdev->cmd_q);
4111 		if (!skb)
4112 			return;
4113 
4114 		hci_send_cmd_sync(hdev, skb);
4115 
4116 		rcu_read_lock();
4117 		if (test_bit(HCI_RESET, &hdev->flags) ||
4118 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4119 			cancel_delayed_work(&hdev->cmd_timer);
4120 		else
4121 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4122 					   HCI_CMD_TIMEOUT);
4123 		rcu_read_unlock();
4124 	}
4125 }
4126