xref: /linux/net/bluetooth/hci_core.c (revision 8195136669661fdfe54e9a8923c33b31c92fc1da)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_debugfs.h"
44 #include "smp.h"
45 #include "leds.h"
46 #include "msft.h"
47 #include "aosp.h"
48 #include "hci_codec.h"
49 
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
53 
54 /* HCI device list */
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
57 
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
61 
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64 
65 /* Get HCI device by index.
66  * Device is held on return. */
67 struct hci_dev *hci_dev_get(int index)
68 {
69 	struct hci_dev *hdev = NULL, *d;
70 
71 	BT_DBG("%d", index);
72 
73 	if (index < 0)
74 		return NULL;
75 
76 	read_lock(&hci_dev_list_lock);
77 	list_for_each_entry(d, &hci_dev_list, list) {
78 		if (d->id == index) {
79 			hdev = hci_dev_hold(d);
80 			break;
81 		}
82 	}
83 	read_unlock(&hci_dev_list_lock);
84 	return hdev;
85 }
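
/* Illustrative sketch (not part of the original file): callers of
 * hci_dev_get() own a reference on success and must drop it with
 * hci_dev_put() once they are done with the device:
 *
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *
 *	bt_dev_dbg(hdev, "flags 0x%lx", hdev->flags);
 *	hci_dev_put(hdev);
 */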
86 
87 /* ---- Inquiry support ---- */
88 
89 bool hci_discovery_active(struct hci_dev *hdev)
90 {
91 	struct discovery_state *discov = &hdev->discovery;
92 
93 	switch (discov->state) {
94 	case DISCOVERY_FINDING:
95 	case DISCOVERY_RESOLVING:
96 		return true;
97 
98 	default:
99 		return false;
100 	}
101 }
102 
103 void hci_discovery_set_state(struct hci_dev *hdev, int state)
104 {
105 	int old_state = hdev->discovery.state;
106 
107 	if (old_state == state)
108 		return;
109 
110 	hdev->discovery.state = state;
111 
112 	switch (state) {
113 	case DISCOVERY_STOPPED:
114 		hci_update_passive_scan(hdev);
115 
116 		if (old_state != DISCOVERY_STARTING)
117 			mgmt_discovering(hdev, 0);
118 		break;
119 	case DISCOVERY_STARTING:
120 		break;
121 	case DISCOVERY_FINDING:
122 		/* If discovery was not started through MGMT (the old state was
123 		 * not STARTING), then no MGMT event shall be generated either
124 		 */
125 		if (old_state != DISCOVERY_STARTING) {
126 			hdev->discovery.state = old_state;
127 			return;
128 		}
129 		mgmt_discovering(hdev, 1);
130 		break;
131 	case DISCOVERY_RESOLVING:
132 		break;
133 	case DISCOVERY_STOPPING:
134 		break;
135 	}
136 
137 	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
138 }
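
/* Illustrative sketch (assumed flow, not taken from this file): an
 * MGMT-initiated discovery normally walks
 * STOPPED -> STARTING -> FINDING (-> RESOLVING) -> STOPPING -> STOPPED,
 * roughly as in:
 *
 *	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
 *	... scanning/inquiry is started ...
 *	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
 *	... results arrive, remote names may be resolved ...
 *	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
 *	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 */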
139 
140 void hci_inquiry_cache_flush(struct hci_dev *hdev)
141 {
142 	struct discovery_state *cache = &hdev->discovery;
143 	struct inquiry_entry *p, *n;
144 
145 	list_for_each_entry_safe(p, n, &cache->all, all) {
146 		list_del(&p->all);
147 		kfree(p);
148 	}
149 
150 	INIT_LIST_HEAD(&cache->unknown);
151 	INIT_LIST_HEAD(&cache->resolve);
152 }
153 
154 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
155 					       bdaddr_t *bdaddr)
156 {
157 	struct discovery_state *cache = &hdev->discovery;
158 	struct inquiry_entry *e;
159 
160 	BT_DBG("cache %p, %pMR", cache, bdaddr);
161 
162 	list_for_each_entry(e, &cache->all, all) {
163 		if (!bacmp(&e->data.bdaddr, bdaddr))
164 			return e;
165 	}
166 
167 	return NULL;
168 }
169 
170 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
171 						       bdaddr_t *bdaddr)
172 {
173 	struct discovery_state *cache = &hdev->discovery;
174 	struct inquiry_entry *e;
175 
176 	BT_DBG("cache %p, %pMR", cache, bdaddr);
177 
178 	list_for_each_entry(e, &cache->unknown, list) {
179 		if (!bacmp(&e->data.bdaddr, bdaddr))
180 			return e;
181 	}
182 
183 	return NULL;
184 }
185 
186 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
187 						       bdaddr_t *bdaddr,
188 						       int state)
189 {
190 	struct discovery_state *cache = &hdev->discovery;
191 	struct inquiry_entry *e;
192 
193 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
194 
195 	list_for_each_entry(e, &cache->resolve, list) {
196 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
197 			return e;
198 		if (!bacmp(&e->data.bdaddr, bdaddr))
199 			return e;
200 	}
201 
202 	return NULL;
203 }
204 
205 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
206 				      struct inquiry_entry *ie)
207 {
208 	struct discovery_state *cache = &hdev->discovery;
209 	struct list_head *pos = &cache->resolve;
210 	struct inquiry_entry *p;
211 
212 	list_del(&ie->list);
213 
214 	list_for_each_entry(p, &cache->resolve, list) {
215 		if (p->name_state != NAME_PENDING &&
216 		    abs(p->data.rssi) >= abs(ie->data.rssi))
217 			break;
218 		pos = &p->list;
219 	}
220 
221 	list_add(&ie->list, pos);
222 }
223 
224 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
225 			     bool name_known)
226 {
227 	struct discovery_state *cache = &hdev->discovery;
228 	struct inquiry_entry *ie;
229 	u32 flags = 0;
230 
231 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
232 
233 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
234 
235 	if (!data->ssp_mode)
236 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
237 
238 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
239 	if (ie) {
240 		if (!ie->data.ssp_mode)
241 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
242 
243 		if (ie->name_state == NAME_NEEDED &&
244 		    data->rssi != ie->data.rssi) {
245 			ie->data.rssi = data->rssi;
246 			hci_inquiry_cache_update_resolve(hdev, ie);
247 		}
248 
249 		goto update;
250 	}
251 
252 	/* Entry not in the cache. Add new one. */
253 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
254 	if (!ie) {
255 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
256 		goto done;
257 	}
258 
259 	list_add(&ie->all, &cache->all);
260 
261 	if (name_known) {
262 		ie->name_state = NAME_KNOWN;
263 	} else {
264 		ie->name_state = NAME_NOT_KNOWN;
265 		list_add(&ie->list, &cache->unknown);
266 	}
267 
268 update:
269 	if (name_known && ie->name_state != NAME_KNOWN &&
270 	    ie->name_state != NAME_PENDING) {
271 		ie->name_state = NAME_KNOWN;
272 		list_del(&ie->list);
273 	}
274 
275 	memcpy(&ie->data, data, sizeof(*data));
276 	ie->timestamp = jiffies;
277 	cache->timestamp = jiffies;
278 
279 	if (ie->name_state == NAME_NOT_KNOWN)
280 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
281 
282 done:
283 	return flags;
284 }
285 
286 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
287 {
288 	struct discovery_state *cache = &hdev->discovery;
289 	struct inquiry_info *info = (struct inquiry_info *) buf;
290 	struct inquiry_entry *e;
291 	int copied = 0;
292 
293 	list_for_each_entry(e, &cache->all, all) {
294 		struct inquiry_data *data = &e->data;
295 
296 		if (copied >= num)
297 			break;
298 
299 		bacpy(&info->bdaddr, &data->bdaddr);
300 		info->pscan_rep_mode	= data->pscan_rep_mode;
301 		info->pscan_period_mode	= data->pscan_period_mode;
302 		info->pscan_mode	= data->pscan_mode;
303 		memcpy(info->dev_class, data->dev_class, 3);
304 		info->clock_offset	= data->clock_offset;
305 
306 		info++;
307 		copied++;
308 	}
309 
310 	BT_DBG("cache %p, copied %d", cache, copied);
311 	return copied;
312 }
313 
314 int hci_inquiry(void __user *arg)
315 {
316 	__u8 __user *ptr = arg;
317 	struct hci_inquiry_req ir;
318 	struct hci_dev *hdev;
319 	int err = 0, do_inquiry = 0, max_rsp;
320 	__u8 *buf;
321 
322 	if (copy_from_user(&ir, ptr, sizeof(ir)))
323 		return -EFAULT;
324 
325 	hdev = hci_dev_get(ir.dev_id);
326 	if (!hdev)
327 		return -ENODEV;
328 
329 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
330 		err = -EBUSY;
331 		goto done;
332 	}
333 
334 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
335 		err = -EOPNOTSUPP;
336 		goto done;
337 	}
338 
339 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
340 		err = -EOPNOTSUPP;
341 		goto done;
342 	}
343 
344 	/* Restrict maximum inquiry length to 60 seconds */
345 	if (ir.length > 60) {
346 		err = -EINVAL;
347 		goto done;
348 	}
349 
350 	hci_dev_lock(hdev);
351 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
352 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
353 		hci_inquiry_cache_flush(hdev);
354 		do_inquiry = 1;
355 	}
356 	hci_dev_unlock(hdev);
357 
358 	if (do_inquiry) {
359 		hci_req_sync_lock(hdev);
360 		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
361 		hci_req_sync_unlock(hdev);
362 
363 		if (err < 0)
364 			goto done;
365 
366 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
367 		 * cleared). If it is interrupted by a signal, return -EINTR.
368 		 */
369 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
370 				TASK_INTERRUPTIBLE)) {
371 			err = -EINTR;
372 			goto done;
373 		}
374 	}
375 
376 	/* For an unlimited number of responses we will use a buffer with
377 	 * 255 entries
378 	 */
379 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
380 
381 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
382 	 * and then copy it to user space.
383 	 */
384 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
385 	if (!buf) {
386 		err = -ENOMEM;
387 		goto done;
388 	}
389 
390 	hci_dev_lock(hdev);
391 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
392 	hci_dev_unlock(hdev);
393 
394 	BT_DBG("num_rsp %d", ir.num_rsp);
395 
396 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
397 		ptr += sizeof(ir);
398 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
399 				 ir.num_rsp))
400 			err = -EFAULT;
401 	} else
402 		err = -EFAULT;
403 
404 	kfree(buf);
405 
406 done:
407 	hci_dev_put(hdev);
408 	return err;
409 }
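
/* Illustrative userspace sketch (assumptions: a raw HCI socket "dd" and the
 * libbluetooth headers providing struct hci_inquiry_req/inquiry_info;
 * ir.length is in 1.28 s units); this is roughly how the HCIINQUIRY ioctl
 * handled above is driven:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.length  = 8,
 *			.num_rsp = 255,
 *		},
 *	};
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */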
410 
411 static int hci_dev_do_open(struct hci_dev *hdev)
412 {
413 	int ret = 0;
414 
415 	BT_DBG("%s %p", hdev->name, hdev);
416 
417 	hci_req_sync_lock(hdev);
418 
419 	ret = hci_dev_open_sync(hdev);
420 
421 	hci_req_sync_unlock(hdev);
422 	return ret;
423 }
424 
425 /* ---- HCI ioctl helpers ---- */
426 
427 int hci_dev_open(__u16 dev)
428 {
429 	struct hci_dev *hdev;
430 	int err;
431 
432 	hdev = hci_dev_get(dev);
433 	if (!hdev)
434 		return -ENODEV;
435 
436 	/* Devices that are marked as unconfigured can only be powered
437 	 * up as user channel. Trying to bring them up as normal devices
438 	 * will result in a failure. Only user channel operation is
439 	 * possible.
440 	 *
441 	 * When this function is called for a user channel, the flag
442 	 * HCI_USER_CHANNEL will be set first before attempting to
443 	 * open the device.
444 	 */
445 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
446 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
447 		err = -EOPNOTSUPP;
448 		goto done;
449 	}
450 
451 	/* We need to ensure that no other power on/off work is pending
452 	 * before proceeding to call hci_dev_do_open. This is
453 	 * particularly important if the setup procedure has not yet
454 	 * completed.
455 	 */
456 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
457 		cancel_delayed_work(&hdev->power_off);
458 
459 	/* After this call it is guaranteed that the setup procedure
460 	 * has finished. This means that error conditions like RFKILL
461 	 * or no valid public or static random address apply.
462 	 */
463 	flush_workqueue(hdev->req_workqueue);
464 
465 	/* For controllers not using the management interface and that
466 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
467 	 * so that pairing works for them. Once the management interface
468 	 * is in use this bit will be cleared again and userspace has
469 	 * to explicitly enable it.
470 	 */
471 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
472 	    !hci_dev_test_flag(hdev, HCI_MGMT))
473 		hci_dev_set_flag(hdev, HCI_BONDABLE);
474 
475 	err = hci_dev_do_open(hdev);
476 
477 done:
478 	hci_dev_put(hdev);
479 	return err;
480 }
481 
482 int hci_dev_do_close(struct hci_dev *hdev)
483 {
484 	int err;
485 
486 	BT_DBG("%s %p", hdev->name, hdev);
487 
488 	hci_req_sync_lock(hdev);
489 
490 	err = hci_dev_close_sync(hdev);
491 
492 	hci_req_sync_unlock(hdev);
493 
494 	return err;
495 }
496 
497 int hci_dev_close(__u16 dev)
498 {
499 	struct hci_dev *hdev;
500 	int err;
501 
502 	hdev = hci_dev_get(dev);
503 	if (!hdev)
504 		return -ENODEV;
505 
506 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
507 		err = -EBUSY;
508 		goto done;
509 	}
510 
511 	cancel_work_sync(&hdev->power_on);
512 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
513 		cancel_delayed_work(&hdev->power_off);
514 
515 	err = hci_dev_do_close(hdev);
516 
517 done:
518 	hci_dev_put(hdev);
519 	return err;
520 }
521 
522 static int hci_dev_do_reset(struct hci_dev *hdev)
523 {
524 	int ret;
525 
526 	BT_DBG("%s %p", hdev->name, hdev);
527 
528 	hci_req_sync_lock(hdev);
529 
530 	/* Drop queues */
531 	skb_queue_purge(&hdev->rx_q);
532 	skb_queue_purge(&hdev->cmd_q);
533 
534 	/* Cancel these to avoid queueing non-chained pending work */
535 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
536 	/* Wait for
537 	 *
538 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
539 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
540 	 *
541 	 * inside RCU section to see the flag or complete scheduling.
542 	 */
543 	synchronize_rcu();
544 	/* Explicitly cancel works in case scheduled after setting the flag. */
545 	cancel_delayed_work(&hdev->cmd_timer);
546 	cancel_delayed_work(&hdev->ncmd_timer);
547 
548 	/* Avoid potential lockdep warnings from the *_flush() calls by
549 	 * ensuring the workqueue is empty up front.
550 	 */
551 	drain_workqueue(hdev->workqueue);
552 
553 	hci_dev_lock(hdev);
554 	hci_inquiry_cache_flush(hdev);
555 	hci_conn_hash_flush(hdev);
556 	hci_dev_unlock(hdev);
557 
558 	if (hdev->flush)
559 		hdev->flush(hdev);
560 
561 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
562 
563 	atomic_set(&hdev->cmd_cnt, 1);
564 	hdev->acl_cnt = 0;
565 	hdev->sco_cnt = 0;
566 	hdev->le_cnt = 0;
567 	hdev->iso_cnt = 0;
568 
569 	ret = hci_reset_sync(hdev);
570 
571 	hci_req_sync_unlock(hdev);
572 	return ret;
573 }
574 
575 int hci_dev_reset(__u16 dev)
576 {
577 	struct hci_dev *hdev;
578 	int err;
579 
580 	hdev = hci_dev_get(dev);
581 	if (!hdev)
582 		return -ENODEV;
583 
584 	if (!test_bit(HCI_UP, &hdev->flags)) {
585 		err = -ENETDOWN;
586 		goto done;
587 	}
588 
589 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
590 		err = -EBUSY;
591 		goto done;
592 	}
593 
594 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
595 		err = -EOPNOTSUPP;
596 		goto done;
597 	}
598 
599 	err = hci_dev_do_reset(hdev);
600 
601 done:
602 	hci_dev_put(hdev);
603 	return err;
604 }
605 
606 int hci_dev_reset_stat(__u16 dev)
607 {
608 	struct hci_dev *hdev;
609 	int ret = 0;
610 
611 	hdev = hci_dev_get(dev);
612 	if (!hdev)
613 		return -ENODEV;
614 
615 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
616 		ret = -EBUSY;
617 		goto done;
618 	}
619 
620 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
621 		ret = -EOPNOTSUPP;
622 		goto done;
623 	}
624 
625 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
626 
627 done:
628 	hci_dev_put(hdev);
629 	return ret;
630 }
631 
632 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
633 {
634 	bool conn_changed, discov_changed;
635 
636 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
637 
638 	if ((scan & SCAN_PAGE))
639 		conn_changed = !hci_dev_test_and_set_flag(hdev,
640 							  HCI_CONNECTABLE);
641 	else
642 		conn_changed = hci_dev_test_and_clear_flag(hdev,
643 							   HCI_CONNECTABLE);
644 
645 	if ((scan & SCAN_INQUIRY)) {
646 		discov_changed = !hci_dev_test_and_set_flag(hdev,
647 							    HCI_DISCOVERABLE);
648 	} else {
649 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
650 		discov_changed = hci_dev_test_and_clear_flag(hdev,
651 							     HCI_DISCOVERABLE);
652 	}
653 
654 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
655 		return;
656 
657 	if (conn_changed || discov_changed) {
658 		/* In case this was disabled through mgmt */
659 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
660 
661 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
662 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
663 
664 		mgmt_new_settings(hdev);
665 	}
666 }
667 
668 int hci_dev_cmd(unsigned int cmd, void __user *arg)
669 {
670 	struct hci_dev *hdev;
671 	struct hci_dev_req dr;
672 	__le16 policy;
673 	int err = 0;
674 
675 	if (copy_from_user(&dr, arg, sizeof(dr)))
676 		return -EFAULT;
677 
678 	hdev = hci_dev_get(dr.dev_id);
679 	if (!hdev)
680 		return -ENODEV;
681 
682 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
683 		err = -EBUSY;
684 		goto done;
685 	}
686 
687 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
688 		err = -EOPNOTSUPP;
689 		goto done;
690 	}
691 
692 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
693 		err = -EOPNOTSUPP;
694 		goto done;
695 	}
696 
697 	switch (cmd) {
698 	case HCISETAUTH:
699 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
700 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
701 		break;
702 
703 	case HCISETENCRYPT:
704 		if (!lmp_encrypt_capable(hdev)) {
705 			err = -EOPNOTSUPP;
706 			break;
707 		}
708 
709 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
710 			/* Auth must be enabled first */
711 			err = hci_cmd_sync_status(hdev,
712 						  HCI_OP_WRITE_AUTH_ENABLE,
713 						  1, &dr.dev_opt,
714 						  HCI_CMD_TIMEOUT);
715 			if (err)
716 				break;
717 		}
718 
719 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
720 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
721 		break;
722 
723 	case HCISETSCAN:
724 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
725 					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
726 
727 		/* Ensure that the connectable and discoverable states
728 		 * get correctly modified as this was a non-mgmt change.
729 		 */
730 		if (!err)
731 			hci_update_passive_scan_state(hdev, dr.dev_opt);
732 		break;
733 
734 	case HCISETLINKPOL:
735 		policy = cpu_to_le16(dr.dev_opt);
736 
737 		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
738 					  2, &policy, HCI_CMD_TIMEOUT);
739 		break;
740 
741 	case HCISETLINKMODE:
742 		hdev->link_mode = ((__u16) dr.dev_opt) &
743 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
744 		break;
745 
746 	case HCISETPTYPE:
747 		if (hdev->pkt_type == (__u16) dr.dev_opt)
748 			break;
749 
750 		hdev->pkt_type = (__u16) dr.dev_opt;
751 		mgmt_phy_configuration_changed(hdev, NULL);
752 		break;
753 
754 	case HCISETACLMTU:
755 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
756 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
757 		break;
758 
759 	case HCISETSCOMTU:
760 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
761 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
762 		break;
763 
764 	default:
765 		err = -EINVAL;
766 		break;
767 	}
768 
769 done:
770 	hci_dev_put(hdev);
771 	return err;
772 }
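
/* Illustrative userspace sketch (assumed raw HCI control socket "dd"): the
 * HCISETSCAN branch above is what e.g. "hciconfig hci0 piscan" ends up
 * issuing:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(dd, HCISETSCAN, (unsigned long)&dr) < 0)
 *		perror("HCISETSCAN");
 */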
773 
774 int hci_get_dev_list(void __user *arg)
775 {
776 	struct hci_dev *hdev;
777 	struct hci_dev_list_req *dl;
778 	struct hci_dev_req *dr;
779 	int n = 0, err;
780 	__u16 dev_num;
781 
782 	if (get_user(dev_num, (__u16 __user *) arg))
783 		return -EFAULT;
784 
785 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
786 		return -EINVAL;
787 
788 	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
789 	if (!dl)
790 		return -ENOMEM;
791 
792 	dl->dev_num = dev_num;
793 	dr = dl->dev_req;
794 
795 	read_lock(&hci_dev_list_lock);
796 	list_for_each_entry(hdev, &hci_dev_list, list) {
797 		unsigned long flags = hdev->flags;
798 
799 		/* When the auto-off is configured it means the transport
800 		 * is running, but in that case still indicate that the
801 		 * device is actually down.
802 		 */
803 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
804 			flags &= ~BIT(HCI_UP);
805 
806 		dr[n].dev_id  = hdev->id;
807 		dr[n].dev_opt = flags;
808 
809 		if (++n >= dev_num)
810 			break;
811 	}
812 	read_unlock(&hci_dev_list_lock);
813 
814 	dl->dev_num = n;
815 	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
816 	kfree(dl);
817 
818 	return err ? -EFAULT : 0;
819 }
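
/* Illustrative userspace sketch (assumed raw HCI control socket "dd"):
 * enumerating controllers through the HCIGETDEVLIST ioctl served above:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, dl) < 0)
 *		perror("HCIGETDEVLIST");
 *	for (i = 0; i < dl->dev_num; i++)
 *		printf("hci%u flags 0x%x\n", dr[i].dev_id, dr[i].dev_opt);
 *	free(dl);
 */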
820 
821 int hci_get_dev_info(void __user *arg)
822 {
823 	struct hci_dev *hdev;
824 	struct hci_dev_info di;
825 	unsigned long flags;
826 	int err = 0;
827 
828 	if (copy_from_user(&di, arg, sizeof(di)))
829 		return -EFAULT;
830 
831 	hdev = hci_dev_get(di.dev_id);
832 	if (!hdev)
833 		return -ENODEV;
834 
835 	/* When the auto-off is configured it means the transport
836 	 * is running, but in that case still indicate that the
837 	 * device is actually down.
838 	 */
839 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
840 		flags = hdev->flags & ~BIT(HCI_UP);
841 	else
842 		flags = hdev->flags;
843 
844 	strscpy(di.name, hdev->name, sizeof(di.name));
845 	di.bdaddr   = hdev->bdaddr;
846 	di.type     = (hdev->bus & 0x0f);
847 	di.flags    = flags;
848 	di.pkt_type = hdev->pkt_type;
849 	if (lmp_bredr_capable(hdev)) {
850 		di.acl_mtu  = hdev->acl_mtu;
851 		di.acl_pkts = hdev->acl_pkts;
852 		di.sco_mtu  = hdev->sco_mtu;
853 		di.sco_pkts = hdev->sco_pkts;
854 	} else {
855 		di.acl_mtu  = hdev->le_mtu;
856 		di.acl_pkts = hdev->le_pkts;
857 		di.sco_mtu  = 0;
858 		di.sco_pkts = 0;
859 	}
860 	di.link_policy = hdev->link_policy;
861 	di.link_mode   = hdev->link_mode;
862 
863 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
864 	memcpy(&di.features, &hdev->features, sizeof(di.features));
865 
866 	if (copy_to_user(arg, &di, sizeof(di)))
867 		err = -EFAULT;
868 
869 	hci_dev_put(hdev);
870 
871 	return err;
872 }
873 
874 /* ---- Interface to HCI drivers ---- */
875 
876 static int hci_dev_do_poweroff(struct hci_dev *hdev)
877 {
878 	int err;
879 
880 	BT_DBG("%s %p", hdev->name, hdev);
881 
882 	hci_req_sync_lock(hdev);
883 
884 	err = hci_set_powered_sync(hdev, false);
885 
886 	hci_req_sync_unlock(hdev);
887 
888 	return err;
889 }
890 
891 static int hci_rfkill_set_block(void *data, bool blocked)
892 {
893 	struct hci_dev *hdev = data;
894 	int err;
895 
896 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
897 
898 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
899 		return -EBUSY;
900 
901 	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
902 		return 0;
903 
904 	if (blocked) {
905 		hci_dev_set_flag(hdev, HCI_RFKILLED);
906 
907 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
908 		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
909 			err = hci_dev_do_poweroff(hdev);
910 			if (err) {
911 				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
912 					   err);
913 
914 				/* Make sure the device is still closed even if
915 				 * anything during the power off sequence (e.g.
916 				 * disconnecting devices) failed.
917 				 */
918 				hci_dev_do_close(hdev);
919 			}
920 		}
921 	} else {
922 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
923 	}
924 
925 	return 0;
926 }
927 
928 static const struct rfkill_ops hci_rfkill_ops = {
929 	.set_block = hci_rfkill_set_block,
930 };
931 
932 static void hci_power_on(struct work_struct *work)
933 {
934 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
935 	int err;
936 
937 	BT_DBG("%s", hdev->name);
938 
939 	if (test_bit(HCI_UP, &hdev->flags) &&
940 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
941 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
942 		cancel_delayed_work(&hdev->power_off);
943 		err = hci_powered_update_sync(hdev);
944 		mgmt_power_on(hdev, err);
945 		return;
946 	}
947 
948 	err = hci_dev_do_open(hdev);
949 	if (err < 0) {
950 		hci_dev_lock(hdev);
951 		mgmt_set_powered_failed(hdev, err);
952 		hci_dev_unlock(hdev);
953 		return;
954 	}
955 
956 	/* During the HCI setup phase, a few error conditions are
957 	 * ignored and they need to be checked now. If they are still
958 	 * valid, it is important to turn the device back off.
959 	 */
960 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
961 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
962 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
963 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
964 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
965 		hci_dev_do_close(hdev);
966 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
967 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
968 				   HCI_AUTO_OFF_TIMEOUT);
969 	}
970 
971 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
972 		/* For unconfigured devices, set the HCI_RAW flag
973 		 * so that userspace can easily identify them.
974 		 */
975 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
976 			set_bit(HCI_RAW, &hdev->flags);
977 
978 		/* For fully configured devices, this will send
979 		 * the Index Added event. For unconfigured devices,
980 		 * it will send the Unconfigured Index Added event.
981 		 *
982 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
983 		 * and no event will be sent.
984 		 */
985 		mgmt_index_added(hdev);
986 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
987 		/* Now that the controller is configured, it is
988 		 * important to clear the HCI_RAW flag.
989 		 */
990 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
991 			clear_bit(HCI_RAW, &hdev->flags);
992 
993 		/* Powering on the controller with HCI_CONFIG set only
994 		 * happens with the transition from unconfigured to
995 		 * configured. This will send the Index Added event.
996 		 */
997 		mgmt_index_added(hdev);
998 	}
999 }
1000 
1001 static void hci_power_off(struct work_struct *work)
1002 {
1003 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1004 					    power_off.work);
1005 
1006 	BT_DBG("%s", hdev->name);
1007 
1008 	hci_dev_do_close(hdev);
1009 }
1010 
1011 static void hci_error_reset(struct work_struct *work)
1012 {
1013 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1014 
1015 	hci_dev_hold(hdev);
1016 	BT_DBG("%s", hdev->name);
1017 
1018 	if (hdev->hw_error)
1019 		hdev->hw_error(hdev, hdev->hw_error_code);
1020 	else
1021 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1022 
1023 	if (!hci_dev_do_close(hdev))
1024 		hci_dev_do_open(hdev);
1025 
1026 	hci_dev_put(hdev);
1027 }
1028 
1029 void hci_uuids_clear(struct hci_dev *hdev)
1030 {
1031 	struct bt_uuid *uuid, *tmp;
1032 
1033 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1034 		list_del(&uuid->list);
1035 		kfree(uuid);
1036 	}
1037 }
1038 
1039 void hci_link_keys_clear(struct hci_dev *hdev)
1040 {
1041 	struct link_key *key, *tmp;
1042 
1043 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1044 		list_del_rcu(&key->list);
1045 		kfree_rcu(key, rcu);
1046 	}
1047 }
1048 
1049 void hci_smp_ltks_clear(struct hci_dev *hdev)
1050 {
1051 	struct smp_ltk *k, *tmp;
1052 
1053 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1054 		list_del_rcu(&k->list);
1055 		kfree_rcu(k, rcu);
1056 	}
1057 }
1058 
1059 void hci_smp_irks_clear(struct hci_dev *hdev)
1060 {
1061 	struct smp_irk *k, *tmp;
1062 
1063 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1064 		list_del_rcu(&k->list);
1065 		kfree_rcu(k, rcu);
1066 	}
1067 }
1068 
1069 void hci_blocked_keys_clear(struct hci_dev *hdev)
1070 {
1071 	struct blocked_key *b, *tmp;
1072 
1073 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1074 		list_del_rcu(&b->list);
1075 		kfree_rcu(b, rcu);
1076 	}
1077 }
1078 
1079 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1080 {
1081 	bool blocked = false;
1082 	struct blocked_key *b;
1083 
1084 	rcu_read_lock();
1085 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1086 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1087 			blocked = true;
1088 			break;
1089 		}
1090 	}
1091 
1092 	rcu_read_unlock();
1093 	return blocked;
1094 }
1095 
1096 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1097 {
1098 	struct link_key *k;
1099 
1100 	rcu_read_lock();
1101 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1102 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1103 			rcu_read_unlock();
1104 
1105 			if (hci_is_blocked_key(hdev,
1106 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1107 					       k->val)) {
1108 				bt_dev_warn_ratelimited(hdev,
1109 							"Link key blocked for %pMR",
1110 							&k->bdaddr);
1111 				return NULL;
1112 			}
1113 
1114 			return k;
1115 		}
1116 	}
1117 	rcu_read_unlock();
1118 
1119 	return NULL;
1120 }
1121 
1122 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1123 			       u8 key_type, u8 old_key_type)
1124 {
1125 	/* Legacy key */
1126 	if (key_type < 0x03)
1127 		return true;
1128 
1129 	/* Debug keys are insecure so don't store them persistently */
1130 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1131 		return false;
1132 
1133 	/* Changed combination key and there's no previous one */
1134 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1135 		return false;
1136 
1137 	/* Security mode 3 case */
1138 	if (!conn)
1139 		return true;
1140 
1141 	/* BR/EDR key derived using SC from an LE link */
1142 	if (conn->type == LE_LINK)
1143 		return true;
1144 
1145 	/* Neither local nor remote side had no-bonding as requirement */
1146 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1147 		return true;
1148 
1149 	/* Local side had dedicated bonding as requirement */
1150 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1151 		return true;
1152 
1153 	/* Remote side had dedicated bonding as requirement */
1154 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1155 		return true;
1156 
1157 	/* If none of the above criteria match, then don't store the key
1158 	 * persistently */
1159 	return false;
1160 }
1161 
1162 static u8 ltk_role(u8 type)
1163 {
1164 	if (type == SMP_LTK)
1165 		return HCI_ROLE_MASTER;
1166 
1167 	return HCI_ROLE_SLAVE;
1168 }
1169 
1170 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1171 			     u8 addr_type, u8 role)
1172 {
1173 	struct smp_ltk *k;
1174 
1175 	rcu_read_lock();
1176 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1177 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1178 			continue;
1179 
1180 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1181 			rcu_read_unlock();
1182 
1183 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1184 					       k->val)) {
1185 				bt_dev_warn_ratelimited(hdev,
1186 							"LTK blocked for %pMR",
1187 							&k->bdaddr);
1188 				return NULL;
1189 			}
1190 
1191 			return k;
1192 		}
1193 	}
1194 	rcu_read_unlock();
1195 
1196 	return NULL;
1197 }
1198 
1199 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1200 {
1201 	struct smp_irk *irk_to_return = NULL;
1202 	struct smp_irk *irk;
1203 
1204 	rcu_read_lock();
1205 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1206 		if (!bacmp(&irk->rpa, rpa)) {
1207 			irk_to_return = irk;
1208 			goto done;
1209 		}
1210 	}
1211 
1212 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1213 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1214 			bacpy(&irk->rpa, rpa);
1215 			irk_to_return = irk;
1216 			goto done;
1217 		}
1218 	}
1219 
1220 done:
1221 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1222 						irk_to_return->val)) {
1223 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1224 					&irk_to_return->bdaddr);
1225 		irk_to_return = NULL;
1226 	}
1227 
1228 	rcu_read_unlock();
1229 
1230 	return irk_to_return;
1231 }
1232 
1233 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1234 				     u8 addr_type)
1235 {
1236 	struct smp_irk *irk_to_return = NULL;
1237 	struct smp_irk *irk;
1238 
1239 	/* Identity Address must be public or static random */
1240 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1241 		return NULL;
1242 
1243 	rcu_read_lock();
1244 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1245 		if (addr_type == irk->addr_type &&
1246 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1247 			irk_to_return = irk;
1248 			goto done;
1249 		}
1250 	}
1251 
1252 done:
1253 
1254 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1255 						irk_to_return->val)) {
1256 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1257 					&irk_to_return->bdaddr);
1258 		irk_to_return = NULL;
1259 	}
1260 
1261 	rcu_read_unlock();
1262 
1263 	return irk_to_return;
1264 }
1265 
1266 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1267 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1268 				  u8 pin_len, bool *persistent)
1269 {
1270 	struct link_key *key, *old_key;
1271 	u8 old_key_type;
1272 
1273 	old_key = hci_find_link_key(hdev, bdaddr);
1274 	if (old_key) {
1275 		old_key_type = old_key->type;
1276 		key = old_key;
1277 	} else {
1278 		old_key_type = conn ? conn->key_type : 0xff;
1279 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1280 		if (!key)
1281 			return NULL;
1282 		list_add_rcu(&key->list, &hdev->link_keys);
1283 	}
1284 
1285 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1286 
1287 	/* Some buggy controller combinations generate a changed
1288 	 * combination key for legacy pairing even when there's no
1289 	 * previous key */
1290 	if (type == HCI_LK_CHANGED_COMBINATION &&
1291 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1292 		type = HCI_LK_COMBINATION;
1293 		if (conn)
1294 			conn->key_type = type;
1295 	}
1296 
1297 	bacpy(&key->bdaddr, bdaddr);
1298 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1299 	key->pin_len = pin_len;
1300 
1301 	if (type == HCI_LK_CHANGED_COMBINATION)
1302 		key->type = old_key_type;
1303 	else
1304 		key->type = type;
1305 
1306 	if (persistent)
1307 		*persistent = hci_persistent_key(hdev, conn, type,
1308 						 old_key_type);
1309 
1310 	return key;
1311 }
1312 
1313 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1314 			    u8 addr_type, u8 type, u8 authenticated,
1315 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1316 {
1317 	struct smp_ltk *key, *old_key;
1318 	u8 role = ltk_role(type);
1319 
1320 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1321 	if (old_key)
1322 		key = old_key;
1323 	else {
1324 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1325 		if (!key)
1326 			return NULL;
1327 		list_add_rcu(&key->list, &hdev->long_term_keys);
1328 	}
1329 
1330 	bacpy(&key->bdaddr, bdaddr);
1331 	key->bdaddr_type = addr_type;
1332 	memcpy(key->val, tk, sizeof(key->val));
1333 	key->authenticated = authenticated;
1334 	key->ediv = ediv;
1335 	key->rand = rand;
1336 	key->enc_size = enc_size;
1337 	key->type = type;
1338 
1339 	return key;
1340 }
1341 
1342 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1343 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1344 {
1345 	struct smp_irk *irk;
1346 
1347 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1348 	if (!irk) {
1349 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1350 		if (!irk)
1351 			return NULL;
1352 
1353 		bacpy(&irk->bdaddr, bdaddr);
1354 		irk->addr_type = addr_type;
1355 
1356 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1357 	}
1358 
1359 	memcpy(irk->val, val, 16);
1360 	bacpy(&irk->rpa, rpa);
1361 
1362 	return irk;
1363 }
1364 
1365 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1366 {
1367 	struct link_key *key;
1368 
1369 	key = hci_find_link_key(hdev, bdaddr);
1370 	if (!key)
1371 		return -ENOENT;
1372 
1373 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1374 
1375 	list_del_rcu(&key->list);
1376 	kfree_rcu(key, rcu);
1377 
1378 	return 0;
1379 }
1380 
1381 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1382 {
1383 	struct smp_ltk *k, *tmp;
1384 	int removed = 0;
1385 
1386 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1387 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1388 			continue;
1389 
1390 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1391 
1392 		list_del_rcu(&k->list);
1393 		kfree_rcu(k, rcu);
1394 		removed++;
1395 	}
1396 
1397 	return removed ? 0 : -ENOENT;
1398 }
1399 
1400 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1401 {
1402 	struct smp_irk *k, *tmp;
1403 
1404 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1405 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1406 			continue;
1407 
1408 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1409 
1410 		list_del_rcu(&k->list);
1411 		kfree_rcu(k, rcu);
1412 	}
1413 }
1414 
1415 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1416 {
1417 	struct smp_ltk *k;
1418 	struct smp_irk *irk;
1419 	u8 addr_type;
1420 
1421 	if (type == BDADDR_BREDR) {
1422 		if (hci_find_link_key(hdev, bdaddr))
1423 			return true;
1424 		return false;
1425 	}
1426 
1427 	/* Convert to HCI addr type which struct smp_ltk uses */
1428 	if (type == BDADDR_LE_PUBLIC)
1429 		addr_type = ADDR_LE_DEV_PUBLIC;
1430 	else
1431 		addr_type = ADDR_LE_DEV_RANDOM;
1432 
1433 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1434 	if (irk) {
1435 		bdaddr = &irk->bdaddr;
1436 		addr_type = irk->addr_type;
1437 	}
1438 
1439 	rcu_read_lock();
1440 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1441 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1442 			rcu_read_unlock();
1443 			return true;
1444 		}
1445 	}
1446 	rcu_read_unlock();
1447 
1448 	return false;
1449 }
1450 
1451 /* HCI command timer function */
1452 static void hci_cmd_timeout(struct work_struct *work)
1453 {
1454 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1455 					    cmd_timer.work);
1456 
1457 	if (hdev->req_skb) {
1458 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1459 
1460 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1461 
1462 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1463 	} else {
1464 		bt_dev_err(hdev, "command tx timeout");
1465 	}
1466 
1467 	if (hdev->cmd_timeout)
1468 		hdev->cmd_timeout(hdev);
1469 
1470 	atomic_set(&hdev->cmd_cnt, 1);
1471 	queue_work(hdev->workqueue, &hdev->cmd_work);
1472 }
1473 
1474 /* HCI ncmd timer function */
1475 static void hci_ncmd_timeout(struct work_struct *work)
1476 {
1477 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1478 					    ncmd_timer.work);
1479 
1480 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1481 
1482 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1483 	 * triggers since the procedure has its own timeout handling.
1484 	 */
1485 	if (test_bit(HCI_INIT, &hdev->flags))
1486 		return;
1487 
1488 	/* This is an irrecoverable state, inject hardware error event */
1489 	hci_reset_dev(hdev);
1490 }
1491 
1492 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1493 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1494 {
1495 	struct oob_data *data;
1496 
1497 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1498 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1499 			continue;
1500 		if (data->bdaddr_type != bdaddr_type)
1501 			continue;
1502 		return data;
1503 	}
1504 
1505 	return NULL;
1506 }
1507 
1508 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1509 			       u8 bdaddr_type)
1510 {
1511 	struct oob_data *data;
1512 
1513 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1514 	if (!data)
1515 		return -ENOENT;
1516 
1517 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1518 
1519 	list_del(&data->list);
1520 	kfree(data);
1521 
1522 	return 0;
1523 }
1524 
1525 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1526 {
1527 	struct oob_data *data, *n;
1528 
1529 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1530 		list_del(&data->list);
1531 		kfree(data);
1532 	}
1533 }
1534 
1535 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1536 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1537 			    u8 *hash256, u8 *rand256)
1538 {
1539 	struct oob_data *data;
1540 
1541 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1542 	if (!data) {
1543 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1544 		if (!data)
1545 			return -ENOMEM;
1546 
1547 		bacpy(&data->bdaddr, bdaddr);
1548 		data->bdaddr_type = bdaddr_type;
1549 		list_add(&data->list, &hdev->remote_oob_data);
1550 	}
1551 
1552 	if (hash192 && rand192) {
1553 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1554 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1555 		if (hash256 && rand256)
1556 			data->present = 0x03;
1557 	} else {
1558 		memset(data->hash192, 0, sizeof(data->hash192));
1559 		memset(data->rand192, 0, sizeof(data->rand192));
1560 		if (hash256 && rand256)
1561 			data->present = 0x02;
1562 		else
1563 			data->present = 0x00;
1564 	}
1565 
1566 	if (hash256 && rand256) {
1567 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1568 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1569 	} else {
1570 		memset(data->hash256, 0, sizeof(data->hash256));
1571 		memset(data->rand256, 0, sizeof(data->rand256));
1572 		if (hash192 && rand192)
1573 			data->present = 0x01;
1574 	}
1575 
1576 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1577 
1578 	return 0;
1579 }
1580 
1581 /* This function requires the caller holds hdev->lock */
1582 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1583 {
1584 	struct adv_info *adv_instance;
1585 
1586 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1587 		if (adv_instance->instance == instance)
1588 			return adv_instance;
1589 	}
1590 
1591 	return NULL;
1592 }
1593 
1594 /* This function requires the caller holds hdev->lock */
1595 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1596 {
1597 	struct adv_info *cur_instance;
1598 
1599 	cur_instance = hci_find_adv_instance(hdev, instance);
1600 	if (!cur_instance)
1601 		return NULL;
1602 
1603 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1604 					    struct adv_info, list))
1605 		return list_first_entry(&hdev->adv_instances,
1606 						 struct adv_info, list);
1607 	else
1608 		return list_next_entry(cur_instance, list);
1609 }
1610 
1611 /* This function requires the caller holds hdev->lock */
1612 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1613 {
1614 	struct adv_info *adv_instance;
1615 
1616 	adv_instance = hci_find_adv_instance(hdev, instance);
1617 	if (!adv_instance)
1618 		return -ENOENT;
1619 
1620 	BT_DBG("%s removing instance %d", hdev->name, instance);
1621 
1622 	if (hdev->cur_adv_instance == instance) {
1623 		if (hdev->adv_instance_timeout) {
1624 			cancel_delayed_work(&hdev->adv_instance_expire);
1625 			hdev->adv_instance_timeout = 0;
1626 		}
1627 		hdev->cur_adv_instance = 0x00;
1628 	}
1629 
1630 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1631 
1632 	list_del(&adv_instance->list);
1633 	kfree(adv_instance);
1634 
1635 	hdev->adv_instance_cnt--;
1636 
1637 	return 0;
1638 }
1639 
1640 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1641 {
1642 	struct adv_info *adv_instance, *n;
1643 
1644 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1645 		adv_instance->rpa_expired = rpa_expired;
1646 }
1647 
1648 /* This function requires the caller holds hdev->lock */
1649 void hci_adv_instances_clear(struct hci_dev *hdev)
1650 {
1651 	struct adv_info *adv_instance, *n;
1652 
1653 	if (hdev->adv_instance_timeout) {
1654 		cancel_delayed_work(&hdev->adv_instance_expire);
1655 		hdev->adv_instance_timeout = 0;
1656 	}
1657 
1658 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1659 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1660 		list_del(&adv_instance->list);
1661 		kfree(adv_instance);
1662 	}
1663 
1664 	hdev->adv_instance_cnt = 0;
1665 	hdev->cur_adv_instance = 0x00;
1666 }
1667 
1668 static void adv_instance_rpa_expired(struct work_struct *work)
1669 {
1670 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1671 						     rpa_expired_cb.work);
1672 
1673 	BT_DBG("");
1674 
1675 	adv_instance->rpa_expired = true;
1676 }
1677 
1678 /* This function requires the caller holds hdev->lock */
1679 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1680 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1681 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1682 				      u16 timeout, u16 duration, s8 tx_power,
1683 				      u32 min_interval, u32 max_interval,
1684 				      u8 mesh_handle)
1685 {
1686 	struct adv_info *adv;
1687 
1688 	adv = hci_find_adv_instance(hdev, instance);
1689 	if (adv) {
1690 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1691 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1692 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1693 	} else {
1694 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1695 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1696 			return ERR_PTR(-EOVERFLOW);
1697 
1698 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1699 		if (!adv)
1700 			return ERR_PTR(-ENOMEM);
1701 
1702 		adv->pending = true;
1703 		adv->instance = instance;
1704 
1705 		/* If the controller supports only one set and the instance is
1706 		 * set to 1 then there is no option other than using handle 0x00.
1707 		 */
1708 		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1709 			adv->handle = 0x00;
1710 		else
1711 			adv->handle = instance;
1712 
1713 		list_add(&adv->list, &hdev->adv_instances);
1714 		hdev->adv_instance_cnt++;
1715 	}
1716 
1717 	adv->flags = flags;
1718 	adv->min_interval = min_interval;
1719 	adv->max_interval = max_interval;
1720 	adv->tx_power = tx_power;
1721 	/* Defining a mesh_handle changes the timing units to ms,
1722 	 * rather than seconds, and ties the instance to the requested
1723 	 * mesh_tx queue.
1724 	 */
1725 	adv->mesh = mesh_handle;
1726 
1727 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1728 				  scan_rsp_len, scan_rsp_data);
1729 
1730 	adv->timeout = timeout;
1731 	adv->remaining_time = timeout;
1732 
1733 	if (duration == 0)
1734 		adv->duration = hdev->def_multi_adv_rotation_duration;
1735 	else
1736 		adv->duration = duration;
1737 
1738 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1739 
1740 	BT_DBG("%s for instance %d", hdev->name, instance);
1741 
1742 	return adv;
1743 }
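
/* Illustrative sketch (hypothetical buffers and intervals, caller holds
 * hdev->lock): registering advertising instance 1 roughly the way the MGMT
 * Add Advertising path does, with no timeout and the default rotation
 * duration:
 *
 *	struct adv_info *adv;
 *
 *	adv = hci_add_adv_instance(hdev, 1, MGMT_ADV_FLAG_CONNECTABLE,
 *				   adv_data_len, adv_data,
 *				   scan_rsp_len, scan_rsp_data, 0, 0,
 *				   HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 */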
1744 
1745 /* This function requires the caller holds hdev->lock */
1746 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1747 				      u32 flags, u8 data_len, u8 *data,
1748 				      u32 min_interval, u32 max_interval)
1749 {
1750 	struct adv_info *adv;
1751 
1752 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1753 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1754 				   min_interval, max_interval, 0);
1755 	if (IS_ERR(adv))
1756 		return adv;
1757 
1758 	adv->periodic = true;
1759 	adv->per_adv_data_len = data_len;
1760 
1761 	if (data)
1762 		memcpy(adv->per_adv_data, data, data_len);
1763 
1764 	return adv;
1765 }
1766 
1767 /* This function requires the caller holds hdev->lock */
1768 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1769 			      u16 adv_data_len, u8 *adv_data,
1770 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1771 {
1772 	struct adv_info *adv;
1773 
1774 	adv = hci_find_adv_instance(hdev, instance);
1775 
1776 	/* If advertisement doesn't exist, we can't modify its data */
1777 	if (!adv)
1778 		return -ENOENT;
1779 
1780 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1781 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1782 		memcpy(adv->adv_data, adv_data, adv_data_len);
1783 		adv->adv_data_len = adv_data_len;
1784 		adv->adv_data_changed = true;
1785 	}
1786 
1787 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1788 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1789 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1790 		adv->scan_rsp_len = scan_rsp_len;
1791 		adv->scan_rsp_changed = true;
1792 	}
1793 
1794 	/* Mark as changed if there are flags which would affect it */
1795 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1796 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1797 		adv->scan_rsp_changed = true;
1798 
1799 	return 0;
1800 }
1801 
1802 /* This function requires the caller holds hdev->lock */
1803 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1804 {
1805 	u32 flags;
1806 	struct adv_info *adv;
1807 
1808 	if (instance == 0x00) {
1809 		/* Instance 0 always manages the "Tx Power" and "Flags"
1810 		 * fields
1811 		 */
1812 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1813 
1814 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1815 		 * corresponds to the "connectable" instance flag.
1816 		 */
1817 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1818 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1819 
1820 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1821 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1822 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1823 			flags |= MGMT_ADV_FLAG_DISCOV;
1824 
1825 		return flags;
1826 	}
1827 
1828 	adv = hci_find_adv_instance(hdev, instance);
1829 
1830 	/* Return 0 when we got an invalid instance identifier. */
1831 	if (!adv)
1832 		return 0;
1833 
1834 	return adv->flags;
1835 }
1836 
1837 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1838 {
1839 	struct adv_info *adv;
1840 
1841 	/* Instance 0x00 always sets the local name */
1842 	if (instance == 0x00)
1843 		return true;
1844 
1845 	adv = hci_find_adv_instance(hdev, instance);
1846 	if (!adv)
1847 		return false;
1848 
1849 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1850 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1851 		return true;
1852 
1853 	return adv->scan_rsp_len ? true : false;
1854 }
1855 
1856 /* This function requires the caller holds hdev->lock */
1857 void hci_adv_monitors_clear(struct hci_dev *hdev)
1858 {
1859 	struct adv_monitor *monitor;
1860 	int handle;
1861 
1862 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1863 		hci_free_adv_monitor(hdev, monitor);
1864 
1865 	idr_destroy(&hdev->adv_monitors_idr);
1866 }
1867 
1868 /* Frees the monitor structure and does some bookkeeping.
1869  * This function requires the caller holds hdev->lock.
1870  */
1871 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1872 {
1873 	struct adv_pattern *pattern;
1874 	struct adv_pattern *tmp;
1875 
1876 	if (!monitor)
1877 		return;
1878 
1879 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1880 		list_del(&pattern->list);
1881 		kfree(pattern);
1882 	}
1883 
1884 	if (monitor->handle)
1885 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1886 
1887 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1888 		hdev->adv_monitors_cnt--;
1889 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1890 	}
1891 
1892 	kfree(monitor);
1893 }
1894 
1895 /* Assigns a handle to a monitor, and if offloading is supported and power is on,
1896  * also attempts to forward the request to the controller.
1897  * This function requires the caller holds hci_req_sync_lock.
1898  */
1899 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1900 {
1901 	int min, max, handle;
1902 	int status = 0;
1903 
1904 	if (!monitor)
1905 		return -EINVAL;
1906 
1907 	hci_dev_lock(hdev);
1908 
1909 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1910 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1911 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1912 			   GFP_KERNEL);
1913 
1914 	hci_dev_unlock(hdev);
1915 
1916 	if (handle < 0)
1917 		return handle;
1918 
1919 	monitor->handle = handle;
1920 
1921 	if (!hdev_is_powered(hdev))
1922 		return status;
1923 
1924 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1925 	case HCI_ADV_MONITOR_EXT_NONE:
1926 		bt_dev_dbg(hdev, "add monitor %d status %d",
1927 			   monitor->handle, status);
1928 		/* Message was not forwarded to controller - not an error */
1929 		break;
1930 
1931 	case HCI_ADV_MONITOR_EXT_MSFT:
1932 		status = msft_add_monitor_pattern(hdev, monitor);
1933 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1934 			   handle, status);
1935 		break;
1936 	}
1937 
1938 	return status;
1939 }
1940 
1941 /* Attempts to tell the controller and free the monitor. If somehow the
1942  * controller doesn't have a corresponding handle, remove anyway.
1943  * This function requires the caller holds hci_req_sync_lock.
1944  */
1945 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1946 				  struct adv_monitor *monitor)
1947 {
1948 	int status = 0;
1949 	int handle;
1950 
1951 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1952 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1953 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1954 			   monitor->handle, status);
1955 		goto free_monitor;
1956 
1957 	case HCI_ADV_MONITOR_EXT_MSFT:
1958 		handle = monitor->handle;
1959 		status = msft_remove_monitor(hdev, monitor);
1960 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1961 			   handle, status);
1962 		break;
1963 	}
1964 
1965 	/* In case no matching handle registered, just free the monitor */
1966 	if (status == -ENOENT)
1967 		goto free_monitor;
1968 
1969 	return status;
1970 
1971 free_monitor:
1972 	if (status == -ENOENT)
1973 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1974 			    monitor->handle);
1975 	hci_free_adv_monitor(hdev, monitor);
1976 
1977 	return status;
1978 }
1979 
1980 /* This function requires the caller holds hci_req_sync_lock */
1981 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1982 {
1983 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1984 
1985 	if (!monitor)
1986 		return -EINVAL;
1987 
1988 	return hci_remove_adv_monitor(hdev, monitor);
1989 }
1990 
1991 /* This function requires the caller holds hci_req_sync_lock */
1992 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1993 {
1994 	struct adv_monitor *monitor;
1995 	int idr_next_id = 0;
1996 	int status = 0;
1997 
1998 	while (1) {
1999 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2000 		if (!monitor)
2001 			break;
2002 
2003 		status = hci_remove_adv_monitor(hdev, monitor);
2004 		if (status)
2005 			return status;
2006 
2007 		idr_next_id++;
2008 	}
2009 
2010 	return status;
2011 }
2012 
2013 /* This function requires the caller holds hdev->lock */
2014 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2015 {
2016 	return !idr_is_empty(&hdev->adv_monitors_idr);
2017 }
2018 
2019 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2020 {
2021 	if (msft_monitor_supported(hdev))
2022 		return HCI_ADV_MONITOR_EXT_MSFT;
2023 
2024 	return HCI_ADV_MONITOR_EXT_NONE;
2025 }
2026 
2027 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2028 					 bdaddr_t *bdaddr, u8 type)
2029 {
2030 	struct bdaddr_list *b;
2031 
2032 	list_for_each_entry(b, bdaddr_list, list) {
2033 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2034 			return b;
2035 	}
2036 
2037 	return NULL;
2038 }
2039 
2040 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2041 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2042 				u8 type)
2043 {
2044 	struct bdaddr_list_with_irk *b;
2045 
2046 	list_for_each_entry(b, bdaddr_list, list) {
2047 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2048 			return b;
2049 	}
2050 
2051 	return NULL;
2052 }
2053 
2054 struct bdaddr_list_with_flags *
2055 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2056 				  bdaddr_t *bdaddr, u8 type)
2057 {
2058 	struct bdaddr_list_with_flags *b;
2059 
2060 	list_for_each_entry(b, bdaddr_list, list) {
2061 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2062 			return b;
2063 	}
2064 
2065 	return NULL;
2066 }
2067 
2068 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2069 {
2070 	struct bdaddr_list *b, *n;
2071 
2072 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2073 		list_del(&b->list);
2074 		kfree(b);
2075 	}
2076 }
2077 
2078 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2079 {
2080 	struct bdaddr_list *entry;
2081 
2082 	if (!bacmp(bdaddr, BDADDR_ANY))
2083 		return -EBADF;
2084 
2085 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2086 		return -EEXIST;
2087 
2088 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2089 	if (!entry)
2090 		return -ENOMEM;
2091 
2092 	bacpy(&entry->bdaddr, bdaddr);
2093 	entry->bdaddr_type = type;
2094 
2095 	list_add(&entry->list, list);
2096 
2097 	return 0;
2098 }
2099 
2100 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2101 					u8 type, u8 *peer_irk, u8 *local_irk)
2102 {
2103 	struct bdaddr_list_with_irk *entry;
2104 
2105 	if (!bacmp(bdaddr, BDADDR_ANY))
2106 		return -EBADF;
2107 
2108 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2109 		return -EEXIST;
2110 
2111 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2112 	if (!entry)
2113 		return -ENOMEM;
2114 
2115 	bacpy(&entry->bdaddr, bdaddr);
2116 	entry->bdaddr_type = type;
2117 
2118 	if (peer_irk)
2119 		memcpy(entry->peer_irk, peer_irk, 16);
2120 
2121 	if (local_irk)
2122 		memcpy(entry->local_irk, local_irk, 16);
2123 
2124 	list_add(&entry->list, list);
2125 
2126 	return 0;
2127 }
2128 
2129 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2130 				   u8 type, u32 flags)
2131 {
2132 	struct bdaddr_list_with_flags *entry;
2133 
2134 	if (!bacmp(bdaddr, BDADDR_ANY))
2135 		return -EBADF;
2136 
2137 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2138 		return -EEXIST;
2139 
2140 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2141 	if (!entry)
2142 		return -ENOMEM;
2143 
2144 	bacpy(&entry->bdaddr, bdaddr);
2145 	entry->bdaddr_type = type;
2146 	entry->flags = flags;
2147 
2148 	list_add(&entry->list, list);
2149 
2150 	return 0;
2151 }
2152 
2153 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2154 {
2155 	struct bdaddr_list *entry;
2156 
2157 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2158 		hci_bdaddr_list_clear(list);
2159 		return 0;
2160 	}
2161 
2162 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2163 	if (!entry)
2164 		return -ENOENT;
2165 
2166 	list_del(&entry->list);
2167 	kfree(entry);
2168 
2169 	return 0;
2170 }
2171 
2172 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2173 							u8 type)
2174 {
2175 	struct bdaddr_list_with_irk *entry;
2176 
2177 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2178 		hci_bdaddr_list_clear(list);
2179 		return 0;
2180 	}
2181 
2182 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2183 	if (!entry)
2184 		return -ENOENT;
2185 
2186 	list_del(&entry->list);
2187 	kfree(entry);
2188 
2189 	return 0;
2190 }
2191 
2192 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2193 				   u8 type)
2194 {
2195 	struct bdaddr_list_with_flags *entry;
2196 
2197 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2198 		hci_bdaddr_list_clear(list);
2199 		return 0;
2200 	}
2201 
2202 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2203 	if (!entry)
2204 		return -ENOENT;
2205 
2206 	list_del(&entry->list);
2207 	kfree(entry);
2208 
2209 	return 0;
2210 }
2211 
2212 /* This function requires the caller holds hdev->lock */
2213 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2214 					       bdaddr_t *addr, u8 addr_type)
2215 {
2216 	struct hci_conn_params *params;
2217 
2218 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2219 		if (bacmp(&params->addr, addr) == 0 &&
2220 		    params->addr_type == addr_type) {
2221 			return params;
2222 		}
2223 	}
2224 
2225 	return NULL;
2226 }
2227 
2228 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2229 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2230 						  bdaddr_t *addr, u8 addr_type)
2231 {
2232 	struct hci_conn_params *param;
2233 
2234 	rcu_read_lock();
2235 
2236 	list_for_each_entry_rcu(param, list, action) {
2237 		if (bacmp(&param->addr, addr) == 0 &&
2238 		    param->addr_type == addr_type) {
2239 			rcu_read_unlock();
2240 			return param;
2241 		}
2242 	}
2243 
2244 	rcu_read_unlock();
2245 
2246 	return NULL;
2247 }
2248 
2249 /* This function requires the caller holds hdev->lock */
2250 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2251 {
2252 	if (list_empty(&param->action))
2253 		return;
2254 
2255 	list_del_rcu(&param->action);
2256 	synchronize_rcu();
2257 	INIT_LIST_HEAD(&param->action);
2258 }
2259 
2260 /* This function requires the caller holds hdev->lock */
2261 void hci_pend_le_list_add(struct hci_conn_params *param,
2262 			  struct list_head *list)
2263 {
2264 	list_add_rcu(&param->action, list);
2265 }
2266 
2267 /* This function requires the caller holds hdev->lock */
2268 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2269 					    bdaddr_t *addr, u8 addr_type)
2270 {
2271 	struct hci_conn_params *params;
2272 
2273 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2274 	if (params)
2275 		return params;
2276 
2277 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2278 	if (!params) {
2279 		bt_dev_err(hdev, "out of memory");
2280 		return NULL;
2281 	}
2282 
2283 	bacpy(&params->addr, addr);
2284 	params->addr_type = addr_type;
2285 
2286 	list_add(&params->list, &hdev->le_conn_params);
2287 	INIT_LIST_HEAD(&params->action);
2288 
2289 	params->conn_min_interval = hdev->le_conn_min_interval;
2290 	params->conn_max_interval = hdev->le_conn_max_interval;
2291 	params->conn_latency = hdev->le_conn_latency;
2292 	params->supervision_timeout = hdev->le_supv_timeout;
2293 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2294 
2295 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2296 
2297 	return params;
2298 }
2299 
2300 void hci_conn_params_free(struct hci_conn_params *params)
2301 {
2302 	hci_pend_le_list_del_init(params);
2303 
2304 	if (params->conn) {
2305 		hci_conn_drop(params->conn);
2306 		hci_conn_put(params->conn);
2307 	}
2308 
2309 	list_del(&params->list);
2310 	kfree(params);
2311 }
2312 
2313 /* This function requires the caller holds hdev->lock */
2314 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2315 {
2316 	struct hci_conn_params *params;
2317 
2318 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2319 	if (!params)
2320 		return;
2321 
2322 	hci_conn_params_free(params);
2323 
2324 	hci_update_passive_scan(hdev);
2325 
2326 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2327 }
2328 
2329 /* This function requires the caller holds hdev->lock */
2330 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2331 {
2332 	struct hci_conn_params *params, *tmp;
2333 
2334 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2335 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2336 			continue;
2337 
2338 		/* For a one-time connection to a disabled device, keep the
2339 		 * params but mark them for explicit connect only.
2340 		 */
2341 		if (params->explicit_connect) {
2342 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2343 			continue;
2344 		}
2345 
2346 		hci_conn_params_free(params);
2347 	}
2348 
2349 	BT_DBG("All LE disabled connection parameters were removed");
2350 }
2351 
2352 /* This function requires the caller holds hdev->lock */
2353 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2354 {
2355 	struct hci_conn_params *params, *tmp;
2356 
2357 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2358 		hci_conn_params_free(params);
2359 
2360 	BT_DBG("All LE connection parameters were removed");
2361 }
2362 
2363 /* Copy the Identity Address of the controller.
2364  *
2365  * If the controller has a public BD_ADDR, then by default use that one.
2366  * If this is an LE-only controller without a public address, default to
2367  * the static random address.
2368  *
2369  * For debugging purposes it is possible to force controllers with a
2370  * public address to use the static random address instead.
2371  *
2372  * In case BR/EDR has been disabled on a dual-mode controller and
2373  * userspace has configured a static address, then that address
2374  * becomes the identity address instead of the public BR/EDR address.
2375  */
2376 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2377 			       u8 *bdaddr_type)
2378 {
2379 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2380 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2381 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2382 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2383 		bacpy(bdaddr, &hdev->static_addr);
2384 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2385 	} else {
2386 		bacpy(bdaddr, &hdev->bdaddr);
2387 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2388 	}
2389 }
2390 
2391 static void hci_clear_wake_reason(struct hci_dev *hdev)
2392 {
2393 	hci_dev_lock(hdev);
2394 
2395 	hdev->wake_reason = 0;
2396 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2397 	hdev->wake_addr_type = 0;
2398 
2399 	hci_dev_unlock(hdev);
2400 }
2401 
2402 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2403 				void *data)
2404 {
2405 	struct hci_dev *hdev =
2406 		container_of(nb, struct hci_dev, suspend_notifier);
2407 	int ret = 0;
2408 
2409 	/* Userspace has full control of this device. Do nothing. */
2410 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2411 		return NOTIFY_DONE;
2412 
2413 	/* To avoid a potential race with hci_unregister_dev. */
2414 	hci_dev_hold(hdev);
2415 
2416 	if (action == PM_SUSPEND_PREPARE)
2417 		ret = hci_suspend_dev(hdev);
2418 	else if (action == PM_POST_SUSPEND)
2419 		ret = hci_resume_dev(hdev);
2420 
2421 	if (ret)
2422 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2423 			   action, ret);
2424 
2425 	hci_dev_put(hdev);
2426 	return NOTIFY_DONE;
2427 }
2428 
2429 /* Alloc HCI device */
2430 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2431 {
2432 	struct hci_dev *hdev;
2433 	unsigned int alloc_size;
2434 
2435 	alloc_size = sizeof(*hdev);
2436 	if (sizeof_priv) {
2437 		/* Fixme: May need ALIGN-ment? */
2438 		alloc_size += sizeof_priv;
2439 	}
2440 
2441 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2442 	if (!hdev)
2443 		return NULL;
2444 
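	/* Initialize default parameters; drivers and userspace (e.g. via
	 * mgmt or debugfs) may override these before or after the device
	 * is brought up.
	 */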
2445 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2446 	hdev->esco_type = (ESCO_HV1);
2447 	hdev->link_mode = (HCI_LM_ACCEPT);
2448 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2449 	hdev->io_capability = 0x03;	/* No Input No Output */
2450 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2451 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2452 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2453 	hdev->adv_instance_cnt = 0;
2454 	hdev->cur_adv_instance = 0x00;
2455 	hdev->adv_instance_timeout = 0;
2456 
2457 	hdev->advmon_allowlist_duration = 300;
2458 	hdev->advmon_no_filter_duration = 500;
2459 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2460 
2461 	hdev->sniff_max_interval = 800;
2462 	hdev->sniff_min_interval = 80;
2463 
2464 	hdev->le_adv_channel_map = 0x07;
2465 	hdev->le_adv_min_interval = 0x0800;
2466 	hdev->le_adv_max_interval = 0x0800;
2467 	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2468 	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2469 	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2470 	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2471 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2472 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2473 	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2474 	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2475 	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2476 	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2477 	hdev->le_conn_min_interval = 0x0018;
2478 	hdev->le_conn_max_interval = 0x0028;
2479 	hdev->le_conn_latency = 0x0000;
2480 	hdev->le_supv_timeout = 0x002a;
2481 	hdev->le_def_tx_len = 0x001b;
2482 	hdev->le_def_tx_time = 0x0148;
2483 	hdev->le_max_tx_len = 0x001b;
2484 	hdev->le_max_tx_time = 0x0148;
2485 	hdev->le_max_rx_len = 0x001b;
2486 	hdev->le_max_rx_time = 0x0148;
2487 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2488 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2489 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2490 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2491 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2492 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2493 	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2494 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2495 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2496 
2497 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2498 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2499 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2500 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2501 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2502 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2503 
2504 	/* default 1.28 sec page scan */
2505 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2506 	hdev->def_page_scan_int = 0x0800;
2507 	hdev->def_page_scan_window = 0x0012;
2508 
2509 	mutex_init(&hdev->lock);
2510 	mutex_init(&hdev->req_lock);
2511 
2512 	ida_init(&hdev->unset_handle_ida);
2513 
2514 	INIT_LIST_HEAD(&hdev->mesh_pending);
2515 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2516 	INIT_LIST_HEAD(&hdev->reject_list);
2517 	INIT_LIST_HEAD(&hdev->accept_list);
2518 	INIT_LIST_HEAD(&hdev->uuids);
2519 	INIT_LIST_HEAD(&hdev->link_keys);
2520 	INIT_LIST_HEAD(&hdev->long_term_keys);
2521 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2522 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2523 	INIT_LIST_HEAD(&hdev->le_accept_list);
2524 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2525 	INIT_LIST_HEAD(&hdev->le_conn_params);
2526 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2527 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2528 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2529 	INIT_LIST_HEAD(&hdev->adv_instances);
2530 	INIT_LIST_HEAD(&hdev->blocked_keys);
2531 	INIT_LIST_HEAD(&hdev->monitored_devices);
2532 
2533 	INIT_LIST_HEAD(&hdev->local_codecs);
2534 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2535 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2536 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2537 	INIT_WORK(&hdev->power_on, hci_power_on);
2538 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2539 
2540 	hci_cmd_sync_init(hdev);
2541 
2542 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2543 
2544 	skb_queue_head_init(&hdev->rx_q);
2545 	skb_queue_head_init(&hdev->cmd_q);
2546 	skb_queue_head_init(&hdev->raw_q);
2547 
2548 	init_waitqueue_head(&hdev->req_wait_q);
2549 
2550 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2551 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2552 
2553 	hci_devcd_setup(hdev);
2554 
2555 	hci_init_sysfs(hdev);
2556 	discovery_init(hdev);
2557 
2558 	return hdev;
2559 }
2560 EXPORT_SYMBOL(hci_alloc_dev_priv);
2561 
2562 /* Free HCI device */
2563 void hci_free_dev(struct hci_dev *hdev)
2564 {
2565 	/* will free via device release */
2566 	put_device(&hdev->dev);
2567 }
2568 EXPORT_SYMBOL(hci_free_dev);
2569 
2570 /* Register HCI device */
2571 int hci_register_dev(struct hci_dev *hdev)
2572 {
2573 	int id, error;
2574 
2575 	if (!hdev->open || !hdev->close || !hdev->send)
2576 		return -EINVAL;
2577 
2578 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2579 	if (id < 0)
2580 		return id;
2581 
2582 	error = dev_set_name(&hdev->dev, "hci%u", id);
2583 	if (error)
2584 		return error;
2585 
2586 	hdev->name = dev_name(&hdev->dev);
2587 	hdev->id = id;
2588 
2589 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2590 
2591 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2592 	if (!hdev->workqueue) {
2593 		error = -ENOMEM;
2594 		goto err;
2595 	}
2596 
2597 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2598 						      hdev->name);
2599 	if (!hdev->req_workqueue) {
2600 		destroy_workqueue(hdev->workqueue);
2601 		error = -ENOMEM;
2602 		goto err;
2603 	}
2604 
2605 	if (!IS_ERR_OR_NULL(bt_debugfs))
2606 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2607 
2608 	error = device_add(&hdev->dev);
2609 	if (error < 0)
2610 		goto err_wqueue;
2611 
2612 	hci_leds_init(hdev);
2613 
2614 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2615 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2616 				    hdev);
2617 	if (hdev->rfkill) {
2618 		if (rfkill_register(hdev->rfkill) < 0) {
2619 			rfkill_destroy(hdev->rfkill);
2620 			hdev->rfkill = NULL;
2621 		}
2622 	}
2623 
2624 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2625 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2626 
2627 	hci_dev_set_flag(hdev, HCI_SETUP);
2628 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2629 
2630 	/* Assume BR/EDR support until proven otherwise (such as
2631 	 * through reading supported features during init).
2632 	 */
2633 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2634 
2635 	write_lock(&hci_dev_list_lock);
2636 	list_add(&hdev->list, &hci_dev_list);
2637 	write_unlock(&hci_dev_list_lock);
2638 
2639 	/* Devices that are marked for raw-only usage are unconfigured
2640 	 * and should not be included in normal operation.
2641 	 */
2642 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2643 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2644 
2645 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2646 	 * callback.
2647 	 */
2648 	if (hdev->wakeup)
2649 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2650 
2651 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2652 	hci_dev_hold(hdev);
2653 
2654 	error = hci_register_suspend_notifier(hdev);
2655 	if (error)
2656 		BT_WARN("register suspend notifier failed error:%d\n", error);
2657 
2658 	queue_work(hdev->req_workqueue, &hdev->power_on);
2659 
2660 	idr_init(&hdev->adv_monitors_idr);
2661 	msft_register(hdev);
2662 
2663 	return id;
2664 
2665 err_wqueue:
2666 	debugfs_remove_recursive(hdev->debugfs);
2667 	destroy_workqueue(hdev->workqueue);
2668 	destroy_workqueue(hdev->req_workqueue);
2669 err:
2670 	ida_free(&hci_index_ida, hdev->id);
2671 
2672 	return error;
2673 }
2674 EXPORT_SYMBOL(hci_register_dev);
2675 
2676 /* Unregister HCI device */
2677 void hci_unregister_dev(struct hci_dev *hdev)
2678 {
2679 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2680 
2681 	mutex_lock(&hdev->unregister_lock);
2682 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2683 	mutex_unlock(&hdev->unregister_lock);
2684 
2685 	write_lock(&hci_dev_list_lock);
2686 	list_del(&hdev->list);
2687 	write_unlock(&hci_dev_list_lock);
2688 
2689 	cancel_work_sync(&hdev->rx_work);
2690 	cancel_work_sync(&hdev->cmd_work);
2691 	cancel_work_sync(&hdev->tx_work);
2692 	cancel_work_sync(&hdev->power_on);
2693 	cancel_work_sync(&hdev->error_reset);
2694 
2695 	hci_cmd_sync_clear(hdev);
2696 
2697 	hci_unregister_suspend_notifier(hdev);
2698 
2699 	hci_dev_do_close(hdev);
2700 
2701 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2702 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2703 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2704 		hci_dev_lock(hdev);
2705 		mgmt_index_removed(hdev);
2706 		hci_dev_unlock(hdev);
2707 	}
2708 
2709 	/* mgmt_index_removed should take care of emptying the
2710 	 * pending list */
2711 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2712 
2713 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2714 
2715 	if (hdev->rfkill) {
2716 		rfkill_unregister(hdev->rfkill);
2717 		rfkill_destroy(hdev->rfkill);
2718 	}
2719 
2720 	device_del(&hdev->dev);
2721 	/* Actual cleanup is deferred until hci_release_dev(). */
2722 	hci_dev_put(hdev);
2723 }
2724 EXPORT_SYMBOL(hci_unregister_dev);
2725 
2726 /* Release HCI device */
2727 void hci_release_dev(struct hci_dev *hdev)
2728 {
2729 	debugfs_remove_recursive(hdev->debugfs);
2730 	kfree_const(hdev->hw_info);
2731 	kfree_const(hdev->fw_info);
2732 
2733 	destroy_workqueue(hdev->workqueue);
2734 	destroy_workqueue(hdev->req_workqueue);
2735 
2736 	hci_dev_lock(hdev);
2737 	hci_bdaddr_list_clear(&hdev->reject_list);
2738 	hci_bdaddr_list_clear(&hdev->accept_list);
2739 	hci_uuids_clear(hdev);
2740 	hci_link_keys_clear(hdev);
2741 	hci_smp_ltks_clear(hdev);
2742 	hci_smp_irks_clear(hdev);
2743 	hci_remote_oob_data_clear(hdev);
2744 	hci_adv_instances_clear(hdev);
2745 	hci_adv_monitors_clear(hdev);
2746 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2747 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2748 	hci_conn_params_clear_all(hdev);
2749 	hci_discovery_filter_clear(hdev);
2750 	hci_blocked_keys_clear(hdev);
2751 	hci_codec_list_clear(&hdev->local_codecs);
2752 	msft_release(hdev);
2753 	hci_dev_unlock(hdev);
2754 
2755 	ida_destroy(&hdev->unset_handle_ida);
2756 	ida_free(&hci_index_ida, hdev->id);
2757 	kfree_skb(hdev->sent_cmd);
2758 	kfree_skb(hdev->req_skb);
2759 	kfree_skb(hdev->recv_event);
2760 	kfree(hdev);
2761 }
2762 EXPORT_SYMBOL(hci_release_dev);
2763 
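/* Register the PM suspend notifier unless it is already registered or
 * the driver opted out via HCI_QUIRK_NO_SUSPEND_NOTIFIER.
 */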
2764 int hci_register_suspend_notifier(struct hci_dev *hdev)
2765 {
2766 	int ret = 0;
2767 
2768 	if (!hdev->suspend_notifier.notifier_call &&
2769 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2770 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2771 		ret = register_pm_notifier(&hdev->suspend_notifier);
2772 	}
2773 
2774 	return ret;
2775 }
2776 
2777 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2778 {
2779 	int ret = 0;
2780 
2781 	if (hdev->suspend_notifier.notifier_call) {
2782 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2783 		if (!ret)
2784 			hdev->suspend_notifier.notifier_call = NULL;
2785 	}
2786 
2787 	return ret;
2788 }
2789 
2790 /* Cancel ongoing command synchronously:
2791  *
2792  * - Cancel command timer
2793  * - Reset command counter
2794  * - Cancel command request
2795  */
2796 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2797 {
2798 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2799 
2800 	cancel_delayed_work_sync(&hdev->cmd_timer);
2801 	cancel_delayed_work_sync(&hdev->ncmd_timer);
2802 	atomic_set(&hdev->cmd_cnt, 1);
2803 
2804 	hci_cmd_sync_cancel_sync(hdev, err);
2805 }
2806 
2807 /* Suspend HCI device */
2808 int hci_suspend_dev(struct hci_dev *hdev)
2809 {
2810 	int ret;
2811 
2812 	bt_dev_dbg(hdev, "");
2813 
2814 	/* Suspend should only act when powered. */
2815 	if (!hdev_is_powered(hdev) ||
2816 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2817 		return 0;
2818 
2819 	/* If powering down don't attempt to suspend */
2820 	if (mgmt_powering_down(hdev))
2821 		return 0;
2822 
2823 	/* Cancel potentially blocking sync operation before suspend */
2824 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2825 
2826 	hci_req_sync_lock(hdev);
2827 	ret = hci_suspend_sync(hdev);
2828 	hci_req_sync_unlock(hdev);
2829 
2830 	hci_clear_wake_reason(hdev);
2831 	mgmt_suspending(hdev, hdev->suspend_state);
2832 
2833 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2834 	return ret;
2835 }
2836 EXPORT_SYMBOL(hci_suspend_dev);
2837 
2838 /* Resume HCI device */
2839 int hci_resume_dev(struct hci_dev *hdev)
2840 {
2841 	int ret;
2842 
2843 	bt_dev_dbg(hdev, "");
2844 
2845 	/* Resume should only act when powered. */
2846 	if (!hdev_is_powered(hdev) ||
2847 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2848 		return 0;
2849 
2850 	/* If powering down don't attempt to resume */
2851 	if (mgmt_powering_down(hdev))
2852 		return 0;
2853 
2854 	hci_req_sync_lock(hdev);
2855 	ret = hci_resume_sync(hdev);
2856 	hci_req_sync_unlock(hdev);
2857 
2858 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2859 		      hdev->wake_addr_type);
2860 
2861 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2862 	return ret;
2863 }
2864 EXPORT_SYMBOL(hci_resume_dev);
2865 
2866 /* Reset HCI device */
2867 int hci_reset_dev(struct hci_dev *hdev)
2868 {
2869 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2870 	struct sk_buff *skb;
2871 
2872 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2873 	if (!skb)
2874 		return -ENOMEM;
2875 
2876 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2877 	skb_put_data(skb, hw_err, 3);
2878 
2879 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2880 
2881 	/* Send Hardware Error to upper stack */
2882 	return hci_recv_frame(hdev, skb);
2883 }
2884 EXPORT_SYMBOL(hci_reset_dev);
2885 
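/* Let the driver (re)classify the packet type of an incoming frame; fall
 * back to the type already set in the skb when no classify_pkt_type
 * callback is provided.
 */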
2886 static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2887 {
2888 	if (hdev->classify_pkt_type)
2889 		return hdev->classify_pkt_type(hdev, skb);
2890 
2891 	return hci_skb_pkt_type(skb);
2892 }
2893 
2894 /* Receive frame from HCI drivers */
2895 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2896 {
2897 	u8 dev_pkt_type;
2898 
2899 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2900 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2901 		kfree_skb(skb);
2902 		return -ENXIO;
2903 	}
2904 
2905 	/* Check if the driver agrees with the packet type classification */
2906 	dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2907 	if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2908 		hci_skb_pkt_type(skb) = dev_pkt_type;
2909 	}
2910 
2911 	switch (hci_skb_pkt_type(skb)) {
2912 	case HCI_EVENT_PKT:
2913 		break;
2914 	case HCI_ACLDATA_PKT:
2915 		/* Detect if ISO packet has been sent as ACL */
2916 		if (hci_conn_num(hdev, ISO_LINK)) {
2917 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2918 			__u8 type;
2919 
2920 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2921 			if (type == ISO_LINK)
2922 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2923 		}
2924 		break;
2925 	case HCI_SCODATA_PKT:
2926 		break;
2927 	case HCI_ISODATA_PKT:
2928 		break;
2929 	default:
2930 		kfree_skb(skb);
2931 		return -EINVAL;
2932 	}
2933 
2934 	/* Incoming skb */
2935 	bt_cb(skb)->incoming = 1;
2936 
2937 	/* Time stamp */
2938 	__net_timestamp(skb);
2939 
2940 	skb_queue_tail(&hdev->rx_q, skb);
2941 	queue_work(hdev->workqueue, &hdev->rx_work);
2942 
2943 	return 0;
2944 }
2945 EXPORT_SYMBOL(hci_recv_frame);
2946 
2947 /* Receive diagnostic message from HCI drivers */
2948 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2949 {
2950 	/* Mark as diagnostic packet */
2951 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2952 
2953 	/* Time stamp */
2954 	__net_timestamp(skb);
2955 
2956 	skb_queue_tail(&hdev->rx_q, skb);
2957 	queue_work(hdev->workqueue, &hdev->rx_work);
2958 
2959 	return 0;
2960 }
2961 EXPORT_SYMBOL(hci_recv_diag);
2962 
2963 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2964 {
2965 	va_list vargs;
2966 
2967 	va_start(vargs, fmt);
2968 	kfree_const(hdev->hw_info);
2969 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2970 	va_end(vargs);
2971 }
2972 EXPORT_SYMBOL(hci_set_hw_info);
2973 
2974 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2975 {
2976 	va_list vargs;
2977 
2978 	va_start(vargs, fmt);
2979 	kfree_const(hdev->fw_info);
2980 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2981 	va_end(vargs);
2982 }
2983 EXPORT_SYMBOL(hci_set_fw_info);
2984 
2985 /* ---- Interface to upper protocols ---- */
2986 
2987 int hci_register_cb(struct hci_cb *cb)
2988 {
2989 	BT_DBG("%p name %s", cb, cb->name);
2990 
2991 	mutex_lock(&hci_cb_list_lock);
2992 	list_add_tail(&cb->list, &hci_cb_list);
2993 	mutex_unlock(&hci_cb_list_lock);
2994 
2995 	return 0;
2996 }
2997 EXPORT_SYMBOL(hci_register_cb);
2998 
2999 int hci_unregister_cb(struct hci_cb *cb)
3000 {
3001 	BT_DBG("%p name %s", cb, cb->name);
3002 
3003 	mutex_lock(&hci_cb_list_lock);
3004 	list_del(&cb->list);
3005 	mutex_unlock(&hci_cb_list_lock);
3006 
3007 	return 0;
3008 }
3009 EXPORT_SYMBOL(hci_unregister_cb);
3010 
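/* Hand a single frame to the driver. A copy is first sent to the monitor
 * socket and, when in promiscuous mode, to the HCI sockets; the frame is
 * dropped if the device is not running.
 */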
3011 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3012 {
3013 	int err;
3014 
3015 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3016 	       skb->len);
3017 
3018 	/* Time stamp */
3019 	__net_timestamp(skb);
3020 
3021 	/* Send copy to monitor */
3022 	hci_send_to_monitor(hdev, skb);
3023 
3024 	if (atomic_read(&hdev->promisc)) {
3025 		/* Send copy to the sockets */
3026 		hci_send_to_sock(hdev, skb);
3027 	}
3028 
3029 	/* Get rid of skb owner, prior to sending to the driver. */
3030 	skb_orphan(skb);
3031 
3032 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3033 		kfree_skb(skb);
3034 		return -EINVAL;
3035 	}
3036 
3037 	err = hdev->send(hdev, skb);
3038 	if (err < 0) {
3039 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3040 		kfree_skb(skb);
3041 		return err;
3042 	}
3043 
3044 	return 0;
3045 }
3046 
3047 /* Send HCI command */
3048 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3049 		 const void *param)
3050 {
3051 	struct sk_buff *skb;
3052 
3053 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3054 
3055 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3056 	if (!skb) {
3057 		bt_dev_err(hdev, "no memory for command");
3058 		return -ENOMEM;
3059 	}
3060 
3061 	/* Stand-alone HCI commands must be flagged as
3062 	 * single-command requests.
3063 	 */
3064 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3065 
3066 	skb_queue_tail(&hdev->cmd_q, skb);
3067 	queue_work(hdev->workqueue, &hdev->cmd_work);
3068 
3069 	return 0;
3070 }
3071 
3072 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3073 		   const void *param)
3074 {
3075 	struct sk_buff *skb;
3076 
3077 	if (hci_opcode_ogf(opcode) != 0x3f) {
3078 		/* A controller receiving a command shall respond with either
3079 		 * a Command Status Event or a Command Complete Event.
3080 		 * Therefore, all standard HCI commands must be sent via the
3081 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3082 		 * Some vendors do not comply with this rule for vendor-specific
3083 		 * commands and do not return any event. We want to support
3084 		 * unresponded commands for such cases only.
3085 		 */
3086 		bt_dev_err(hdev, "unresponded command not supported");
3087 		return -EINVAL;
3088 	}
3089 
3090 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3091 	if (!skb) {
3092 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3093 			   opcode);
3094 		return -ENOMEM;
3095 	}
3096 
3097 	hci_send_frame(hdev, skb);
3098 
3099 	return 0;
3100 }
3101 EXPORT_SYMBOL(__hci_cmd_send);
3102 
3103 /* Get data from the previously sent command */
3104 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3105 {
3106 	struct hci_command_hdr *hdr;
3107 
3108 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3109 		return NULL;
3110 
3111 	hdr = (void *)skb->data;
3112 
3113 	if (hdr->opcode != cpu_to_le16(opcode))
3114 		return NULL;
3115 
3116 	return skb->data + HCI_COMMAND_HDR_SIZE;
3117 }
3118 
3119 /* Get data from the previously sent command */
3120 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3121 {
3122 	void *data;
3123 
3124 	/* Check if opcode matches last sent command */
3125 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3126 	if (!data)
3127 		/* Check if opcode matches last request */
3128 		data = hci_cmd_data(hdev->req_skb, opcode);
3129 
3130 	return data;
3131 }
3132 
3133 /* Get data from last received event */
3134 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3135 {
3136 	struct hci_event_hdr *hdr;
3137 	int offset;
3138 
3139 	if (!hdev->recv_event)
3140 		return NULL;
3141 
3142 	hdr = (void *)hdev->recv_event->data;
3143 	offset = sizeof(*hdr);
3144 
3145 	if (hdr->evt != event) {
3146 		/* For an LE meta event, check whether the subevent matches */
3147 		if (hdr->evt == HCI_EV_LE_META) {
3148 			struct hci_ev_le_meta *ev;
3149 
3150 			ev = (void *)hdev->recv_event->data + offset;
3151 			offset += sizeof(*ev);
3152 			if (ev->subevent == event)
3153 				goto found;
3154 		}
3155 		return NULL;
3156 	}
3157 
3158 found:
3159 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3160 
3161 	return hdev->recv_event->data + offset;
3162 }
3163 
3164 /* Send ACL data */
3165 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3166 {
3167 	struct hci_acl_hdr *hdr;
3168 	int len = skb->len;
3169 
3170 	skb_push(skb, HCI_ACL_HDR_SIZE);
3171 	skb_reset_transport_header(skb);
3172 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3173 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3174 	hdr->dlen   = cpu_to_le16(len);
3175 }
3176 
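/* Prepend ACL headers and queue the skb on the channel's data queue. If
 * the skb carries a frag_list, every fragment gets its own header with
 * the ACL_CONT flag and all fragments are queued atomically.
 */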
3177 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3178 			  struct sk_buff *skb, __u16 flags)
3179 {
3180 	struct hci_conn *conn = chan->conn;
3181 	struct hci_dev *hdev = conn->hdev;
3182 	struct sk_buff *list;
3183 
3184 	skb->len = skb_headlen(skb);
3185 	skb->data_len = 0;
3186 
3187 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3188 
3189 	hci_add_acl_hdr(skb, conn->handle, flags);
3190 
3191 	list = skb_shinfo(skb)->frag_list;
3192 	if (!list) {
3193 		/* Non fragmented */
3194 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3195 
3196 		skb_queue_tail(queue, skb);
3197 	} else {
3198 		/* Fragmented */
3199 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3200 
3201 		skb_shinfo(skb)->frag_list = NULL;
3202 
3203 		/* Queue all fragments atomically. We need to use spin_lock_bh
3204 		 * here because of 6LoWPAN links, where this function can be
3205 		 * called from softirq context; using a normal spin lock could
3206 		 * cause deadlocks.
3207 		 */
3208 		spin_lock_bh(&queue->lock);
3209 
3210 		__skb_queue_tail(queue, skb);
3211 
3212 		flags &= ~ACL_START;
3213 		flags |= ACL_CONT;
3214 		do {
3215 			skb = list; list = list->next;
3216 
3217 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3218 			hci_add_acl_hdr(skb, conn->handle, flags);
3219 
3220 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3221 
3222 			__skb_queue_tail(queue, skb);
3223 		} while (list);
3224 
3225 		spin_unlock_bh(&queue->lock);
3226 	}
3227 }
3228 
3229 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3230 {
3231 	struct hci_dev *hdev = chan->conn->hdev;
3232 
3233 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3234 
3235 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3236 
3237 	queue_work(hdev->workqueue, &hdev->tx_work);
3238 }
3239 
3240 /* Send SCO data */
3241 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3242 {
3243 	struct hci_dev *hdev = conn->hdev;
3244 	struct hci_sco_hdr hdr;
3245 
3246 	BT_DBG("%s len %d", hdev->name, skb->len);
3247 
3248 	hdr.handle = cpu_to_le16(conn->handle);
3249 	hdr.dlen   = skb->len;
3250 
3251 	skb_push(skb, HCI_SCO_HDR_SIZE);
3252 	skb_reset_transport_header(skb);
3253 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3254 
3255 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3256 
3257 	skb_queue_tail(&conn->data_q, skb);
3258 	queue_work(hdev->workqueue, &hdev->tx_work);
3259 }
3260 
3261 /* Send ISO data */
3262 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3263 {
3264 	struct hci_iso_hdr *hdr;
3265 	int len = skb->len;
3266 
3267 	skb_push(skb, HCI_ISO_HDR_SIZE);
3268 	skb_reset_transport_header(skb);
3269 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3270 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3271 	hdr->dlen   = cpu_to_le16(len);
3272 }
3273 
3274 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3275 			  struct sk_buff *skb)
3276 {
3277 	struct hci_dev *hdev = conn->hdev;
3278 	struct sk_buff *list;
3279 	__u16 flags;
3280 
3281 	skb->len = skb_headlen(skb);
3282 	skb->data_len = 0;
3283 
3284 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3285 
3286 	list = skb_shinfo(skb)->frag_list;
3287 
3288 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3289 	hci_add_iso_hdr(skb, conn->handle, flags);
3290 
3291 	if (!list) {
3292 		/* Non fragmented */
3293 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3294 
3295 		skb_queue_tail(queue, skb);
3296 	} else {
3297 		/* Fragmented */
3298 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3299 
3300 		skb_shinfo(skb)->frag_list = NULL;
3301 
3302 		__skb_queue_tail(queue, skb);
3303 
3304 		do {
3305 			skb = list; list = list->next;
3306 
3307 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3308 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3309 						   0x00);
3310 			hci_add_iso_hdr(skb, conn->handle, flags);
3311 
3312 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3313 
3314 			__skb_queue_tail(queue, skb);
3315 		} while (list);
3316 	}
3317 }
3318 
3319 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3320 {
3321 	struct hci_dev *hdev = conn->hdev;
3322 
3323 	BT_DBG("%s len %d", hdev->name, skb->len);
3324 
3325 	hci_queue_iso(conn, &conn->data_q, skb);
3326 
3327 	queue_work(hdev->workqueue, &hdev->tx_work);
3328 }
3329 
3330 /* ---- HCI TX task (outgoing data) ---- */
3331 
3332 /* HCI Connection scheduler */
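/* Compute how many packets a connection may send in this scheduling round:
 * the free controller buffer count for the link type (falling back to the
 * LE/ACL counts when no dedicated pool exists) divided by the number of
 * ready connections, with a minimum of one.
 */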
3333 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3334 {
3335 	struct hci_dev *hdev;
3336 	int cnt, q;
3337 
3338 	if (!conn) {
3339 		*quote = 0;
3340 		return;
3341 	}
3342 
3343 	hdev = conn->hdev;
3344 
3345 	switch (conn->type) {
3346 	case ACL_LINK:
3347 		cnt = hdev->acl_cnt;
3348 		break;
3349 	case SCO_LINK:
3350 	case ESCO_LINK:
3351 		cnt = hdev->sco_cnt;
3352 		break;
3353 	case LE_LINK:
3354 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3355 		break;
3356 	case ISO_LINK:
3357 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3358 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3359 		break;
3360 	default:
3361 		cnt = 0;
3362 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3363 	}
3364 
3365 	q = cnt / num;
3366 	*quote = q ? q : 1;
3367 }
3368 
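/* Pick the connection of the given type with the fewest packets in flight
 * among those that have queued data, and compute its quote.
 */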
3369 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3370 				     int *quote)
3371 {
3372 	struct hci_conn_hash *h = &hdev->conn_hash;
3373 	struct hci_conn *conn = NULL, *c;
3374 	unsigned int num = 0, min = ~0;
3375 
3376 	/* We don't have to lock the device here. Connections are always
3377 	 * added and removed with the TX task disabled. */
3378 
3379 	rcu_read_lock();
3380 
3381 	list_for_each_entry_rcu(c, &h->list, list) {
3382 		if (c->type != type || skb_queue_empty(&c->data_q))
3383 			continue;
3384 
3385 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3386 			continue;
3387 
3388 		num++;
3389 
3390 		if (c->sent < min) {
3391 			min  = c->sent;
3392 			conn = c;
3393 		}
3394 
3395 		if (hci_conn_num(hdev, type) == num)
3396 			break;
3397 	}
3398 
3399 	rcu_read_unlock();
3400 
3401 	hci_quote_sent(conn, num, quote);
3402 
3403 	BT_DBG("conn %p quote %d", conn, *quote);
3404 	return conn;
3405 }
3406 
3407 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3408 {
3409 	struct hci_conn_hash *h = &hdev->conn_hash;
3410 	struct hci_conn *c;
3411 
3412 	bt_dev_err(hdev, "link tx timeout");
3413 
3414 	rcu_read_lock();
3415 
3416 	/* Kill stalled connections */
3417 	list_for_each_entry_rcu(c, &h->list, list) {
3418 		if (c->type == type && c->sent) {
3419 			bt_dev_err(hdev, "killing stalled connection %pMR",
3420 				   &c->dst);
3421 			/* hci_disconnect might sleep, so, we have to release
3422 			 * the RCU read lock before calling it.
3423 			 */
3424 			rcu_read_unlock();
3425 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3426 			rcu_read_lock();
3427 		}
3428 	}
3429 
3430 	rcu_read_unlock();
3431 }
3432 
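/* Pick the next channel to service: only channels whose head skb carries
 * the highest pending priority are considered, and among those the channel
 * on the connection with the fewest packets in flight wins.
 */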
3433 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3434 				      int *quote)
3435 {
3436 	struct hci_conn_hash *h = &hdev->conn_hash;
3437 	struct hci_chan *chan = NULL;
3438 	unsigned int num = 0, min = ~0, cur_prio = 0;
3439 	struct hci_conn *conn;
3440 	int conn_num = 0;
3441 
3442 	BT_DBG("%s", hdev->name);
3443 
3444 	rcu_read_lock();
3445 
3446 	list_for_each_entry_rcu(conn, &h->list, list) {
3447 		struct hci_chan *tmp;
3448 
3449 		if (conn->type != type)
3450 			continue;
3451 
3452 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3453 			continue;
3454 
3455 		conn_num++;
3456 
3457 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3458 			struct sk_buff *skb;
3459 
3460 			if (skb_queue_empty(&tmp->data_q))
3461 				continue;
3462 
3463 			skb = skb_peek(&tmp->data_q);
3464 			if (skb->priority < cur_prio)
3465 				continue;
3466 
3467 			if (skb->priority > cur_prio) {
3468 				num = 0;
3469 				min = ~0;
3470 				cur_prio = skb->priority;
3471 			}
3472 
3473 			num++;
3474 
3475 			if (conn->sent < min) {
3476 				min  = conn->sent;
3477 				chan = tmp;
3478 			}
3479 		}
3480 
3481 		if (hci_conn_num(hdev, type) == conn_num)
3482 			break;
3483 	}
3484 
3485 	rcu_read_unlock();
3486 
3487 	if (!chan)
3488 		return NULL;
3489 
3490 	hci_quote_sent(chan->conn, num, quote);
3491 
3492 	BT_DBG("chan %p quote %d", chan, *quote);
3493 	return chan;
3494 }
3495 
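/* After a transmission round, reset the per-round counter of channels
 * that did send and promote the head skb of starved channels to just
 * below HCI_PRIO_MAX so they are serviced on the next round.
 */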
3496 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3497 {
3498 	struct hci_conn_hash *h = &hdev->conn_hash;
3499 	struct hci_conn *conn;
3500 	int num = 0;
3501 
3502 	BT_DBG("%s", hdev->name);
3503 
3504 	rcu_read_lock();
3505 
3506 	list_for_each_entry_rcu(conn, &h->list, list) {
3507 		struct hci_chan *chan;
3508 
3509 		if (conn->type != type)
3510 			continue;
3511 
3512 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3513 			continue;
3514 
3515 		num++;
3516 
3517 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3518 			struct sk_buff *skb;
3519 
3520 			if (chan->sent) {
3521 				chan->sent = 0;
3522 				continue;
3523 			}
3524 
3525 			if (skb_queue_empty(&chan->data_q))
3526 				continue;
3527 
3528 			skb = skb_peek(&chan->data_q);
3529 			if (skb->priority >= HCI_PRIO_MAX - 1)
3530 				continue;
3531 
3532 			skb->priority = HCI_PRIO_MAX - 1;
3533 
3534 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3535 			       skb->priority);
3536 		}
3537 
3538 		if (hci_conn_num(hdev, type) == num)
3539 			break;
3540 	}
3541 
3542 	rcu_read_unlock();
3543 
3544 }
3545 
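/* Kill stalled connections of the given type when the controller has not
 * returned any buffer credits and nothing has been transmitted for longer
 * than HCI_ACL_TX_TIMEOUT.
 */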
3546 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3547 {
3548 	unsigned long last_tx;
3549 
3550 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3551 		return;
3552 
3553 	switch (type) {
3554 	case LE_LINK:
3555 		last_tx = hdev->le_last_tx;
3556 		break;
3557 	default:
3558 		last_tx = hdev->acl_last_tx;
3559 		break;
3560 	}
3561 
3562 	/* tx timeout must be longer than maximum link supervision timeout
3563 	 * (40.9 seconds)
3564 	 */
3565 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3566 		hci_link_tx_to(hdev, type);
3567 }
3568 
3569 /* Schedule SCO */
3570 static void hci_sched_sco(struct hci_dev *hdev)
3571 {
3572 	struct hci_conn *conn;
3573 	struct sk_buff *skb;
3574 	int quote;
3575 
3576 	BT_DBG("%s", hdev->name);
3577 
3578 	if (!hci_conn_num(hdev, SCO_LINK))
3579 		return;
3580 
3581 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3582 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3583 			BT_DBG("skb %p len %d", skb, skb->len);
3584 			hci_send_frame(hdev, skb);
3585 
3586 			conn->sent++;
3587 			if (conn->sent == ~0)
3588 				conn->sent = 0;
3589 		}
3590 	}
3591 }
3592 
3593 static void hci_sched_esco(struct hci_dev *hdev)
3594 {
3595 	struct hci_conn *conn;
3596 	struct sk_buff *skb;
3597 	int quote;
3598 
3599 	BT_DBG("%s", hdev->name);
3600 
3601 	if (!hci_conn_num(hdev, ESCO_LINK))
3602 		return;
3603 
3604 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3605 						     &quote))) {
3606 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3607 			BT_DBG("skb %p len %d", skb, skb->len);
3608 			hci_send_frame(hdev, skb);
3609 
3610 			conn->sent++;
3611 			if (conn->sent == ~0)
3612 				conn->sent = 0;
3613 		}
3614 	}
3615 }
3616 
3617 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3618 {
3619 	unsigned int cnt = hdev->acl_cnt;
3620 	struct hci_chan *chan;
3621 	struct sk_buff *skb;
3622 	int quote;
3623 
3624 	__check_timeout(hdev, cnt, ACL_LINK);
3625 
3626 	while (hdev->acl_cnt &&
3627 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3628 		u32 priority = (skb_peek(&chan->data_q))->priority;
3629 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3630 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3631 			       skb->len, skb->priority);
3632 
3633 			/* Stop if priority has changed */
3634 			if (skb->priority < priority)
3635 				break;
3636 
3637 			skb = skb_dequeue(&chan->data_q);
3638 
3639 			hci_conn_enter_active_mode(chan->conn,
3640 						   bt_cb(skb)->force_active);
3641 
3642 			hci_send_frame(hdev, skb);
3643 			hdev->acl_last_tx = jiffies;
3644 
3645 			hdev->acl_cnt--;
3646 			chan->sent++;
3647 			chan->conn->sent++;
3648 
3649 			/* Send pending SCO packets right away */
3650 			hci_sched_sco(hdev);
3651 			hci_sched_esco(hdev);
3652 		}
3653 	}
3654 
3655 	if (cnt != hdev->acl_cnt)
3656 		hci_prio_recalculate(hdev, ACL_LINK);
3657 }
3658 
3659 static void hci_sched_acl(struct hci_dev *hdev)
3660 {
3661 	BT_DBG("%s", hdev->name);
3662 
3663 	/* Nothing to schedule if there are no ACL links */
3664 	if (!hci_conn_num(hdev, ACL_LINK))
3665 		return;
3666 
3667 	hci_sched_acl_pkt(hdev);
3668 }
3669 
3670 static void hci_sched_le(struct hci_dev *hdev)
3671 {
3672 	struct hci_chan *chan;
3673 	struct sk_buff *skb;
3674 	int quote, cnt, tmp;
3675 
3676 	BT_DBG("%s", hdev->name);
3677 
3678 	if (!hci_conn_num(hdev, LE_LINK))
3679 		return;
3680 
3681 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3682 
3683 	__check_timeout(hdev, cnt, LE_LINK);
3684 
3685 	tmp = cnt;
3686 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3687 		u32 priority = (skb_peek(&chan->data_q))->priority;
3688 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3689 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3690 			       skb->len, skb->priority);
3691 
3692 			/* Stop if priority has changed */
3693 			if (skb->priority < priority)
3694 				break;
3695 
3696 			skb = skb_dequeue(&chan->data_q);
3697 
3698 			hci_send_frame(hdev, skb);
3699 			hdev->le_last_tx = jiffies;
3700 
3701 			cnt--;
3702 			chan->sent++;
3703 			chan->conn->sent++;
3704 
3705 			/* Send pending SCO packets right away */
3706 			hci_sched_sco(hdev);
3707 			hci_sched_esco(hdev);
3708 		}
3709 	}
3710 
3711 	if (hdev->le_pkts)
3712 		hdev->le_cnt = cnt;
3713 	else
3714 		hdev->acl_cnt = cnt;
3715 
3716 	if (cnt != tmp)
3717 		hci_prio_recalculate(hdev, LE_LINK);
3718 }
3719 
3720 /* Schedule CIS */
3721 static void hci_sched_iso(struct hci_dev *hdev)
3722 {
3723 	struct hci_conn *conn;
3724 	struct sk_buff *skb;
3725 	int quote, *cnt;
3726 
3727 	BT_DBG("%s", hdev->name);
3728 
3729 	if (!hci_conn_num(hdev, ISO_LINK))
3730 		return;
3731 
3732 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3733 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3734 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3735 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3736 			BT_DBG("skb %p len %d", skb, skb->len);
3737 			hci_send_frame(hdev, skb);
3738 
3739 			conn->sent++;
3740 			if (conn->sent == ~0)
3741 				conn->sent = 0;
3742 			(*cnt)--;
3743 		}
3744 	}
3745 }
3746 
3747 static void hci_tx_work(struct work_struct *work)
3748 {
3749 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3750 	struct sk_buff *skb;
3751 
3752 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3753 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3754 
3755 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3756 		/* Schedule queues and send stuff to HCI driver */
3757 		hci_sched_sco(hdev);
3758 		hci_sched_esco(hdev);
3759 		hci_sched_iso(hdev);
3760 		hci_sched_acl(hdev);
3761 		hci_sched_le(hdev);
3762 	}
3763 
3764 	/* Send next queued raw (unknown type) packet */
3765 	while ((skb = skb_dequeue(&hdev->raw_q)))
3766 		hci_send_frame(hdev, skb);
3767 }
3768 
3769 /* ----- HCI RX task (incoming data processing) ----- */
3770 
3771 /* ACL data packet */
3772 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3773 {
3774 	struct hci_acl_hdr *hdr = (void *) skb->data;
3775 	struct hci_conn *conn;
3776 	__u16 handle, flags;
3777 
3778 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3779 
3780 	handle = __le16_to_cpu(hdr->handle);
3781 	flags  = hci_flags(handle);
3782 	handle = hci_handle(handle);
3783 
3784 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3785 	       handle, flags);
3786 
3787 	hdev->stat.acl_rx++;
3788 
3789 	hci_dev_lock(hdev);
3790 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3791 	hci_dev_unlock(hdev);
3792 
3793 	if (conn) {
3794 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3795 
3796 		/* Send to upper protocol */
3797 		l2cap_recv_acldata(conn, skb, flags);
3798 		return;
3799 	} else {
3800 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3801 			   handle);
3802 	}
3803 
3804 	kfree_skb(skb);
3805 }
3806 
3807 /* SCO data packet */
3808 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3809 {
3810 	struct hci_sco_hdr *hdr = (void *) skb->data;
3811 	struct hci_conn *conn;
3812 	__u16 handle, flags;
3813 
3814 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3815 
3816 	handle = __le16_to_cpu(hdr->handle);
3817 	flags  = hci_flags(handle);
3818 	handle = hci_handle(handle);
3819 
3820 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3821 	       handle, flags);
3822 
3823 	hdev->stat.sco_rx++;
3824 
3825 	hci_dev_lock(hdev);
3826 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3827 	hci_dev_unlock(hdev);
3828 
3829 	if (conn) {
3830 		/* Send to upper protocol */
3831 		hci_skb_pkt_status(skb) = flags & 0x03;
3832 		sco_recv_scodata(conn, skb);
3833 		return;
3834 	} else {
3835 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3836 				       handle);
3837 	}
3838 
3839 	kfree_skb(skb);
3840 }
3841 
3842 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3843 {
3844 	struct hci_iso_hdr *hdr;
3845 	struct hci_conn *conn;
3846 	__u16 handle, flags;
3847 
3848 	hdr = skb_pull_data(skb, sizeof(*hdr));
3849 	if (!hdr) {
3850 		bt_dev_err(hdev, "ISO packet too small");
3851 		goto drop;
3852 	}
3853 
3854 	handle = __le16_to_cpu(hdr->handle);
3855 	flags  = hci_flags(handle);
3856 	handle = hci_handle(handle);
3857 
3858 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3859 		   handle, flags);
3860 
3861 	hci_dev_lock(hdev);
3862 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3863 	hci_dev_unlock(hdev);
3864 
3865 	if (!conn) {
3866 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3867 			   handle);
3868 		goto drop;
3869 	}
3870 
3871 	/* Send to upper protocol */
3872 	iso_recv(conn, skb, flags);
3873 	return;
3874 
3875 drop:
3876 	kfree_skb(skb);
3877 }
3878 
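/* A request is complete when the command queue is empty or when the next
 * queued command starts a new request (HCI_REQ_START is set).
 */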
3879 static bool hci_req_is_complete(struct hci_dev *hdev)
3880 {
3881 	struct sk_buff *skb;
3882 
3883 	skb = skb_peek(&hdev->cmd_q);
3884 	if (!skb)
3885 		return true;
3886 
3887 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3888 }
3889 
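/* Re-queue a clone of the last sent command so it gets transmitted again,
 * unless that command was HCI_OP_RESET. Used when a controller completes a
 * reset spontaneously and the pending command would otherwise never finish.
 */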
3890 static void hci_resend_last(struct hci_dev *hdev)
3891 {
3892 	struct hci_command_hdr *sent;
3893 	struct sk_buff *skb;
3894 	u16 opcode;
3895 
3896 	if (!hdev->sent_cmd)
3897 		return;
3898 
3899 	sent = (void *) hdev->sent_cmd->data;
3900 	opcode = __le16_to_cpu(sent->opcode);
3901 	if (opcode == HCI_OP_RESET)
3902 		return;
3903 
3904 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3905 	if (!skb)
3906 		return;
3907 
3908 	skb_queue_head(&hdev->cmd_q, skb);
3909 	queue_work(hdev->workqueue, &hdev->cmd_work);
3910 }
3911 
3912 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3913 			  hci_req_complete_t *req_complete,
3914 			  hci_req_complete_skb_t *req_complete_skb)
3915 {
3916 	struct sk_buff *skb;
3917 	unsigned long flags;
3918 
3919 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3920 
3921 	/* If the completed command doesn't match the last one that was
3922 	 * sent we need to do special handling of it.
3923 	 */
3924 	if (!hci_sent_cmd_data(hdev, opcode)) {
3925 		/* Some CSR based controllers generate a spontaneous
3926 		 * reset complete event during init and any pending
3927 		 * command will never be completed. In such a case we
3928 		 * need to resend whatever was the last sent
3929 		 * command.
3930 		 */
3931 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3932 			hci_resend_last(hdev);
3933 
3934 		return;
3935 	}
3936 
3937 	/* If we reach this point this event matches the last command sent */
3938 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3939 
3940 	/* If the command succeeded and there's still more commands in
3941 	 * this request the request is not yet complete.
3942 	 */
3943 	if (!status && !hci_req_is_complete(hdev))
3944 		return;
3945 
3946 	skb = hdev->req_skb;
3947 
3948 	/* If this was the last command in a request the complete
3949 	 * callback would be found in hdev->req_skb instead of the
3950 	 * command queue (hdev->cmd_q).
3951 	 */
3952 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3953 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3954 		return;
3955 	}
3956 
3957 	if (skb && bt_cb(skb)->hci.req_complete) {
3958 		*req_complete = bt_cb(skb)->hci.req_complete;
3959 		return;
3960 	}
3961 
3962 	/* Remove all pending commands belonging to this request */
3963 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3964 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3965 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3966 			__skb_queue_head(&hdev->cmd_q, skb);
3967 			break;
3968 		}
3969 
3970 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3971 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3972 		else
3973 			*req_complete = bt_cb(skb)->hci.req_complete;
3974 		dev_kfree_skb_irq(skb);
3975 	}
3976 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3977 }
3978 
3979 static void hci_rx_work(struct work_struct *work)
3980 {
3981 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3982 	struct sk_buff *skb;
3983 
3984 	BT_DBG("%s", hdev->name);
3985 
3986 	/* The kcov_remote functions are used to collect packet parsing
3987 	 * coverage information from this background thread and to associate
3988 	 * the coverage with the syscall's thread which originally injected
3989 	 * the packet. This helps with fuzzing the kernel.
3990 	 */
3991 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3992 		kcov_remote_start_common(skb_get_kcov_handle(skb));
3993 
3994 		/* Send copy to monitor */
3995 		hci_send_to_monitor(hdev, skb);
3996 
3997 		if (atomic_read(&hdev->promisc)) {
3998 			/* Send copy to the sockets */
3999 			hci_send_to_sock(hdev, skb);
4000 		}
4001 
4002 		/* If the device has been opened in HCI_USER_CHANNEL,
4003 		 * userspace has exclusive access to the device.
4004 		 * When the device is in HCI_INIT, we still need to process
4005 		 * the data packets so the driver can
4006 		 * complete its setup().
4007 		 */
4008 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4009 		    !test_bit(HCI_INIT, &hdev->flags)) {
4010 			kfree_skb(skb);
4011 			continue;
4012 		}
4013 
4014 		if (test_bit(HCI_INIT, &hdev->flags)) {
4015 			/* Don't process data packets in this state. */
4016 			switch (hci_skb_pkt_type(skb)) {
4017 			case HCI_ACLDATA_PKT:
4018 			case HCI_SCODATA_PKT:
4019 			case HCI_ISODATA_PKT:
4020 				kfree_skb(skb);
4021 				continue;
4022 			}
4023 		}
4024 
4025 		/* Process frame */
4026 		switch (hci_skb_pkt_type(skb)) {
4027 		case HCI_EVENT_PKT:
4028 			BT_DBG("%s Event packet", hdev->name);
4029 			hci_event_packet(hdev, skb);
4030 			break;
4031 
4032 		case HCI_ACLDATA_PKT:
4033 			BT_DBG("%s ACL data packet", hdev->name);
4034 			hci_acldata_packet(hdev, skb);
4035 			break;
4036 
4037 		case HCI_SCODATA_PKT:
4038 			BT_DBG("%s SCO data packet", hdev->name);
4039 			hci_scodata_packet(hdev, skb);
4040 			break;
4041 
4042 		case HCI_ISODATA_PKT:
4043 			BT_DBG("%s ISO data packet", hdev->name);
4044 			hci_isodata_packet(hdev, skb);
4045 			break;
4046 
4047 		default:
4048 			kfree_skb(skb);
4049 			break;
4050 		}
4051 	}
4052 }
4053 
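/* Transmit one queued command: keep a clone in hdev->sent_cmd (and in
 * hdev->req_skb when a synchronous request is pending) so the completion
 * event can be matched later. On success one command credit is consumed;
 * if cloning fails the command is put back on the queue to be retried.
 */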
4054 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4055 {
4056 	int err;
4057 
4058 	bt_dev_dbg(hdev, "skb %p", skb);
4059 
4060 	kfree_skb(hdev->sent_cmd);
4061 
4062 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4063 	if (!hdev->sent_cmd) {
4064 		skb_queue_head(&hdev->cmd_q, skb);
4065 		queue_work(hdev->workqueue, &hdev->cmd_work);
4066 		return;
4067 	}
4068 
4069 	err = hci_send_frame(hdev, skb);
4070 	if (err < 0) {
4071 		hci_cmd_sync_cancel_sync(hdev, -err);
4072 		return;
4073 	}
4074 
4075 	if (hdev->req_status == HCI_REQ_PEND &&
4076 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4077 		kfree_skb(hdev->req_skb);
4078 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4079 	}
4080 
4081 	atomic_dec(&hdev->cmd_cnt);
4082 }
4083 
4084 static void hci_cmd_work(struct work_struct *work)
4085 {
4086 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4087 	struct sk_buff *skb;
4088 
4089 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4090 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4091 
4092 	/* Send queued commands */
4093 	if (atomic_read(&hdev->cmd_cnt)) {
4094 		skb = skb_dequeue(&hdev->cmd_q);
4095 		if (!skb)
4096 			return;
4097 
4098 		hci_send_cmd_sync(hdev, skb);
4099 
4100 		rcu_read_lock();
4101 		if (test_bit(HCI_RESET, &hdev->flags) ||
4102 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4103 			cancel_delayed_work(&hdev->cmd_timer);
4104 		else
4105 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4106 					   HCI_CMD_TIMEOUT);
4107 		rcu_read_unlock();
4108 	}
4109 }
4110