xref: /linux/net/bluetooth/hci_core.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
/* Work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
65 
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68 	__u8 scan = opt;
69 
70 	BT_DBG("%s %x", req->hdev->name, scan);
71 
72 	/* Inquiry and Page scans */
73 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 	return 0;
75 }
76 
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79 	__u8 auth = opt;
80 
81 	BT_DBG("%s %x", req->hdev->name, auth);
82 
83 	/* Authentication */
84 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 	return 0;
86 }
87 
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90 	__u8 encrypt = opt;
91 
92 	BT_DBG("%s %x", req->hdev->name, encrypt);
93 
94 	/* Encryption */
95 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 	return 0;
97 }
98 
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101 	__le16 policy = cpu_to_le16(opt);
102 
103 	BT_DBG("%s %x", req->hdev->name, policy);
104 
105 	/* Default link policy */
106 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 	return 0;
108 }
109 
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114 	struct hci_dev *hdev = NULL, *d;
115 
116 	BT_DBG("%d", index);
117 
118 	if (index < 0)
119 		return NULL;
120 
121 	read_lock(&hci_dev_list_lock);
122 	list_for_each_entry(d, &hci_dev_list, list) {
123 		if (d->id == index) {
124 			hdev = hci_dev_hold(d);
125 			break;
126 		}
127 	}
128 	read_unlock(&hci_dev_list_lock);
129 	return hdev;
130 }
131 
132 /* ---- Inquiry support ---- */
133 
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136 	struct discovery_state *discov = &hdev->discovery;
137 
138 	switch (discov->state) {
139 	case DISCOVERY_FINDING:
140 	case DISCOVERY_RESOLVING:
141 		return true;
142 
143 	default:
144 		return false;
145 	}
146 }
147 
/* Transition the discovery state machine and emit the matching mgmt
 * Discovering events.  A no-op when the state does not actually change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		/* A STARTING -> STOPPED transition means discovery never
		 * actually began, so userspace was never told it started
		 * and must not be told it stopped.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
177 
/* Drop every entry from the inquiry cache.
 *
 * Entries may be linked on up to three lists (all, unknown, resolve)
 * but each is allocated exactly once and reachable through ->all, so
 * freeing along the "all" list releases everything; the secondary
 * lists are then simply re-initialized.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
191 
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193 					       bdaddr_t *bdaddr)
194 {
195 	struct discovery_state *cache = &hdev->discovery;
196 	struct inquiry_entry *e;
197 
198 	BT_DBG("cache %p, %pMR", cache, bdaddr);
199 
200 	list_for_each_entry(e, &cache->all, all) {
201 		if (!bacmp(&e->data.bdaddr, bdaddr))
202 			return e;
203 	}
204 
205 	return NULL;
206 }
207 
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209 						       bdaddr_t *bdaddr)
210 {
211 	struct discovery_state *cache = &hdev->discovery;
212 	struct inquiry_entry *e;
213 
214 	BT_DBG("cache %p, %pMR", cache, bdaddr);
215 
216 	list_for_each_entry(e, &cache->unknown, list) {
217 		if (!bacmp(&e->data.bdaddr, bdaddr))
218 			return e;
219 	}
220 
221 	return NULL;
222 }
223 
/* Look up an entry on the name-resolve list.
 *
 * Passing BDADDR_ANY acts as a wildcard: the first entry whose
 * name_state matches @state is returned.  Otherwise the entry with a
 * matching address is returned regardless of its state.  Returns NULL
 * when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
242 
/* Re-insert @ie at its correct position on the name-resolve list.
 *
 * The list is kept ordered so that entries whose name resolution is
 * already in flight (NAME_PENDING) stay in front, followed by the
 * remaining entries sorted by signal strength (smaller |rssi| = louder
 * device first).  Called after an entry's RSSI has been updated.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Stop at the first non-pending entry with an equal or weaker
	 * signal; @ie is inserted right before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
261 
/* Insert or refresh an inquiry cache entry for a discovered device.
 *
 * Returns MGMT_DEV_FOUND_* flags describing how userspace should treat
 * the result: LEGACY_PAIRING when either the new or cached data shows
 * no SSP support, CONFIRM_NAME when the remote name still needs to be
 * resolved (also set on allocation failure, as a safe fallback).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI determines the resolve order, so a changed value
		 * requires re-sorting the entry on the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry once the name becomes known, and
	 * unlink it from the unknown/resolve list it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
323 
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info records.  Returns the number of entries copied.
 * @buf must be large enough for @num records; the caller holds the
 * hdev lock while this runs.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
351 
/* HCI request callback: queue an Inquiry command built from the
 * user-supplied struct hci_inquiry_req passed via @opt.  Does nothing
 * when an inquiry is already in progress.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
371 
/* HCIINQUIRY ioctl handler: run (or reuse a recent) inquiry and copy
 * the cached results back to userspace.
 *
 * @arg points to a struct hci_inquiry_req followed by space for the
 * result records.  A fresh inquiry is only started when the cache is
 * stale, empty, or the caller asked for a flush.  Returns 0 on
 * success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s; budget ~2s of wall time each */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the result records */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
474 
475 static int hci_dev_do_open(struct hci_dev *hdev)
476 {
477 	int ret = 0;
478 
479 	BT_DBG("%s %p", hdev->name, hdev);
480 
481 	hci_req_sync_lock(hdev);
482 
483 	ret = hci_dev_open_sync(hdev);
484 
485 	hci_req_sync_unlock(hdev);
486 	return ret;
487 }
488 
489 /* ---- HCI ioctl helpers ---- */
490 
/* HCIDEVUP ioctl handler: power on the controller identified by @dev.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
545 
546 int hci_dev_do_close(struct hci_dev *hdev)
547 {
548 	int err;
549 
550 	BT_DBG("%s %p", hdev->name, hdev);
551 
552 	hci_req_sync_lock(hdev);
553 
554 	err = hci_dev_close_sync(hdev);
555 
556 	hci_req_sync_unlock(hdev);
557 
558 	return err;
559 }
560 
/* HCIDEVDOWN ioctl handler: power off the controller identified by
 * @dev.  Refused with -EBUSY while the device is bound to a user
 * channel.  Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	/* Make sure no conflicting power on/off work remains queued
	 * before shutting the device down.
	 */
	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
585 
/* Perform a full controller reset: drop all queued traffic, flush the
 * inquiry cache and connection table, reset the flow-control counters
 * and issue HCI_Reset.  Serialized via the req_sync lock.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Restore the flow-control counters to their post-reset values */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
629 
630 int hci_dev_reset(__u16 dev)
631 {
632 	struct hci_dev *hdev;
633 	int err;
634 
635 	hdev = hci_dev_get(dev);
636 	if (!hdev)
637 		return -ENODEV;
638 
639 	if (!test_bit(HCI_UP, &hdev->flags)) {
640 		err = -ENETDOWN;
641 		goto done;
642 	}
643 
644 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
645 		err = -EBUSY;
646 		goto done;
647 	}
648 
649 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
650 		err = -EOPNOTSUPP;
651 		goto done;
652 	}
653 
654 	err = hci_dev_do_reset(hdev);
655 
656 done:
657 	hci_dev_put(hdev);
658 	return err;
659 }
660 
661 int hci_dev_reset_stat(__u16 dev)
662 {
663 	struct hci_dev *hdev;
664 	int ret = 0;
665 
666 	hdev = hci_dev_get(dev);
667 	if (!hdev)
668 		return -ENODEV;
669 
670 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
671 		ret = -EBUSY;
672 		goto done;
673 	}
674 
675 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
676 		ret = -EOPNOTSUPP;
677 		goto done;
678 	}
679 
680 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
681 
682 done:
683 	hci_dev_put(hdev);
684 	return ret;
685 }
686 
/* Mirror a legacy HCISETSCAN change into the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags and, when the device is under mgmt
 * control and something actually changed, notify userspace of the new
 * settings.
 */
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable only makes sense while
		 * discoverable; drop it together with the main flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
722 
/* Handler for the legacy HCISET* ioctls: apply a single device setting
 * described by the struct hci_dev_req at @arg.  Returns 0 on success
 * or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values: the low
	 * half carries the packet count, the high half the MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
828 
/* HCIGETDEVLIST ioctl handler: copy id/flags pairs for up to the
 * caller-requested number of registered controllers into the
 * struct hci_dev_list_req at @arg.  Returns 0 on success or a
 * negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays within two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the header plus the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
878 
879 int hci_get_dev_info(void __user *arg)
880 {
881 	struct hci_dev *hdev;
882 	struct hci_dev_info di;
883 	unsigned long flags;
884 	int err = 0;
885 
886 	if (copy_from_user(&di, arg, sizeof(di)))
887 		return -EFAULT;
888 
889 	hdev = hci_dev_get(di.dev_id);
890 	if (!hdev)
891 		return -ENODEV;
892 
893 	/* When the auto-off is configured it means the transport
894 	 * is running, but in that case still indicate that the
895 	 * device is actually down.
896 	 */
897 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
898 		flags = hdev->flags & ~BIT(HCI_UP);
899 	else
900 		flags = hdev->flags;
901 
902 	strcpy(di.name, hdev->name);
903 	di.bdaddr   = hdev->bdaddr;
904 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
905 	di.flags    = flags;
906 	di.pkt_type = hdev->pkt_type;
907 	if (lmp_bredr_capable(hdev)) {
908 		di.acl_mtu  = hdev->acl_mtu;
909 		di.acl_pkts = hdev->acl_pkts;
910 		di.sco_mtu  = hdev->sco_mtu;
911 		di.sco_pkts = hdev->sco_pkts;
912 	} else {
913 		di.acl_mtu  = hdev->le_mtu;
914 		di.acl_pkts = hdev->le_pkts;
915 		di.sco_mtu  = 0;
916 		di.sco_pkts = 0;
917 	}
918 	di.link_policy = hdev->link_policy;
919 	di.link_mode   = hdev->link_mode;
920 
921 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
922 	memcpy(&di.features, &hdev->features, sizeof(di.features));
923 
924 	if (copy_to_user(arg, &di, sizeof(di)))
925 		err = -EFAULT;
926 
927 	hci_dev_put(hdev);
928 
929 	return err;
930 }
931 
932 /* ---- Interface to HCI drivers ---- */
933 
/* rfkill set_block callback: mirror the rfkill state into the
 * HCI_RFKILLED flag and close the device when it gets blocked.
 * Refused while the device is bound to a user channel.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		/* During SETUP/CONFIG the close is deferred; the setup
		 * path checks HCI_RFKILLED itself.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}
954 
/* rfkill operations for the per-controller rfkill switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
958 
/* Work handler: power on the controller.
 *
 * Handles both the mgmt-driven power-on and the auto-power-on path at
 * registration time, including re-checking error conditions (rfkill,
 * missing address) that were deliberately ignored during setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Already up under auto-off: just cancel the pending power-off
	 * and sync the powered state with mgmt.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
1028 
/* Delayed-work handler: take the controller down (auto-off timeout or
 * mgmt-requested power off).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1038 
/* Work handler: recover from a controller hardware error by letting
 * the driver handle it (when it provides a hw_error hook) or by
 * power-cycling the device.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* Only reopen when the close succeeded; otherwise leave the
	 * device down.
	 */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
1055 
1056 void hci_uuids_clear(struct hci_dev *hdev)
1057 {
1058 	struct bt_uuid *uuid, *tmp;
1059 
1060 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1061 		list_del(&uuid->list);
1062 		kfree(uuid);
1063 	}
1064 }
1065 
/* Remove and free all stored BR/EDR link keys.
 *
 * NOTE(review): uses plain list_for_each_entry rather than the _safe
 * variant; this appears deliberate — list_del_rcu() leaves ->next
 * intact for the ongoing traversal and kfree_rcu() defers the actual
 * free past an RCU grace period. Confirm before "fixing".
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
1075 
/* Remove and free all stored SMP long term keys.
 *
 * NOTE(review): same non-_safe RCU deletion pattern as
 * hci_link_keys_clear() — list_del_rcu() keeps ->next valid and
 * kfree_rcu() defers the free, so the traversal stays safe.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1085 
/* Remove and free all stored identity resolving keys.
 *
 * NOTE(review): same non-_safe RCU deletion pattern as
 * hci_link_keys_clear() — list_del_rcu() keeps ->next valid and
 * kfree_rcu() defers the free, so the traversal stays safe.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1095 
/* Remove and free all mgmt-provided blocked keys.
 *
 * NOTE(review): same non-_safe RCU deletion pattern as
 * hci_link_keys_clear() — list_del_rcu() keeps ->next valid and
 * kfree_rcu() defers the free, so the traversal stays safe.
 */
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
1105 
1106 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1107 {
1108 	bool blocked = false;
1109 	struct blocked_key *b;
1110 
1111 	rcu_read_lock();
1112 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1113 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1114 			blocked = true;
1115 			break;
1116 		}
1117 	}
1118 
1119 	rcu_read_unlock();
1120 	return blocked;
1121 }
1122 
/* Find the stored BR/EDR link key for @bdaddr.
 *
 * Returns NULL when no key is stored or the stored key is on the
 * blocked-key list (logged rate-limited).
 *
 * NOTE(review): the entry is returned after rcu_read_unlock();
 * presumably callers serialize against key removal via the hdev lock —
 * verify at the call sites.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			/* Never hand out a key that mgmt has blocked */
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1148 
1149 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1150 			       u8 key_type, u8 old_key_type)
1151 {
1152 	/* Legacy key */
1153 	if (key_type < 0x03)
1154 		return true;
1155 
1156 	/* Debug keys are insecure so don't store them persistently */
1157 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1158 		return false;
1159 
1160 	/* Changed combination key and there's no previous one */
1161 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1162 		return false;
1163 
1164 	/* Security mode 3 case */
1165 	if (!conn)
1166 		return true;
1167 
1168 	/* BR/EDR key derived using SC from an LE link */
1169 	if (conn->type == LE_LINK)
1170 		return true;
1171 
1172 	/* Neither local nor remote side had no-bonding as requirement */
1173 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1174 		return true;
1175 
1176 	/* Local side had dedicated bonding as requirement */
1177 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1178 		return true;
1179 
1180 	/* Remote side had dedicated bonding as requirement */
1181 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1182 		return true;
1183 
1184 	/* If none of the above criteria match, then don't store the key
1185 	 * persistently */
1186 	return false;
1187 }
1188 
1189 static u8 ltk_role(u8 type)
1190 {
1191 	if (type == SMP_LTK)
1192 		return HCI_ROLE_MASTER;
1193 
1194 	return HCI_ROLE_SLAVE;
1195 }
1196 
/* Find the stored LE long term key for @bdaddr/@addr_type usable in
 * connection role @role.
 *
 * SC-generated LTKs are symmetric and match either role; legacy LTKs
 * only match the role they were generated for.  Returns NULL when no
 * key matches or the matching key is on the blocked-key list.
 *
 * NOTE(review): like hci_find_link_key(), the entry is returned after
 * rcu_read_unlock(); callers presumably hold the hdev lock — verify.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			/* Never hand out a key that mgmt has blocked */
			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
1225 
/* Find the Identity Resolving Key that resolves the given Resolvable
 * Private Address. Two passes: first a cheap comparison against the
 * cached RPA of each IRK, then a cryptographic resolution via
 * smp_irk_matches(). On a crypto match the RPA is cached in the IRK so
 * the next lookup hits the fast path.
 *
 * Returns NULL if no IRK matches or the matching IRK is blocked.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for future fast-path hits */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	/* Blocked-key filtering happens before dropping the RCU lock */
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1259 
/* Find the Identity Resolving Key stored for the given identity address
 * and address type. Returns NULL for invalid identity addresses, when
 * no IRK is stored, or when the stored IRK is on the blocked-key list.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random (a static
	 * random address has the two most significant bits set to 0b11)
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	/* Blocked-key filtering happens before dropping the RCU lock */
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
1292 
/* Store (or update) a BR/EDR link key for the given address. Reuses an
 * existing entry when one is present, otherwise allocates a new one and
 * adds it to hdev->link_keys.
 *
 * If @persistent is non-NULL it is set to whether the key should be
 * stored permanently, as decided by hci_persistent_key().
 *
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
1339 
1340 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1341 			    u8 addr_type, u8 type, u8 authenticated,
1342 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1343 {
1344 	struct smp_ltk *key, *old_key;
1345 	u8 role = ltk_role(type);
1346 
1347 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1348 	if (old_key)
1349 		key = old_key;
1350 	else {
1351 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1352 		if (!key)
1353 			return NULL;
1354 		list_add_rcu(&key->list, &hdev->long_term_keys);
1355 	}
1356 
1357 	bacpy(&key->bdaddr, bdaddr);
1358 	key->bdaddr_type = addr_type;
1359 	memcpy(key->val, tk, sizeof(key->val));
1360 	key->authenticated = authenticated;
1361 	key->ediv = ediv;
1362 	key->rand = rand;
1363 	key->enc_size = enc_size;
1364 	key->type = type;
1365 
1366 	return key;
1367 }
1368 
1369 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1370 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1371 {
1372 	struct smp_irk *irk;
1373 
1374 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1375 	if (!irk) {
1376 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1377 		if (!irk)
1378 			return NULL;
1379 
1380 		bacpy(&irk->bdaddr, bdaddr);
1381 		irk->addr_type = addr_type;
1382 
1383 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1384 	}
1385 
1386 	memcpy(irk->val, val, 16);
1387 	bacpy(&irk->rpa, rpa);
1388 
1389 	return irk;
1390 }
1391 
1392 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393 {
1394 	struct link_key *key;
1395 
1396 	key = hci_find_link_key(hdev, bdaddr);
1397 	if (!key)
1398 		return -ENOENT;
1399 
1400 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1401 
1402 	list_del_rcu(&key->list);
1403 	kfree_rcu(key, rcu);
1404 
1405 	return 0;
1406 }
1407 
/* Delete all stored Long Term Keys matching the given address and
 * address type (there can be one per role).
 *
 * NOTE(review): entries are unlinked while iterating with
 * list_for_each_entry_rcu() and no rcu_read_lock() is taken here;
 * presumably the caller holds hdev->lock, which serializes writers —
 * confirm against callers.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
1426 
/* Delete the stored Identity Resolving Key(s) matching the given
 * identity address and address type.
 *
 * NOTE(review): as with hci_remove_ltk(), entries are unlinked during
 * RCU-style iteration without rcu_read_lock(); presumably serialized by
 * the caller holding hdev->lock — confirm.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
1441 
/* Report whether the given address has stored pairing material: a link
 * key for BR/EDR addresses, or a Long Term Key for LE addresses. For LE
 * the address is first resolved to its identity address via any stored
 * IRK before the LTK list is searched.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK matches, continue the search with the identity
	 * address the keys are actually stored under.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
1477 
/* HCI command timer function.
 *
 * Runs when the controller fails to respond to a sent command in time.
 * Logs the opcode of the outstanding command (if any), gives the driver
 * a chance to react via its cmd_timeout hook, then resets the command
 * credit counter and kicks the command work queue so queued commands
 * can proceed.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Optional driver hook for vendor-specific recovery */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Allow one command to be sent again and restart processing */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1499 
/* HCI ncmd timer function.
 *
 * Runs when the controller has reported zero free command slots (ncmd)
 * for too long, meaning it stopped accepting commands. Outside of the
 * init phase this is treated as unrecoverable and a reset is injected.
 */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}
1517 
1518 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1519 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1520 {
1521 	struct oob_data *data;
1522 
1523 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1524 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1525 			continue;
1526 		if (data->bdaddr_type != bdaddr_type)
1527 			continue;
1528 		return data;
1529 	}
1530 
1531 	return NULL;
1532 }
1533 
1534 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1535 			       u8 bdaddr_type)
1536 {
1537 	struct oob_data *data;
1538 
1539 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1540 	if (!data)
1541 		return -ENOENT;
1542 
1543 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1544 
1545 	list_del(&data->list);
1546 	kfree(data);
1547 
1548 	return 0;
1549 }
1550 
1551 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1552 {
1553 	struct oob_data *data, *n;
1554 
1555 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1556 		list_del(&data->list);
1557 		kfree(data);
1558 	}
1559 }
1560 
/* Store (or update) remote Out-Of-Band pairing data for the given
 * address. Any of the 192-bit or 256-bit hash/randomizer pairs may be
 * absent (NULL); data->present records which pairs were supplied:
 * 0x01 = only the 192-bit pair, 0x02 = only the 256-bit pair,
 * 0x03 = both, 0x00 = neither. The order of the branches below ensures
 * every combination leaves present consistent — do not reorder.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		/* No 192-bit pair: zero it out and set present based on
		 * whether the 256-bit pair follows
		 */
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1606 
1607 /* This function requires the caller holds hdev->lock */
1608 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1609 {
1610 	struct adv_info *adv_instance;
1611 
1612 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1613 		if (adv_instance->instance == instance)
1614 			return adv_instance;
1615 	}
1616 
1617 	return NULL;
1618 }
1619 
1620 /* This function requires the caller holds hdev->lock */
1621 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1622 {
1623 	struct adv_info *cur_instance;
1624 
1625 	cur_instance = hci_find_adv_instance(hdev, instance);
1626 	if (!cur_instance)
1627 		return NULL;
1628 
1629 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1630 					    struct adv_info, list))
1631 		return list_first_entry(&hdev->adv_instances,
1632 						 struct adv_info, list);
1633 	else
1634 		return list_next_entry(cur_instance, list);
1635 }
1636 
1637 /* This function requires the caller holds hdev->lock */
1638 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1639 {
1640 	struct adv_info *adv_instance;
1641 
1642 	adv_instance = hci_find_adv_instance(hdev, instance);
1643 	if (!adv_instance)
1644 		return -ENOENT;
1645 
1646 	BT_DBG("%s removing %dMR", hdev->name, instance);
1647 
1648 	if (hdev->cur_adv_instance == instance) {
1649 		if (hdev->adv_instance_timeout) {
1650 			cancel_delayed_work(&hdev->adv_instance_expire);
1651 			hdev->adv_instance_timeout = 0;
1652 		}
1653 		hdev->cur_adv_instance = 0x00;
1654 	}
1655 
1656 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1657 
1658 	list_del(&adv_instance->list);
1659 	kfree(adv_instance);
1660 
1661 	hdev->adv_instance_cnt--;
1662 
1663 	return 0;
1664 }
1665 
1666 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1667 {
1668 	struct adv_info *adv_instance, *n;
1669 
1670 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1671 		adv_instance->rpa_expired = rpa_expired;
1672 }
1673 
/* This function requires the caller holds hdev->lock.
 *
 * Tear down all advertising instances: cancel the shared expiry timer
 * and each instance's RPA work, free every instance and reset the
 * instance bookkeeping to its initial state.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		/* _sync: the RPA work may be running; wait for it before
		 * freeing the instance it references
		 */
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
1693 
1694 static void adv_instance_rpa_expired(struct work_struct *work)
1695 {
1696 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1697 						     rpa_expired_cb.work);
1698 
1699 	BT_DBG("");
1700 
1701 	adv_instance->rpa_expired = true;
1702 }
1703 
1704 /* This function requires the caller holds hdev->lock */
1705 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1706 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1707 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1708 				      u16 timeout, u16 duration, s8 tx_power,
1709 				      u32 min_interval, u32 max_interval)
1710 {
1711 	struct adv_info *adv;
1712 
1713 	adv = hci_find_adv_instance(hdev, instance);
1714 	if (adv) {
1715 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1716 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1717 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1718 	} else {
1719 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1720 		    instance < 1 || instance > hdev->le_num_of_adv_sets)
1721 			return ERR_PTR(-EOVERFLOW);
1722 
1723 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1724 		if (!adv)
1725 			return ERR_PTR(-ENOMEM);
1726 
1727 		adv->pending = true;
1728 		adv->instance = instance;
1729 		list_add(&adv->list, &hdev->adv_instances);
1730 		hdev->adv_instance_cnt++;
1731 	}
1732 
1733 	adv->flags = flags;
1734 	adv->min_interval = min_interval;
1735 	adv->max_interval = max_interval;
1736 	adv->tx_power = tx_power;
1737 
1738 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1739 				  scan_rsp_len, scan_rsp_data);
1740 
1741 	adv->timeout = timeout;
1742 	adv->remaining_time = timeout;
1743 
1744 	if (duration == 0)
1745 		adv->duration = hdev->def_multi_adv_rotation_duration;
1746 	else
1747 		adv->duration = duration;
1748 
1749 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1750 
1751 	BT_DBG("%s for %dMR", hdev->name, instance);
1752 
1753 	return adv;
1754 }
1755 
1756 /* This function requires the caller holds hdev->lock */
1757 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1758 				      u32 flags, u8 data_len, u8 *data,
1759 				      u32 min_interval, u32 max_interval)
1760 {
1761 	struct adv_info *adv;
1762 
1763 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1764 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1765 				   min_interval, max_interval);
1766 	if (IS_ERR(adv))
1767 		return adv;
1768 
1769 	adv->periodic = true;
1770 	adv->per_adv_data_len = data_len;
1771 
1772 	if (data)
1773 		memcpy(adv->per_adv_data, data, data_len);
1774 
1775 	return adv;
1776 }
1777 
/* This function requires the caller holds hdev->lock.
 *
 * Update the advertising data and/or scan response data of an existing
 * instance. Each payload is only rewritten — and its *_changed flag set
 * — when non-empty data is supplied that differs from what is stored
 * (the ADV_DATA_CMP/SCAN_RSP_CMP macros compare against the current
 * contents).
 *
 * Returns 0 on success or -ENOENT when the instance does not exist.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
1812 
/* This function requires the caller holds hdev->lock.
 *
 * Return the MGMT advertising flags for an instance. Instance 0x00 is
 * special: its flags are synthesized from the controller's current
 * settings rather than stored. For any other instance the stored flags
 * are returned, or 0 when the instance does not exist.
 */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		/* Limited discoverable takes precedence over general */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}
1847 
1848 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1849 {
1850 	struct adv_info *adv;
1851 
1852 	/* Instance 0x00 always set local name */
1853 	if (instance == 0x00)
1854 		return true;
1855 
1856 	adv = hci_find_adv_instance(hdev, instance);
1857 	if (!adv)
1858 		return false;
1859 
1860 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1861 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1862 		return true;
1863 
1864 	return adv->scan_rsp_len ? true : false;
1865 }
1866 
/* This function requires the caller holds hdev->lock.
 *
 * Free every registered advertisement monitor and destroy the IDR that
 * tracks them. hci_free_adv_monitor() removes each entry from the IDR
 * itself, so the iteration tolerates the concurrent removals.
 */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}
1878 
/* Frees the monitor structure and do some bookkeepings.
 * This function requires the caller holds hdev->lock.
 *
 * Releases the monitor's pattern list, removes its IDR handle (handle 0
 * means "never allocated"), and — if the monitor had progressed past the
 * not-registered state — decrements the monitor count and notifies the
 * management interface of the removal. Safe to call with NULL.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	/* A zero handle means the monitor was never entered in the IDR */
	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
1905 
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 *
 * hdev->lock is taken only around the IDR allocation; the offload call
 * below runs without it.
 *
 * Returns 0 on success (including "not forwarded" cases), a negative
 * errno from the IDR allocation, or the offload status.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Offloading only makes sense while the controller is powered */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
			   monitor->handle, status);
		break;
	}

	return status;
}
1951 
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 *
 * Returns the offload status (0 for the no-offload case and for -ENOENT,
 * which is downgraded to a warning since the monitor is freed anyway).
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
			   hdev->name, monitor->handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
1988 
1989 /* This function requires the caller holds hci_req_sync_lock */
1990 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1991 {
1992 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1993 
1994 	if (!monitor)
1995 		return -EINVAL;
1996 
1997 	return hci_remove_adv_monitor(hdev, monitor);
1998 }
1999 
/* This function requires the caller holds hci_req_sync_lock.
 *
 * Walk the monitor IDR and remove every registered monitor, stopping at
 * the first failure. idr_get_next() returns the entry with the lowest
 * id >= idr_next_id, so advancing the cursor after each removal visits
 * each monitor exactly once.
 *
 * Returns 0 on success or the first non-zero removal status.
 */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}
2021 
/* This function requires the caller holds hdev->lock.
 *
 * Report whether at least one advertisement monitor is registered.
 */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}
2027 
2028 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2029 {
2030 	if (msft_monitor_supported(hdev))
2031 		return HCI_ADV_MONITOR_EXT_MSFT;
2032 
2033 	return HCI_ADV_MONITOR_EXT_NONE;
2034 }
2035 
2036 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2037 					 bdaddr_t *bdaddr, u8 type)
2038 {
2039 	struct bdaddr_list *b;
2040 
2041 	list_for_each_entry(b, bdaddr_list, list) {
2042 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2043 			return b;
2044 	}
2045 
2046 	return NULL;
2047 }
2048 
2049 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2050 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2051 				u8 type)
2052 {
2053 	struct bdaddr_list_with_irk *b;
2054 
2055 	list_for_each_entry(b, bdaddr_list, list) {
2056 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2057 			return b;
2058 	}
2059 
2060 	return NULL;
2061 }
2062 
2063 struct bdaddr_list_with_flags *
2064 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2065 				  bdaddr_t *bdaddr, u8 type)
2066 {
2067 	struct bdaddr_list_with_flags *b;
2068 
2069 	list_for_each_entry(b, bdaddr_list, list) {
2070 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2071 			return b;
2072 	}
2073 
2074 	return NULL;
2075 }
2076 
2077 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2078 {
2079 	struct bdaddr_list *b, *n;
2080 
2081 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2082 		list_del(&b->list);
2083 		kfree(b);
2084 	}
2085 }
2086 
2087 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2088 {
2089 	struct bdaddr_list *entry;
2090 
2091 	if (!bacmp(bdaddr, BDADDR_ANY))
2092 		return -EBADF;
2093 
2094 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2095 		return -EEXIST;
2096 
2097 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2098 	if (!entry)
2099 		return -ENOMEM;
2100 
2101 	bacpy(&entry->bdaddr, bdaddr);
2102 	entry->bdaddr_type = type;
2103 
2104 	list_add(&entry->list, list);
2105 
2106 	return 0;
2107 }
2108 
2109 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2110 					u8 type, u8 *peer_irk, u8 *local_irk)
2111 {
2112 	struct bdaddr_list_with_irk *entry;
2113 
2114 	if (!bacmp(bdaddr, BDADDR_ANY))
2115 		return -EBADF;
2116 
2117 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2118 		return -EEXIST;
2119 
2120 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2121 	if (!entry)
2122 		return -ENOMEM;
2123 
2124 	bacpy(&entry->bdaddr, bdaddr);
2125 	entry->bdaddr_type = type;
2126 
2127 	if (peer_irk)
2128 		memcpy(entry->peer_irk, peer_irk, 16);
2129 
2130 	if (local_irk)
2131 		memcpy(entry->local_irk, local_irk, 16);
2132 
2133 	list_add(&entry->list, list);
2134 
2135 	return 0;
2136 }
2137 
2138 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2139 				   u8 type, u32 flags)
2140 {
2141 	struct bdaddr_list_with_flags *entry;
2142 
2143 	if (!bacmp(bdaddr, BDADDR_ANY))
2144 		return -EBADF;
2145 
2146 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2147 		return -EEXIST;
2148 
2149 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2150 	if (!entry)
2151 		return -ENOMEM;
2152 
2153 	bacpy(&entry->bdaddr, bdaddr);
2154 	entry->bdaddr_type = type;
2155 	entry->flags = flags;
2156 
2157 	list_add(&entry->list, list);
2158 
2159 	return 0;
2160 }
2161 
2162 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2163 {
2164 	struct bdaddr_list *entry;
2165 
2166 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2167 		hci_bdaddr_list_clear(list);
2168 		return 0;
2169 	}
2170 
2171 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2172 	if (!entry)
2173 		return -ENOENT;
2174 
2175 	list_del(&entry->list);
2176 	kfree(entry);
2177 
2178 	return 0;
2179 }
2180 
2181 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2182 							u8 type)
2183 {
2184 	struct bdaddr_list_with_irk *entry;
2185 
2186 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2187 		hci_bdaddr_list_clear(list);
2188 		return 0;
2189 	}
2190 
2191 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2192 	if (!entry)
2193 		return -ENOENT;
2194 
2195 	list_del(&entry->list);
2196 	kfree(entry);
2197 
2198 	return 0;
2199 }
2200 
2201 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2202 				   u8 type)
2203 {
2204 	struct bdaddr_list_with_flags *entry;
2205 
2206 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2207 		hci_bdaddr_list_clear(list);
2208 		return 0;
2209 	}
2210 
2211 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2212 	if (!entry)
2213 		return -ENOENT;
2214 
2215 	list_del(&entry->list);
2216 	kfree(entry);
2217 
2218 	return 0;
2219 }
2220 
2221 /* This function requires the caller holds hdev->lock */
2222 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2223 					       bdaddr_t *addr, u8 addr_type)
2224 {
2225 	struct hci_conn_params *params;
2226 
2227 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2228 		if (bacmp(&params->addr, addr) == 0 &&
2229 		    params->addr_type == addr_type) {
2230 			return params;
2231 		}
2232 	}
2233 
2234 	return NULL;
2235 }
2236 
2237 /* This function requires the caller holds hdev->lock */
2238 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2239 						  bdaddr_t *addr, u8 addr_type)
2240 {
2241 	struct hci_conn_params *param;
2242 
2243 	list_for_each_entry(param, list, action) {
2244 		if (bacmp(&param->addr, addr) == 0 &&
2245 		    param->addr_type == addr_type)
2246 			return param;
2247 	}
2248 
2249 	return NULL;
2250 }
2251 
/* This function requires the caller holds hdev->lock.
 *
 * Get or create the LE connection parameters for the given address and
 * address type. A newly created entry is initialized from the
 * controller's default connection parameters with auto-connect
 * disabled.
 *
 * Returns the parameters, or NULL on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any pending-action list yet */
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2284 
/* Release a connection-parameters entry: drop the reference (and the
 * hold) on any connection still attached to it, unlink it from both the
 * pending-action list and the main list, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2296 
2297 /* This function requires the caller holds hdev->lock */
2298 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2299 {
2300 	struct hci_conn_params *params;
2301 
2302 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2303 	if (!params)
2304 		return;
2305 
2306 	hci_conn_params_free(params);
2307 
2308 	hci_update_passive_scan(hdev);
2309 
2310 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2311 }
2312 
/* This function requires the caller holds hdev->lock.
 *
 * Remove all connection-parameter entries whose auto-connect policy is
 * disabled, except those with a pending explicit (one-time) connection
 * attempt, which are downgraded to the "explicit" policy instead of
 * being freed.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2336 
2337 /* This function requires the caller holds hdev->lock */
2338 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2339 {
2340 	struct hci_conn_params *params, *tmp;
2341 
2342 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2343 		hci_conn_params_free(params);
2344 
2345 	BT_DBG("All LE connection parameters were removed");
2346 }
2347 
2348 /* Copy the Identity Address of the controller.
2349  *
2350  * If the controller has a public BD_ADDR, then by default use that one.
2351  * If this is a LE only controller without a public address, default to
2352  * the static random address.
2353  *
2354  * For debugging purposes it is possible to force controllers with a
2355  * public address to use the static random address instead.
2356  *
2357  * In case BR/EDR has been disabled on a dual-mode controller and
2358  * userspace has configured a static address, then that address
2359  * becomes the identity address instead of the public BR/EDR address.
2360  */
2361 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362 			       u8 *bdaddr_type)
2363 {
2364 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2365 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2366 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2367 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2368 		bacpy(bdaddr, &hdev->static_addr);
2369 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2370 	} else {
2371 		bacpy(bdaddr, &hdev->bdaddr);
2372 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2373 	}
2374 }
2375 
2376 static void hci_clear_wake_reason(struct hci_dev *hdev)
2377 {
2378 	hci_dev_lock(hdev);
2379 
2380 	hdev->wake_reason = 0;
2381 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2382 	hdev->wake_addr_type = 0;
2383 
2384 	hci_dev_unlock(hdev);
2385 }
2386 
2387 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2388 				void *data)
2389 {
2390 	struct hci_dev *hdev =
2391 		container_of(nb, struct hci_dev, suspend_notifier);
2392 	int ret = 0;
2393 
2394 	if (action == PM_SUSPEND_PREPARE)
2395 		ret = hci_suspend_dev(hdev);
2396 	else if (action == PM_POST_SUSPEND)
2397 		ret = hci_resume_dev(hdev);
2398 
2399 	if (ret)
2400 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2401 			   action, ret);
2402 
2403 	return NOTIFY_DONE;
2404 }
2405 
/* Alloc HCI device */
/* Allocate and zero-initialize an hci_dev, optionally with sizeof_priv
 * extra bytes of driver-private data appended, and seed every tunable
 * with its default value. Returns NULL on allocation failure. The
 * object is freed via hci_free_dev()/hci_release_dev().
 */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR defaults */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (intervals/windows in controller units) */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Lists, work items and queues used throughout the core */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
2535 
/* Free HCI device */
/* Drops the sysfs device reference; the actual memory is released by
 * the device release callback, which ends up in hci_release_dev().
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2543 
/* Register HCI device */
/* Register a fully-initialized hci_dev with the core: allocate an index,
 * create workqueues/debugfs/sysfs/rfkill, add it to hci_dev_list, announce
 * it on the HCI socket, register the PM suspend notifier and schedule the
 * initial power-on. Returns the assigned index on success or a negative
 * errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must provide at least these callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Ordered workqueues keep RX/TX/command processing serialized */
	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		goto err_wqueue;
	/* NOTE(review): if hci_register_suspend_notifier() fails, this
	 * error path does not undo device_add(), the rfkill registration,
	 * the hci_dev_list insertion, the HCI_DEV_REG socket event or
	 * hci_dev_hold() — verify whether a fuller unwind is needed here.
	 */

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2662 
/* Unregister HCI device */
/* Tear down a registered hci_dev in the reverse order of registration.
 * The HCI_UNREGISTER flag is set first so concurrent operations (e.g.
 * suspend/resume) bail out early; memory release itself is deferred to
 * hci_release_dev() via the final hci_dev_put().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remove from the global list so no new lookups find this device */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	/* Make sure a pending power-on is not racing with the teardown */
	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	/* Only announce removal to mgmt if the device completed setup and
	 * is not in the middle of init/config.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
2708 
/* Release HCI device */
/* Final cleanup once the last reference is gone: free debugfs entries,
 * workqueues, all per-device lists/keys/parameters, return the index to
 * the IDA and free the hci_dev itself. Runs after hci_unregister_dev().
 */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drain every per-device list under hdev->lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* Free the index last so "hciN" cannot be reused while the old
	 * device is still being torn down.
	 */
	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
2742 
2743 int hci_register_suspend_notifier(struct hci_dev *hdev)
2744 {
2745 	int ret = 0;
2746 
2747 	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2748 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2749 		ret = register_pm_notifier(&hdev->suspend_notifier);
2750 	}
2751 
2752 	return ret;
2753 }
2754 
2755 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2756 {
2757 	int ret = 0;
2758 
2759 	if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2760 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2761 
2762 	return ret;
2763 }
2764 
2765 /* Suspend HCI device */
2766 int hci_suspend_dev(struct hci_dev *hdev)
2767 {
2768 	int ret;
2769 
2770 	bt_dev_dbg(hdev, "");
2771 
2772 	/* Suspend should only act on when powered. */
2773 	if (!hdev_is_powered(hdev) ||
2774 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2775 		return 0;
2776 
2777 	/* If powering down don't attempt to suspend */
2778 	if (mgmt_powering_down(hdev))
2779 		return 0;
2780 
2781 	hci_req_sync_lock(hdev);
2782 	ret = hci_suspend_sync(hdev);
2783 	hci_req_sync_unlock(hdev);
2784 
2785 	hci_clear_wake_reason(hdev);
2786 	mgmt_suspending(hdev, hdev->suspend_state);
2787 
2788 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2789 	return ret;
2790 }
2791 EXPORT_SYMBOL(hci_suspend_dev);
2792 
2793 /* Resume HCI device */
2794 int hci_resume_dev(struct hci_dev *hdev)
2795 {
2796 	int ret;
2797 
2798 	bt_dev_dbg(hdev, "");
2799 
2800 	/* Resume should only act on when powered. */
2801 	if (!hdev_is_powered(hdev) ||
2802 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2803 		return 0;
2804 
2805 	/* If powering down don't attempt to resume */
2806 	if (mgmt_powering_down(hdev))
2807 		return 0;
2808 
2809 	hci_req_sync_lock(hdev);
2810 	ret = hci_resume_sync(hdev);
2811 	hci_req_sync_unlock(hdev);
2812 
2813 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2814 		      hdev->wake_addr_type);
2815 
2816 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2817 	return ret;
2818 }
2819 EXPORT_SYMBOL(hci_resume_dev);
2820 
2821 /* Reset HCI device */
2822 int hci_reset_dev(struct hci_dev *hdev)
2823 {
2824 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2825 	struct sk_buff *skb;
2826 
2827 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2828 	if (!skb)
2829 		return -ENOMEM;
2830 
2831 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2832 	skb_put_data(skb, hw_err, 3);
2833 
2834 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2835 
2836 	/* Send Hardware Error to upper stack */
2837 	return hci_recv_frame(hdev, skb);
2838 }
2839 EXPORT_SYMBOL(hci_reset_dev);
2840 
2841 /* Receive frame from HCI drivers */
2842 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2843 {
2844 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2845 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2846 		kfree_skb(skb);
2847 		return -ENXIO;
2848 	}
2849 
2850 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2851 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2852 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2853 	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2854 		kfree_skb(skb);
2855 		return -EINVAL;
2856 	}
2857 
2858 	/* Incoming skb */
2859 	bt_cb(skb)->incoming = 1;
2860 
2861 	/* Time stamp */
2862 	__net_timestamp(skb);
2863 
2864 	skb_queue_tail(&hdev->rx_q, skb);
2865 	queue_work(hdev->workqueue, &hdev->rx_work);
2866 
2867 	return 0;
2868 }
2869 EXPORT_SYMBOL(hci_recv_frame);
2870 
/* Receive diagnostic message from HCI drivers */
/* Queue a driver-provided diagnostic packet on the RX path. Unlike
 * hci_recv_frame(), no HCI_UP/HCI_INIT check is performed and the packet
 * type is forced to HCI_DIAG_PKT. Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
2886 
/* Store a printf-formatted hardware description string on the device,
 * replacing (and freeing) any previous one. The string is exposed via
 * debugfs/monitor; allocation failure leaves hw_info NULL.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
2897 
/* Store a printf-formatted firmware description string on the device,
 * replacing (and freeing) any previous one. Mirrors hci_set_hw_info().
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
2908 
2909 /* ---- Interface to upper protocols ---- */
2910 
/* Add an upper-protocol callback structure (e.g. L2CAP, SCO) to the
 * global hci_cb_list under hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2922 
/* Remove an upper-protocol callback structure from hci_cb_list under
 * hci_cb_list_lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2934 
/* Hand one outgoing HCI packet to the transport driver. Timestamps the
 * skb, mirrors it to the monitor channel (and, in promiscuous mode, to
 * raw sockets), then passes it to hdev->send(). The skb is consumed in
 * all cases: freed here on error, owned by the driver on success.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Don't hand packets to a driver that is not running anymore */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
2970 
/* Send HCI command */
/* Build an HCI command packet from opcode/parameters and queue it on the
 * command queue for asynchronous transmission by the cmd work. Returns 0
 * on success or -ENOMEM if the skb could not be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2995 
/* Send a vendor-specific (OGF 0x3f) HCI command directly to the driver,
 * bypassing the command queue and without waiting for any completion
 * event. Standard commands are rejected with -EINVAL. Note that the
 * result of hci_send_frame() is deliberately discarded here — the
 * function reports only build failures (-ENOMEM).
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3026 
3027 /* Get data from the previously sent command */
3028 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3029 {
3030 	struct hci_command_hdr *hdr;
3031 
3032 	if (!hdev->sent_cmd)
3033 		return NULL;
3034 
3035 	hdr = (void *) hdev->sent_cmd->data;
3036 
3037 	if (hdr->opcode != cpu_to_le16(opcode))
3038 		return NULL;
3039 
3040 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3041 
3042 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3043 }
3044 
/* Get data from last received event */
/* Return a pointer to the parameters of the last received event if its
 * event code (or, for LE meta events, its subevent code) matches; NULL
 * otherwise. For an LE subevent match the returned pointer is advanced
 * past both the event header and the le_meta header.
 */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

	/* Reached by fallthrough on a direct event-code match as well */
found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
3075 
/* Send ACL data */
/* Prepend an ACL header to the skb: handle and packet-boundary/broadcast
 * flags packed into one 16-bit field, plus the payload length captured
 * before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
3088 
/* Add ACL headers to an outgoing skb (and all of its frag_list
 * fragments) and append everything to the given queue. The first
 * fragment keeps the caller's flags; continuations are re-flagged as
 * ACL_CONT. Fragments of one packet are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP links address the logical channel, primary links the
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even though the AMP path above used
			 * chan->handle for the first fragment — confirm this
			 * asymmetry is intended.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3150 
/* Queue an ACL packet on the channel's data queue and kick the TX work
 * to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3161 
/* Send SCO data */
/* Prepend a SCO header (handle + length) to the skb, mark it as a SCO
 * data packet, queue it on the connection and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3182 
/* Send ISO data */
/* Prepend an ISO header to the skb: handle and PB/TS flags packed into
 * one 16-bit field, plus the payload length captured before the push.
 * Mirrors hci_add_acl_hdr() for the ISO transport.
 */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
3195 
/* Add ISO headers to an outgoing skb and its frag_list fragments and
 * append them to the queue. The first fragment is flagged ISO_SINGLE
 * (unfragmented) or ISO_START; middle fragments ISO_CONT and the last
 * one ISO_END. Unlike hci_queue_acl(), fragments are queued without
 * taking the queue lock.
 */
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			/* list is already advanced: NULL means this is the
			 * final fragment (ISO_END).
			 */
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
3240 
/* Queue an ISO packet on the connection's data queue and kick the TX
 * work to schedule transmission.
 */
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3251 
3252 /* ---- HCI TX task (outgoing data) ---- */
3253 
3254 /* HCI Connection scheduler */
/* Compute the per-connection TX quota: the available controller credits
 * for the connection's link type divided by the number of competing
 * connections (num), with a minimum of one so progress is always made.
 * A NULL conn yields a quota of zero.
 */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	/* Pick the credit pool matching the link type; LE and ISO fall
	 * back to the ACL pool when they have no dedicated buffers
	 * (le_mtu/iso_mtu of 0).
	 */
	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
3293 
/* Pick the connection of the given link type that should transmit next:
 * among connected/configured connections with queued data, choose the
 * one with the fewest in-flight packets (c->sent) and compute its quota.
 * Returns NULL (with *quote set to 0) when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded eligible connection */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3331 
3332 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3333 {
3334 	struct hci_conn_hash *h = &hdev->conn_hash;
3335 	struct hci_conn *c;
3336 
3337 	bt_dev_err(hdev, "link tx timeout");
3338 
3339 	rcu_read_lock();
3340 
3341 	/* Kill stalled connections */
3342 	list_for_each_entry_rcu(c, &h->list, list) {
3343 		if (c->type == type && c->sent) {
3344 			bt_dev_err(hdev, "killing stalled connection %pMR",
3345 				   &c->dst);
3346 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3347 		}
3348 	}
3349 
3350 	rcu_read_unlock();
3351 }
3352 
/* Pick the channel that should transmit next for the given link type.
 * Channels compete first on the priority of their head-of-queue skb
 * (higher wins; the counters reset whenever a higher priority appears),
 * then on the owning connection's in-flight count (fewest wins). On
 * success *quote is set from the winning connection's credit share.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the contest */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within equal priority, prefer the connection with
			 * the fewest packets in flight.
			 */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3415 
/* Anti-starvation pass over all channels of a link type: channels that
 * transmitted in the last round get their per-round counter reset, while
 * channels that were skipped get the head skb of their queue promoted to
 * HCI_PRIO_MAX - 1 so they win the next hci_chan_sent() round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3465 
3466 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3467 {
3468 	/* Calculate count of blocks used by this packet */
3469 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3470 }
3471 
3472 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3473 {
3474 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3475 		/* ACL tx timeout must be longer than maximum
3476 		 * link supervision timeout (40.9 seconds) */
3477 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3478 				       HCI_ACL_TX_TIMEOUT))
3479 			hci_link_tx_to(hdev, ACL_LINK);
3480 	}
3481 }
3482 
3483 /* Schedule SCO */
3484 static void hci_sched_sco(struct hci_dev *hdev)
3485 {
3486 	struct hci_conn *conn;
3487 	struct sk_buff *skb;
3488 	int quote;
3489 
3490 	BT_DBG("%s", hdev->name);
3491 
3492 	if (!hci_conn_num(hdev, SCO_LINK))
3493 		return;
3494 
3495 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3496 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3497 			BT_DBG("skb %p len %d", skb, skb->len);
3498 			hci_send_frame(hdev, skb);
3499 
3500 			conn->sent++;
3501 			if (conn->sent == ~0)
3502 				conn->sent = 0;
3503 		}
3504 	}
3505 }
3506 
3507 static void hci_sched_esco(struct hci_dev *hdev)
3508 {
3509 	struct hci_conn *conn;
3510 	struct sk_buff *skb;
3511 	int quote;
3512 
3513 	BT_DBG("%s", hdev->name);
3514 
3515 	if (!hci_conn_num(hdev, ESCO_LINK))
3516 		return;
3517 
3518 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3519 						     &quote))) {
3520 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3521 			BT_DBG("skb %p len %d", skb, skb->len);
3522 			hci_send_frame(hdev, skb);
3523 
3524 			conn->sent++;
3525 			if (conn->sent == ~0)
3526 				conn->sent = 0;
3527 		}
3528 	}
3529 }
3530 
/* Packet-based ACL scheduling: transmit queued ACL data while controller
 * buffer credits (hdev->acl_cnt) remain, interleaving (e)SCO traffic
 * after every frame to keep voice latency low.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was picked;
		 * the inner loop stops once a lower-priority skb surfaces.
		 */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Something was sent; give starved channels a boost next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3572 
3573 static void hci_sched_acl_blk(struct hci_dev *hdev)
3574 {
3575 	unsigned int cnt = hdev->block_cnt;
3576 	struct hci_chan *chan;
3577 	struct sk_buff *skb;
3578 	int quote;
3579 	u8 type;
3580 
3581 	__check_timeout(hdev, cnt);
3582 
3583 	BT_DBG("%s", hdev->name);
3584 
3585 	if (hdev->dev_type == HCI_AMP)
3586 		type = AMP_LINK;
3587 	else
3588 		type = ACL_LINK;
3589 
3590 	while (hdev->block_cnt > 0 &&
3591 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3592 		u32 priority = (skb_peek(&chan->data_q))->priority;
3593 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3594 			int blocks;
3595 
3596 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3597 			       skb->len, skb->priority);
3598 
3599 			/* Stop if priority has changed */
3600 			if (skb->priority < priority)
3601 				break;
3602 
3603 			skb = skb_dequeue(&chan->data_q);
3604 
3605 			blocks = __get_blocks(hdev, skb);
3606 			if (blocks > hdev->block_cnt)
3607 				return;
3608 
3609 			hci_conn_enter_active_mode(chan->conn,
3610 						   bt_cb(skb)->force_active);
3611 
3612 			hci_send_frame(hdev, skb);
3613 			hdev->acl_last_tx = jiffies;
3614 
3615 			hdev->block_cnt -= blocks;
3616 			quote -= blocks;
3617 
3618 			chan->sent += blocks;
3619 			chan->conn->sent += blocks;
3620 		}
3621 	}
3622 
3623 	if (cnt != hdev->block_cnt)
3624 		hci_prio_recalculate(hdev, type);
3625 }
3626 
3627 static void hci_sched_acl(struct hci_dev *hdev)
3628 {
3629 	BT_DBG("%s", hdev->name);
3630 
3631 	/* No ACL link over BR/EDR controller */
3632 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3633 		return;
3634 
3635 	/* No AMP link over AMP controller */
3636 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3637 		return;
3638 
3639 	switch (hdev->flow_ctl_mode) {
3640 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
3641 		hci_sched_acl_pkt(hdev);
3642 		break;
3643 
3644 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3645 		hci_sched_acl_blk(hdev);
3646 		break;
3647 	}
3648 }
3649 
/* LE data scheduling.  If the controller has a dedicated LE buffer pool
 * (hdev->le_pkts) credits are taken from le_cnt, otherwise LE traffic
 * shares the BR/EDR ACL pool (acl_cnt).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Snapshot of the head skb's priority; the inner loop
		 * stops once a lower-priority skb surfaces.
		 */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent; give starved channels a boost next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3699 
3700 /* Schedule CIS */
3701 static void hci_sched_iso(struct hci_dev *hdev)
3702 {
3703 	struct hci_conn *conn;
3704 	struct sk_buff *skb;
3705 	int quote, *cnt;
3706 
3707 	BT_DBG("%s", hdev->name);
3708 
3709 	if (!hci_conn_num(hdev, ISO_LINK))
3710 		return;
3711 
3712 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3713 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3714 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3715 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3716 			BT_DBG("skb %p len %d", skb, skb->len);
3717 			hci_send_frame(hdev, skb);
3718 
3719 			conn->sent++;
3720 			if (conn->sent == ~0)
3721 				conn->sent = 0;
3722 			(*cnt)--;
3723 		}
3724 	}
3725 }
3726 
/* TX work: drain all outgoing queues towards the HCI driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	/* In HCI_USER_CHANNEL mode userspace drives the controller, so only
	 * the raw queue below is serviced.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3748 
3749 /* ----- HCI RX task (incoming data processing) ----- */
3750 
3751 /* ACL data packet */
3752 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3753 {
3754 	struct hci_acl_hdr *hdr = (void *) skb->data;
3755 	struct hci_conn *conn;
3756 	__u16 handle, flags;
3757 
3758 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3759 
3760 	handle = __le16_to_cpu(hdr->handle);
3761 	flags  = hci_flags(handle);
3762 	handle = hci_handle(handle);
3763 
3764 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3765 	       handle, flags);
3766 
3767 	hdev->stat.acl_rx++;
3768 
3769 	hci_dev_lock(hdev);
3770 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3771 	hci_dev_unlock(hdev);
3772 
3773 	if (conn) {
3774 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3775 
3776 		/* Send to upper protocol */
3777 		l2cap_recv_acldata(conn, skb, flags);
3778 		return;
3779 	} else {
3780 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3781 			   handle);
3782 	}
3783 
3784 	kfree_skb(skb);
3785 }
3786 
3787 /* SCO data packet */
3788 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3789 {
3790 	struct hci_sco_hdr *hdr = (void *) skb->data;
3791 	struct hci_conn *conn;
3792 	__u16 handle, flags;
3793 
3794 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3795 
3796 	handle = __le16_to_cpu(hdr->handle);
3797 	flags  = hci_flags(handle);
3798 	handle = hci_handle(handle);
3799 
3800 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3801 	       handle, flags);
3802 
3803 	hdev->stat.sco_rx++;
3804 
3805 	hci_dev_lock(hdev);
3806 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3807 	hci_dev_unlock(hdev);
3808 
3809 	if (conn) {
3810 		/* Send to upper protocol */
3811 		bt_cb(skb)->sco.pkt_status = flags & 0x03;
3812 		sco_recv_scodata(conn, skb);
3813 		return;
3814 	} else {
3815 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3816 				       handle);
3817 	}
3818 
3819 	kfree_skb(skb);
3820 }
3821 
3822 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3823 {
3824 	struct hci_iso_hdr *hdr;
3825 	struct hci_conn *conn;
3826 	__u16 handle, flags;
3827 
3828 	hdr = skb_pull_data(skb, sizeof(*hdr));
3829 	if (!hdr) {
3830 		bt_dev_err(hdev, "ISO packet too small");
3831 		goto drop;
3832 	}
3833 
3834 	handle = __le16_to_cpu(hdr->handle);
3835 	flags  = hci_flags(handle);
3836 	handle = hci_handle(handle);
3837 
3838 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3839 		   handle, flags);
3840 
3841 	hci_dev_lock(hdev);
3842 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3843 	hci_dev_unlock(hdev);
3844 
3845 	if (!conn) {
3846 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3847 			   handle);
3848 		goto drop;
3849 	}
3850 
3851 	/* Send to upper protocol */
3852 	iso_recv(conn, skb, flags);
3853 	return;
3854 
3855 drop:
3856 	kfree_skb(skb);
3857 }
3858 
3859 static bool hci_req_is_complete(struct hci_dev *hdev)
3860 {
3861 	struct sk_buff *skb;
3862 
3863 	skb = skb_peek(&hdev->cmd_q);
3864 	if (!skb)
3865 		return true;
3866 
3867 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3868 }
3869 
3870 static void hci_resend_last(struct hci_dev *hdev)
3871 {
3872 	struct hci_command_hdr *sent;
3873 	struct sk_buff *skb;
3874 	u16 opcode;
3875 
3876 	if (!hdev->sent_cmd)
3877 		return;
3878 
3879 	sent = (void *) hdev->sent_cmd->data;
3880 	opcode = __le16_to_cpu(sent->opcode);
3881 	if (opcode == HCI_OP_RESET)
3882 		return;
3883 
3884 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3885 	if (!skb)
3886 		return;
3887 
3888 	skb_queue_head(&hdev->cmd_q, skb);
3889 	queue_work(hdev->workqueue, &hdev->cmd_work);
3890 }
3891 
/* Resolve the completion callback(s) for a command complete/status event.
 *
 * Matches @opcode against the last sent command; when the request has
 * finished (or @status reports failure) the per-request callbacks are
 * returned through @req_complete / @req_complete_skb.  On failure all
 * remaining queued commands belonging to the same request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The start of the next request marks the boundary; put the
		 * command back and stop.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
3956 
/* RX work: dispatch every packet queued on hdev->rx_q to the monitor,
 * raw sockets and the per-packet-type handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type; drop it */
			kfree_skb(skb);
			break;
		}
	}
}
4031 
/* CMD work: transmit the next queued HCI command once the controller
 * reports command credits (hdev->cmd_cnt) available.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command can be matched against its
		 * completion event (and resent if necessary).
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			/* Arm the command timeout unless a reset or a
			 * workqueue drain is in progress, in which case any
			 * pending timer is cancelled instead.
			 */
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
4071