xref: /linux/net/bluetooth/hci_core.c (revision 332d2c1d713e232e163386c35a3ba0c1b90df83f)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54 
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58 
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62 
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65 
66 /* Get HCI device by index.
67  * Device is held on return. */
68 struct hci_dev *hci_dev_get(int index)
69 {
70 	struct hci_dev *hdev = NULL, *d;
71 
72 	BT_DBG("%d", index);
73 
74 	if (index < 0)
75 		return NULL;
76 
77 	read_lock(&hci_dev_list_lock);
78 	list_for_each_entry(d, &hci_dev_list, list) {
79 		if (d->id == index) {
80 			hdev = hci_dev_hold(d);
81 			break;
82 		}
83 	}
84 	read_unlock(&hci_dev_list_lock);
85 	return hdev;
86 }
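/* Editor's illustration (not upstream code): hci_dev_get() returns the
 * device with a reference held, which the caller must drop again with
 * hci_dev_put(). A minimal sketch of the expected pairing, as used by
 * the ioctl helpers further down in this file:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...use hdev...
 *	hci_dev_put(hdev);
 */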
87 
88 /* ---- Inquiry support ---- */
89 
90 bool hci_discovery_active(struct hci_dev *hdev)
91 {
92 	struct discovery_state *discov = &hdev->discovery;
93 
94 	switch (discov->state) {
95 	case DISCOVERY_FINDING:
96 	case DISCOVERY_RESOLVING:
97 		return true;
98 
99 	default:
100 		return false;
101 	}
102 }
103 
104 void hci_discovery_set_state(struct hci_dev *hdev, int state)
105 {
106 	int old_state = hdev->discovery.state;
107 
108 	if (old_state == state)
109 		return;
110 
111 	hdev->discovery.state = state;
112 
113 	switch (state) {
114 	case DISCOVERY_STOPPED:
115 		hci_update_passive_scan(hdev);
116 
117 		if (old_state != DISCOVERY_STARTING)
118 			mgmt_discovering(hdev, 0);
119 		break;
120 	case DISCOVERY_STARTING:
121 		break;
122 	case DISCOVERY_FINDING:
123 		/* Discovery was not started through MGMT (old state was not
124 		 * DISCOVERY_STARTING), so no MGMT event shall be generated
125 		 */
126 		if (old_state != DISCOVERY_STARTING) {
127 			hdev->discovery.state = old_state;
128 			return;
129 		}
130 		mgmt_discovering(hdev, 1);
131 		break;
132 	case DISCOVERY_RESOLVING:
133 		break;
134 	case DISCOVERY_STOPPING:
135 		break;
136 	}
137 
138 	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
139 }
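/* Editor's sketch of the usual MGMT-initiated discovery life cycle (not
 * upstream documentation):
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING
 *	  [-> DISCOVERY_RESOLVING] -> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * Only the STARTING -> FINDING transition and the transition back to
 * STOPPED emit mgmt_discovering() events, matching the switch above.
 */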
140 
141 void hci_inquiry_cache_flush(struct hci_dev *hdev)
142 {
143 	struct discovery_state *cache = &hdev->discovery;
144 	struct inquiry_entry *p, *n;
145 
146 	list_for_each_entry_safe(p, n, &cache->all, all) {
147 		list_del(&p->all);
148 		kfree(p);
149 	}
150 
151 	INIT_LIST_HEAD(&cache->unknown);
152 	INIT_LIST_HEAD(&cache->resolve);
153 }
154 
155 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
156 					       bdaddr_t *bdaddr)
157 {
158 	struct discovery_state *cache = &hdev->discovery;
159 	struct inquiry_entry *e;
160 
161 	BT_DBG("cache %p, %pMR", cache, bdaddr);
162 
163 	list_for_each_entry(e, &cache->all, all) {
164 		if (!bacmp(&e->data.bdaddr, bdaddr))
165 			return e;
166 	}
167 
168 	return NULL;
169 }
170 
171 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
172 						       bdaddr_t *bdaddr)
173 {
174 	struct discovery_state *cache = &hdev->discovery;
175 	struct inquiry_entry *e;
176 
177 	BT_DBG("cache %p, %pMR", cache, bdaddr);
178 
179 	list_for_each_entry(e, &cache->unknown, list) {
180 		if (!bacmp(&e->data.bdaddr, bdaddr))
181 			return e;
182 	}
183 
184 	return NULL;
185 }
186 
187 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
188 						       bdaddr_t *bdaddr,
189 						       int state)
190 {
191 	struct discovery_state *cache = &hdev->discovery;
192 	struct inquiry_entry *e;
193 
194 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
195 
196 	list_for_each_entry(e, &cache->resolve, list) {
197 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
198 			return e;
199 		if (!bacmp(&e->data.bdaddr, bdaddr))
200 			return e;
201 	}
202 
203 	return NULL;
204 }
205 
206 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
207 				      struct inquiry_entry *ie)
208 {
209 	struct discovery_state *cache = &hdev->discovery;
210 	struct list_head *pos = &cache->resolve;
211 	struct inquiry_entry *p;
212 
213 	list_del(&ie->list);
214 
215 	list_for_each_entry(p, &cache->resolve, list) {
216 		if (p->name_state != NAME_PENDING &&
217 		    abs(p->data.rssi) >= abs(ie->data.rssi))
218 			break;
219 		pos = &p->list;
220 	}
221 
222 	list_add(&ie->list, pos);
223 }
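/* Worked example (editor's illustration): entries with name_state ==
 * NAME_PENDING stay at the head, and the rest of the resolve list is
 * kept sorted by ascending |RSSI|, i.e. strongest signal first. Given
 * non-pending entries with RSSI -40, -60 and -75, re-inserting an entry
 * whose RSSI changed to -55 places it between the -40 and -60 entries,
 * so names are resolved for the closest devices first.
 */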
224 
225 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
226 			     bool name_known)
227 {
228 	struct discovery_state *cache = &hdev->discovery;
229 	struct inquiry_entry *ie;
230 	u32 flags = 0;
231 
232 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
233 
234 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
235 
236 	if (!data->ssp_mode)
237 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
238 
239 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
240 	if (ie) {
241 		if (!ie->data.ssp_mode)
242 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
243 
244 		if (ie->name_state == NAME_NEEDED &&
245 		    data->rssi != ie->data.rssi) {
246 			ie->data.rssi = data->rssi;
247 			hci_inquiry_cache_update_resolve(hdev, ie);
248 		}
249 
250 		goto update;
251 	}
252 
253 	/* Entry not in the cache. Add new one. */
254 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
255 	if (!ie) {
256 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
257 		goto done;
258 	}
259 
260 	list_add(&ie->all, &cache->all);
261 
262 	if (name_known) {
263 		ie->name_state = NAME_KNOWN;
264 	} else {
265 		ie->name_state = NAME_NOT_KNOWN;
266 		list_add(&ie->list, &cache->unknown);
267 	}
268 
269 update:
270 	if (name_known && ie->name_state != NAME_KNOWN &&
271 	    ie->name_state != NAME_PENDING) {
272 		ie->name_state = NAME_KNOWN;
273 		list_del(&ie->list);
274 	}
275 
276 	memcpy(&ie->data, data, sizeof(*data));
277 	ie->timestamp = jiffies;
278 	cache->timestamp = jiffies;
279 
280 	if (ie->name_state == NAME_NOT_KNOWN)
281 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
282 
283 done:
284 	return flags;
285 }
286 
287 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
288 {
289 	struct discovery_state *cache = &hdev->discovery;
290 	struct inquiry_info *info = (struct inquiry_info *) buf;
291 	struct inquiry_entry *e;
292 	int copied = 0;
293 
294 	list_for_each_entry(e, &cache->all, all) {
295 		struct inquiry_data *data = &e->data;
296 
297 		if (copied >= num)
298 			break;
299 
300 		bacpy(&info->bdaddr, &data->bdaddr);
301 		info->pscan_rep_mode	= data->pscan_rep_mode;
302 		info->pscan_period_mode	= data->pscan_period_mode;
303 		info->pscan_mode	= data->pscan_mode;
304 		memcpy(info->dev_class, data->dev_class, 3);
305 		info->clock_offset	= data->clock_offset;
306 
307 		info++;
308 		copied++;
309 	}
310 
311 	BT_DBG("cache %p, copied %d", cache, copied);
312 	return copied;
313 }
314 
315 static int hci_inq_req(struct hci_request *req, unsigned long opt)
316 {
317 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
318 	struct hci_dev *hdev = req->hdev;
319 	struct hci_cp_inquiry cp;
320 
321 	BT_DBG("%s", hdev->name);
322 
323 	if (test_bit(HCI_INQUIRY, &hdev->flags))
324 		return 0;
325 
326 	/* Start Inquiry */
327 	memcpy(&cp.lap, &ir->lap, 3);
328 	cp.length  = ir->length;
329 	cp.num_rsp = ir->num_rsp;
330 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
331 
332 	return 0;
333 }
334 
335 int hci_inquiry(void __user *arg)
336 {
337 	__u8 __user *ptr = arg;
338 	struct hci_inquiry_req ir;
339 	struct hci_dev *hdev;
340 	int err = 0, do_inquiry = 0, max_rsp;
341 	long timeo;
342 	__u8 *buf;
343 
344 	if (copy_from_user(&ir, ptr, sizeof(ir)))
345 		return -EFAULT;
346 
347 	hdev = hci_dev_get(ir.dev_id);
348 	if (!hdev)
349 		return -ENODEV;
350 
351 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
352 		err = -EBUSY;
353 		goto done;
354 	}
355 
356 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
357 		err = -EOPNOTSUPP;
358 		goto done;
359 	}
360 
361 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
362 		err = -EOPNOTSUPP;
363 		goto done;
364 	}
365 
366 	/* Restrict maximum inquiry length to 60 units (1.28 s each) */
367 	if (ir.length > 60) {
368 		err = -EINVAL;
369 		goto done;
370 	}
371 
372 	hci_dev_lock(hdev);
373 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
374 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
375 		hci_inquiry_cache_flush(hdev);
376 		do_inquiry = 1;
377 	}
378 	hci_dev_unlock(hdev);
379 
380 	timeo = ir.length * msecs_to_jiffies(2000);
381 
382 	if (do_inquiry) {
383 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
384 				   timeo, NULL);
385 		if (err < 0)
386 			goto done;
387 
388 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
389 		 * cleared). If it is interrupted by a signal, return -EINTR.
390 		 */
391 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
392 				TASK_INTERRUPTIBLE)) {
393 			err = -EINTR;
394 			goto done;
395 		}
396 	}
397 
398 	/* For an unlimited number of responses we will use a buffer with
399 	 * 255 entries
400 	 */
401 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
402 
403 	/* inquiry_cache_dump() can't sleep. Therefore we allocate a temporary
404 	 * buffer and then copy it to user space.
405 	 */
406 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
407 	if (!buf) {
408 		err = -ENOMEM;
409 		goto done;
410 	}
411 
412 	hci_dev_lock(hdev);
413 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
414 	hci_dev_unlock(hdev);
415 
416 	BT_DBG("num_rsp %d", ir.num_rsp);
417 
418 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
419 		ptr += sizeof(ir);
420 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
421 				 ir.num_rsp))
422 			err = -EFAULT;
423 	} else
424 		err = -EFAULT;
425 
426 	kfree(buf);
427 
428 done:
429 	hci_dev_put(hdev);
430 	return err;
431 }
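/* Usage sketch from user space (editor's illustration mirroring BlueZ's
 * hci_inquiry() helper; error handling omitted). The request header and
 * the response buffer share one allocation, and the lap bytes encode the
 * General Inquiry Access Code 0x9e8b33. Note that ir.length is in 1.28 s
 * units while the kernel waits up to length * 2 s for the result:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *)buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *	ioctl(ctl, HCIINQUIRY, (unsigned long)buf);
 */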
432 
433 static int hci_dev_do_open(struct hci_dev *hdev)
434 {
435 	int ret = 0;
436 
437 	BT_DBG("%s %p", hdev->name, hdev);
438 
439 	hci_req_sync_lock(hdev);
440 
441 	ret = hci_dev_open_sync(hdev);
442 
443 	hci_req_sync_unlock(hdev);
444 	return ret;
445 }
446 
447 /* ---- HCI ioctl helpers ---- */
448 
449 int hci_dev_open(__u16 dev)
450 {
451 	struct hci_dev *hdev;
452 	int err;
453 
454 	hdev = hci_dev_get(dev);
455 	if (!hdev)
456 		return -ENODEV;
457 
458 	/* Devices that are marked as unconfigured can only be powered
459 	 * up as user channel. Trying to bring them up as normal devices
460 	 * will result in a failure. Only user channel operation is
461 	 * possible.
462 	 *
463 	 * When this function is called for a user channel, the flag
464 	 * HCI_USER_CHANNEL will be set first before attempting to
465 	 * open the device.
466 	 */
467 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
468 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
469 		err = -EOPNOTSUPP;
470 		goto done;
471 	}
472 
473 	/* We need to ensure that no other power on/off work is pending
474 	 * before proceeding to call hci_dev_do_open. This is
475 	 * particularly important if the setup procedure has not yet
476 	 * completed.
477 	 */
478 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
479 		cancel_delayed_work(&hdev->power_off);
480 
481 	/* After this call it is guaranteed that the setup procedure
482 	 * has finished. This means that error conditions like RFKILL
483 	 * or no valid public or static random address apply.
484 	 */
485 	flush_workqueue(hdev->req_workqueue);
486 
487 	/* For controllers not using the management interface and that
488 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
489 	 * so that pairing works for them. Once the management interface
490 	 * is in use this bit will be cleared again and userspace has
491 	 * to explicitly enable it.
492 	 */
493 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
494 	    !hci_dev_test_flag(hdev, HCI_MGMT))
495 		hci_dev_set_flag(hdev, HCI_BONDABLE);
496 
497 	err = hci_dev_do_open(hdev);
498 
499 done:
500 	hci_dev_put(hdev);
501 	return err;
502 }
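/* Usage sketch (editor's illustration): this is the handler behind the
 * HCIDEVUP ioctl, so "hciconfig hci0 up" effectively boils down to
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(ctl, HCIDEVUP, 0);
 *
 * with HCIDEVDOWN taking the same device index to reach hci_dev_close().
 */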
503 
504 int hci_dev_do_close(struct hci_dev *hdev)
505 {
506 	int err;
507 
508 	BT_DBG("%s %p", hdev->name, hdev);
509 
510 	hci_req_sync_lock(hdev);
511 
512 	err = hci_dev_close_sync(hdev);
513 
514 	hci_req_sync_unlock(hdev);
515 
516 	return err;
517 }
518 
519 int hci_dev_close(__u16 dev)
520 {
521 	struct hci_dev *hdev;
522 	int err;
523 
524 	hdev = hci_dev_get(dev);
525 	if (!hdev)
526 		return -ENODEV;
527 
528 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
529 		err = -EBUSY;
530 		goto done;
531 	}
532 
533 	cancel_work_sync(&hdev->power_on);
534 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
535 		cancel_delayed_work(&hdev->power_off);
536 
537 	err = hci_dev_do_close(hdev);
538 
539 done:
540 	hci_dev_put(hdev);
541 	return err;
542 }
543 
544 static int hci_dev_do_reset(struct hci_dev *hdev)
545 {
546 	int ret;
547 
548 	BT_DBG("%s %p", hdev->name, hdev);
549 
550 	hci_req_sync_lock(hdev);
551 
552 	/* Drop queues */
553 	skb_queue_purge(&hdev->rx_q);
554 	skb_queue_purge(&hdev->cmd_q);
555 
556 	/* Cancel these to avoid queueing non-chained pending work */
557 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
558 	/* Wait for the RCU read-side sections that do
559 	 *
560 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
561 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
562 	 *
563 	 * to either observe the flag or finish scheduling the work.
564 	 */
565 	synchronize_rcu();
566 	/* Explicitly cancel works in case scheduled after setting the flag. */
567 	cancel_delayed_work(&hdev->cmd_timer);
568 	cancel_delayed_work(&hdev->ncmd_timer);
569 
570 	/* Avoid potential lockdep warnings from the *_flush() calls by
571 	 * ensuring the workqueue is empty up front.
572 	 */
573 	drain_workqueue(hdev->workqueue);
574 
575 	hci_dev_lock(hdev);
576 	hci_inquiry_cache_flush(hdev);
577 	hci_conn_hash_flush(hdev);
578 	hci_dev_unlock(hdev);
579 
580 	if (hdev->flush)
581 		hdev->flush(hdev);
582 
583 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
584 
585 	atomic_set(&hdev->cmd_cnt, 1);
586 	hdev->acl_cnt = 0;
587 	hdev->sco_cnt = 0;
588 	hdev->le_cnt = 0;
589 	hdev->iso_cnt = 0;
590 
591 	ret = hci_reset_sync(hdev);
592 
593 	hci_req_sync_unlock(hdev);
594 	return ret;
595 }
596 
597 int hci_dev_reset(__u16 dev)
598 {
599 	struct hci_dev *hdev;
600 	int err;
601 
602 	hdev = hci_dev_get(dev);
603 	if (!hdev)
604 		return -ENODEV;
605 
606 	if (!test_bit(HCI_UP, &hdev->flags)) {
607 		err = -ENETDOWN;
608 		goto done;
609 	}
610 
611 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
612 		err = -EBUSY;
613 		goto done;
614 	}
615 
616 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
617 		err = -EOPNOTSUPP;
618 		goto done;
619 	}
620 
621 	err = hci_dev_do_reset(hdev);
622 
623 done:
624 	hci_dev_put(hdev);
625 	return err;
626 }
627 
628 int hci_dev_reset_stat(__u16 dev)
629 {
630 	struct hci_dev *hdev;
631 	int ret = 0;
632 
633 	hdev = hci_dev_get(dev);
634 	if (!hdev)
635 		return -ENODEV;
636 
637 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
638 		ret = -EBUSY;
639 		goto done;
640 	}
641 
642 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
643 		ret = -EOPNOTSUPP;
644 		goto done;
645 	}
646 
647 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
648 
649 done:
650 	hci_dev_put(hdev);
651 	return ret;
652 }
653 
654 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
655 {
656 	bool conn_changed, discov_changed;
657 
658 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
659 
660 	if ((scan & SCAN_PAGE))
661 		conn_changed = !hci_dev_test_and_set_flag(hdev,
662 							  HCI_CONNECTABLE);
663 	else
664 		conn_changed = hci_dev_test_and_clear_flag(hdev,
665 							   HCI_CONNECTABLE);
666 
667 	if ((scan & SCAN_INQUIRY)) {
668 		discov_changed = !hci_dev_test_and_set_flag(hdev,
669 							    HCI_DISCOVERABLE);
670 	} else {
671 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
672 		discov_changed = hci_dev_test_and_clear_flag(hdev,
673 							     HCI_DISCOVERABLE);
674 	}
675 
676 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
677 		return;
678 
679 	if (conn_changed || discov_changed) {
680 		/* In case this was disabled through mgmt */
681 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
682 
683 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
684 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
685 
686 		mgmt_new_settings(hdev);
687 	}
688 }
689 
690 int hci_dev_cmd(unsigned int cmd, void __user *arg)
691 {
692 	struct hci_dev *hdev;
693 	struct hci_dev_req dr;
694 	__le16 policy;
695 	int err = 0;
696 
697 	if (copy_from_user(&dr, arg, sizeof(dr)))
698 		return -EFAULT;
699 
700 	hdev = hci_dev_get(dr.dev_id);
701 	if (!hdev)
702 		return -ENODEV;
703 
704 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
705 		err = -EBUSY;
706 		goto done;
707 	}
708 
709 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
710 		err = -EOPNOTSUPP;
711 		goto done;
712 	}
713 
714 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
715 		err = -EOPNOTSUPP;
716 		goto done;
717 	}
718 
719 	switch (cmd) {
720 	case HCISETAUTH:
721 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
722 					    1, &dr.dev_opt, HCI_CMD_TIMEOUT);
723 		break;
724 
725 	case HCISETENCRYPT:
726 		if (!lmp_encrypt_capable(hdev)) {
727 			err = -EOPNOTSUPP;
728 			break;
729 		}
730 
731 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
732 			/* Auth must be enabled first */
733 			err = __hci_cmd_sync_status(hdev,
734 						    HCI_OP_WRITE_AUTH_ENABLE,
735 						    1, &dr.dev_opt,
736 						    HCI_CMD_TIMEOUT);
737 			if (err)
738 				break;
739 		}
740 
741 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
742 					    1, &dr.dev_opt,
743 					    HCI_CMD_TIMEOUT);
744 		break;
745 
746 	case HCISETSCAN:
747 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
748 					    1, &dr.dev_opt,
749 					    HCI_CMD_TIMEOUT);
750 
751 		/* Ensure that the connectable and discoverable states
752 		 * get correctly modified as this was a non-mgmt change.
753 		 */
754 		if (!err)
755 			hci_update_passive_scan_state(hdev, dr.dev_opt);
756 		break;
757 
758 	case HCISETLINKPOL:
759 		policy = cpu_to_le16(dr.dev_opt);
760 
761 		err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
762 					    2, &policy,
763 					    HCI_CMD_TIMEOUT);
764 		break;
765 
766 	case HCISETLINKMODE:
767 		hdev->link_mode = ((__u16) dr.dev_opt) &
768 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
769 		break;
770 
771 	case HCISETPTYPE:
772 		if (hdev->pkt_type == (__u16) dr.dev_opt)
773 			break;
774 
775 		hdev->pkt_type = (__u16) dr.dev_opt;
776 		mgmt_phy_configuration_changed(hdev, NULL);
777 		break;
778 
779 	case HCISETACLMTU:
780 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
781 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
782 		break;
783 
784 	case HCISETSCOMTU:
785 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
786 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
787 		break;
788 
789 	default:
790 		err = -EINVAL;
791 		break;
792 	}
793 
794 done:
795 	hci_dev_put(hdev);
796 	return err;
797 }
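/* Usage sketch (editor's illustration; a little-endian host is assumed
 * for the MTU packing): HCISETSCAN passes the scan mode in dev_opt,
 * while HCISETACLMTU packs the MTU into the upper and the packet count
 * into the lower 16 bits, matching the pointer arithmetic above:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	ioctl(ctl, HCISETSCAN, (unsigned long)&dr);
 *
 *	dr.dev_opt = (1021 << 16) | 8;
 *	ioctl(ctl, HCISETACLMTU, (unsigned long)&dr);
 *
 * The second call sets acl_mtu to 1021 and acl_pkts to 8.
 */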
798 
799 int hci_get_dev_list(void __user *arg)
800 {
801 	struct hci_dev *hdev;
802 	struct hci_dev_list_req *dl;
803 	struct hci_dev_req *dr;
804 	int n = 0, size, err;
805 	__u16 dev_num;
806 
807 	if (get_user(dev_num, (__u16 __user *) arg))
808 		return -EFAULT;
809 
810 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
811 		return -EINVAL;
812 
813 	size = sizeof(*dl) + dev_num * sizeof(*dr);
814 
815 	dl = kzalloc(size, GFP_KERNEL);
816 	if (!dl)
817 		return -ENOMEM;
818 
819 	dr = dl->dev_req;
820 
821 	read_lock(&hci_dev_list_lock);
822 	list_for_each_entry(hdev, &hci_dev_list, list) {
823 		unsigned long flags = hdev->flags;
824 
825 		/* When auto-off is configured the transport is actually
826 		 * running, but in that case still indicate that the
827 		 * device is down.
828 		 */
829 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
830 			flags &= ~BIT(HCI_UP);
831 
832 		(dr + n)->dev_id  = hdev->id;
833 		(dr + n)->dev_opt = flags;
834 
835 		if (++n >= dev_num)
836 			break;
837 	}
838 	read_unlock(&hci_dev_list_lock);
839 
840 	dl->dev_num = n;
841 	size = sizeof(*dl) + n * sizeof(*dr);
842 
843 	err = copy_to_user(arg, dl, size);
844 	kfree(dl);
845 
846 	return err ? -EFAULT : 0;
847 }
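/* Usage sketch (editor's illustration, following BlueZ's
 * hci_for_each_dev()): the caller passes dev_num in the same buffer that
 * receives the array of hci_dev_req entries:
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = malloc(HCI_MAX_DEV * sizeof(*dr) + sizeof(uint16_t));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (!ioctl(ctl, HCIGETDEVLIST, (void *)dl))
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", (dr + i)->dev_id);
 */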
848 
849 int hci_get_dev_info(void __user *arg)
850 {
851 	struct hci_dev *hdev;
852 	struct hci_dev_info di;
853 	unsigned long flags;
854 	int err = 0;
855 
856 	if (copy_from_user(&di, arg, sizeof(di)))
857 		return -EFAULT;
858 
859 	hdev = hci_dev_get(di.dev_id);
860 	if (!hdev)
861 		return -ENODEV;
862 
863 	/* When auto-off is configured the transport is actually
864 	 * running, but in that case still indicate that the
865 	 * device is down.
866 	 */
867 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
868 		flags = hdev->flags & ~BIT(HCI_UP);
869 	else
870 		flags = hdev->flags;
871 
872 	strscpy(di.name, hdev->name, sizeof(di.name));
873 	di.bdaddr   = hdev->bdaddr;
874 	di.type     = (hdev->bus & 0x0f);
875 	di.flags    = flags;
876 	di.pkt_type = hdev->pkt_type;
877 	if (lmp_bredr_capable(hdev)) {
878 		di.acl_mtu  = hdev->acl_mtu;
879 		di.acl_pkts = hdev->acl_pkts;
880 		di.sco_mtu  = hdev->sco_mtu;
881 		di.sco_pkts = hdev->sco_pkts;
882 	} else {
883 		di.acl_mtu  = hdev->le_mtu;
884 		di.acl_pkts = hdev->le_pkts;
885 		di.sco_mtu  = 0;
886 		di.sco_pkts = 0;
887 	}
888 	di.link_policy = hdev->link_policy;
889 	di.link_mode   = hdev->link_mode;
890 
891 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
892 	memcpy(&di.features, &hdev->features, sizeof(di.features));
893 
894 	if (copy_to_user(arg, &di, sizeof(di)))
895 		err = -EFAULT;
896 
897 	hci_dev_put(hdev);
898 
899 	return err;
900 }
901 
902 /* ---- Interface to HCI drivers ---- */
903 
904 static int hci_dev_do_poweroff(struct hci_dev *hdev)
905 {
906 	int err;
907 
908 	BT_DBG("%s %p", hdev->name, hdev);
909 
910 	hci_req_sync_lock(hdev);
911 
912 	err = hci_set_powered_sync(hdev, false);
913 
914 	hci_req_sync_unlock(hdev);
915 
916 	return err;
917 }
918 
919 static int hci_rfkill_set_block(void *data, bool blocked)
920 {
921 	struct hci_dev *hdev = data;
922 	int err;
923 
924 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
925 
926 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
927 		return -EBUSY;
928 
929 	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
930 		return 0;
931 
932 	if (blocked) {
933 		hci_dev_set_flag(hdev, HCI_RFKILLED);
934 
935 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
936 		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
937 			err = hci_dev_do_poweroff(hdev);
938 			if (err) {
939 				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
940 					   err);
941 
942 				/* Make sure the device is still closed even if
943 				 * anything during the power off sequence (e.g.
944 				 * disconnecting devices) failed.
945 				 */
946 				hci_dev_do_close(hdev);
947 			}
948 		}
949 	} else {
950 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
951 	}
952 
953 	return 0;
954 }
955 
956 static const struct rfkill_ops hci_rfkill_ops = {
957 	.set_block = hci_rfkill_set_block,
958 };
959 
960 static void hci_power_on(struct work_struct *work)
961 {
962 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
963 	int err;
964 
965 	BT_DBG("%s", hdev->name);
966 
967 	if (test_bit(HCI_UP, &hdev->flags) &&
968 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
969 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
970 		cancel_delayed_work(&hdev->power_off);
971 		err = hci_powered_update_sync(hdev);
972 		mgmt_power_on(hdev, err);
973 		return;
974 	}
975 
976 	err = hci_dev_do_open(hdev);
977 	if (err < 0) {
978 		hci_dev_lock(hdev);
979 		mgmt_set_powered_failed(hdev, err);
980 		hci_dev_unlock(hdev);
981 		return;
982 	}
983 
984 	/* During the HCI setup phase, a few error conditions are
985 	 * ignored and they need to be checked now. If they are still
986 	 * valid, it is important to turn the device back off.
987 	 */
988 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
989 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
990 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
991 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
992 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
993 		hci_dev_do_close(hdev);
994 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
995 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
996 				   HCI_AUTO_OFF_TIMEOUT);
997 	}
998 
999 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1000 		/* For unconfigured devices, set the HCI_RAW flag
1001 		 * so that userspace can easily identify them.
1002 		 */
1003 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1004 			set_bit(HCI_RAW, &hdev->flags);
1005 
1006 		/* For fully configured devices, this will send
1007 		 * the Index Added event. For unconfigured devices,
1008 		 * it will send Unconfigured Index Added event.
1009 		 *
1010 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1011 		 * and no event will be sent.
1012 		 */
1013 		mgmt_index_added(hdev);
1014 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1015 		/* Now that the controller is configured, it is
1016 		 * important to clear the HCI_RAW flag.
1017 		 */
1018 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1019 			clear_bit(HCI_RAW, &hdev->flags);
1020 
1021 		/* Powering on the controller with HCI_CONFIG set only
1022 		 * happens with the transition from unconfigured to
1023 		 * configured. This will send the Index Added event.
1024 		 */
1025 		mgmt_index_added(hdev);
1026 	}
1027 }
1028 
1029 static void hci_power_off(struct work_struct *work)
1030 {
1031 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1032 					    power_off.work);
1033 
1034 	BT_DBG("%s", hdev->name);
1035 
1036 	hci_dev_do_close(hdev);
1037 }
1038 
1039 static void hci_error_reset(struct work_struct *work)
1040 {
1041 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1042 
1043 	hci_dev_hold(hdev);
1044 	BT_DBG("%s", hdev->name);
1045 
1046 	if (hdev->hw_error)
1047 		hdev->hw_error(hdev, hdev->hw_error_code);
1048 	else
1049 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1050 
1051 	if (!hci_dev_do_close(hdev))
1052 		hci_dev_do_open(hdev);
1053 
1054 	hci_dev_put(hdev);
1055 }
1056 
1057 void hci_uuids_clear(struct hci_dev *hdev)
1058 {
1059 	struct bt_uuid *uuid, *tmp;
1060 
1061 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1062 		list_del(&uuid->list);
1063 		kfree(uuid);
1064 	}
1065 }
1066 
1067 void hci_link_keys_clear(struct hci_dev *hdev)
1068 {
1069 	struct link_key *key, *tmp;
1070 
1071 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1072 		list_del_rcu(&key->list);
1073 		kfree_rcu(key, rcu);
1074 	}
1075 }
1076 
1077 void hci_smp_ltks_clear(struct hci_dev *hdev)
1078 {
1079 	struct smp_ltk *k, *tmp;
1080 
1081 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1082 		list_del_rcu(&k->list);
1083 		kfree_rcu(k, rcu);
1084 	}
1085 }
1086 
1087 void hci_smp_irks_clear(struct hci_dev *hdev)
1088 {
1089 	struct smp_irk *k, *tmp;
1090 
1091 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1092 		list_del_rcu(&k->list);
1093 		kfree_rcu(k, rcu);
1094 	}
1095 }
1096 
1097 void hci_blocked_keys_clear(struct hci_dev *hdev)
1098 {
1099 	struct blocked_key *b, *tmp;
1100 
1101 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1102 		list_del_rcu(&b->list);
1103 		kfree_rcu(b, rcu);
1104 	}
1105 }
1106 
1107 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1108 {
1109 	bool blocked = false;
1110 	struct blocked_key *b;
1111 
1112 	rcu_read_lock();
1113 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1114 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1115 			blocked = true;
1116 			break;
1117 		}
1118 	}
1119 
1120 	rcu_read_unlock();
1121 	return blocked;
1122 }
1123 
1124 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1125 {
1126 	struct link_key *k;
1127 
1128 	rcu_read_lock();
1129 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1130 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1131 			rcu_read_unlock();
1132 
1133 			if (hci_is_blocked_key(hdev,
1134 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1135 					       k->val)) {
1136 				bt_dev_warn_ratelimited(hdev,
1137 							"Link key blocked for %pMR",
1138 							&k->bdaddr);
1139 				return NULL;
1140 			}
1141 
1142 			return k;
1143 		}
1144 	}
1145 	rcu_read_unlock();
1146 
1147 	return NULL;
1148 }
1149 
1150 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1151 			       u8 key_type, u8 old_key_type)
1152 {
1153 	/* Legacy key */
1154 	if (key_type < 0x03)
1155 		return true;
1156 
1157 	/* Debug keys are insecure so don't store them persistently */
1158 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1159 		return false;
1160 
1161 	/* Changed combination key and there's no previous one */
1162 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1163 		return false;
1164 
1165 	/* Security mode 3 case */
1166 	if (!conn)
1167 		return true;
1168 
1169 	/* BR/EDR key derived using SC from an LE link */
1170 	if (conn->type == LE_LINK)
1171 		return true;
1172 
1173 	/* Neither local nor remote side had no-bonding as a requirement */
1174 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1175 		return true;
1176 
1177 	/* Local side had dedicated bonding as a requirement */
1178 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1179 		return true;
1180 
1181 	/* Remote side had dedicated bonding as a requirement */
1182 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1183 		return true;
1184 
1185 	/* If none of the above criteria match, then don't store the key
1186 	 * persistently */
1187 	return false;
1188 }
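/* For reference (editor's note; values from the Bluetooth Core spec
 * definition of Authentication_Requirements compared above):
 *
 *	0x00 / 0x01   No Bonding (without / with MITM protection)
 *	0x02 / 0x03   Dedicated Bonding (without / with MITM protection)
 *	0x04 / 0x05   General Bonding (without / with MITM protection)
 *
 * so "auth_type > 0x01" means some form of bonding was requested.
 */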
1189 
1190 static u8 ltk_role(u8 type)
1191 {
1192 	if (type == SMP_LTK)
1193 		return HCI_ROLE_MASTER;
1194 
1195 	return HCI_ROLE_SLAVE;
1196 }
1197 
1198 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1199 			     u8 addr_type, u8 role)
1200 {
1201 	struct smp_ltk *k;
1202 
1203 	rcu_read_lock();
1204 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1205 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1206 			continue;
1207 
1208 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1209 			rcu_read_unlock();
1210 
1211 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1212 					       k->val)) {
1213 				bt_dev_warn_ratelimited(hdev,
1214 							"LTK blocked for %pMR",
1215 							&k->bdaddr);
1216 				return NULL;
1217 			}
1218 
1219 			return k;
1220 		}
1221 	}
1222 	rcu_read_unlock();
1223 
1224 	return NULL;
1225 }
1226 
1227 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1228 {
1229 	struct smp_irk *irk_to_return = NULL;
1230 	struct smp_irk *irk;
1231 
1232 	rcu_read_lock();
1233 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1234 		if (!bacmp(&irk->rpa, rpa)) {
1235 			irk_to_return = irk;
1236 			goto done;
1237 		}
1238 	}
1239 
1240 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1241 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1242 			bacpy(&irk->rpa, rpa);
1243 			irk_to_return = irk;
1244 			goto done;
1245 		}
1246 	}
1247 
1248 done:
1249 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1250 						irk_to_return->val)) {
1251 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1252 					&irk_to_return->bdaddr);
1253 		irk_to_return = NULL;
1254 	}
1255 
1256 	rcu_read_unlock();
1257 
1258 	return irk_to_return;
1259 }
1260 
1261 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1262 				     u8 addr_type)
1263 {
1264 	struct smp_irk *irk_to_return = NULL;
1265 	struct smp_irk *irk;
1266 
1267 	/* Identity Address must be public or static random */
1268 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1269 		return NULL;
1270 
1271 	rcu_read_lock();
1272 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1273 		if (addr_type == irk->addr_type &&
1274 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1275 			irk_to_return = irk;
1276 			goto done;
1277 		}
1278 	}
1279 
1280 done:
1281 
1282 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1283 						irk_to_return->val)) {
1284 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1285 					&irk_to_return->bdaddr);
1286 		irk_to_return = NULL;
1287 	}
1288 
1289 	rcu_read_unlock();
1290 
1291 	return irk_to_return;
1292 }
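/* Address classification (editor's illustration of the 0xc0 check above,
 * bdaddr->b[5] being the most significant byte, per the Core spec):
 *
 *	11xxxxxx ...   static random address   -> valid identity address
 *	01xxxxxx ...   resolvable private      -> rejected (use the IRK)
 *	00xxxxxx ...   non-resolvable private  -> rejected
 */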
1293 
1294 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1295 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1296 				  u8 pin_len, bool *persistent)
1297 {
1298 	struct link_key *key, *old_key;
1299 	u8 old_key_type;
1300 
1301 	old_key = hci_find_link_key(hdev, bdaddr);
1302 	if (old_key) {
1303 		old_key_type = old_key->type;
1304 		key = old_key;
1305 	} else {
1306 		old_key_type = conn ? conn->key_type : 0xff;
1307 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1308 		if (!key)
1309 			return NULL;
1310 		list_add_rcu(&key->list, &hdev->link_keys);
1311 	}
1312 
1313 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1314 
1315 	/* Some buggy controller combinations generate a changed
1316 	 * combination key for legacy pairing even when there's no
1317 	 * previous key */
1318 	if (type == HCI_LK_CHANGED_COMBINATION &&
1319 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1320 		type = HCI_LK_COMBINATION;
1321 		if (conn)
1322 			conn->key_type = type;
1323 	}
1324 
1325 	bacpy(&key->bdaddr, bdaddr);
1326 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1327 	key->pin_len = pin_len;
1328 
1329 	if (type == HCI_LK_CHANGED_COMBINATION)
1330 		key->type = old_key_type;
1331 	else
1332 		key->type = type;
1333 
1334 	if (persistent)
1335 		*persistent = hci_persistent_key(hdev, conn, type,
1336 						 old_key_type);
1337 
1338 	return key;
1339 }
1340 
1341 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1342 			    u8 addr_type, u8 type, u8 authenticated,
1343 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1344 {
1345 	struct smp_ltk *key, *old_key;
1346 	u8 role = ltk_role(type);
1347 
1348 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1349 	if (old_key)
1350 		key = old_key;
1351 	else {
1352 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1353 		if (!key)
1354 			return NULL;
1355 		list_add_rcu(&key->list, &hdev->long_term_keys);
1356 	}
1357 
1358 	bacpy(&key->bdaddr, bdaddr);
1359 	key->bdaddr_type = addr_type;
1360 	memcpy(key->val, tk, sizeof(key->val));
1361 	key->authenticated = authenticated;
1362 	key->ediv = ediv;
1363 	key->rand = rand;
1364 	key->enc_size = enc_size;
1365 	key->type = type;
1366 
1367 	return key;
1368 }
1369 
1370 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1371 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1372 {
1373 	struct smp_irk *irk;
1374 
1375 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1376 	if (!irk) {
1377 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1378 		if (!irk)
1379 			return NULL;
1380 
1381 		bacpy(&irk->bdaddr, bdaddr);
1382 		irk->addr_type = addr_type;
1383 
1384 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1385 	}
1386 
1387 	memcpy(irk->val, val, 16);
1388 	bacpy(&irk->rpa, rpa);
1389 
1390 	return irk;
1391 }
1392 
1393 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394 {
1395 	struct link_key *key;
1396 
1397 	key = hci_find_link_key(hdev, bdaddr);
1398 	if (!key)
1399 		return -ENOENT;
1400 
1401 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402 
1403 	list_del_rcu(&key->list);
1404 	kfree_rcu(key, rcu);
1405 
1406 	return 0;
1407 }
1408 
1409 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1410 {
1411 	struct smp_ltk *k, *tmp;
1412 	int removed = 0;
1413 
1414 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1415 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1416 			continue;
1417 
1418 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1419 
1420 		list_del_rcu(&k->list);
1421 		kfree_rcu(k, rcu);
1422 		removed++;
1423 	}
1424 
1425 	return removed ? 0 : -ENOENT;
1426 }
1427 
1428 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1429 {
1430 	struct smp_irk *k, *tmp;
1431 
1432 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1433 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1434 			continue;
1435 
1436 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1437 
1438 		list_del_rcu(&k->list);
1439 		kfree_rcu(k, rcu);
1440 	}
1441 }
1442 
1443 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1444 {
1445 	struct smp_ltk *k;
1446 	struct smp_irk *irk;
1447 	u8 addr_type;
1448 
1449 	if (type == BDADDR_BREDR) {
1450 		if (hci_find_link_key(hdev, bdaddr))
1451 			return true;
1452 		return false;
1453 	}
1454 
1455 	/* Convert to HCI addr type which struct smp_ltk uses */
1456 	if (type == BDADDR_LE_PUBLIC)
1457 		addr_type = ADDR_LE_DEV_PUBLIC;
1458 	else
1459 		addr_type = ADDR_LE_DEV_RANDOM;
1460 
1461 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1462 	if (irk) {
1463 		bdaddr = &irk->bdaddr;
1464 		addr_type = irk->addr_type;
1465 	}
1466 
1467 	rcu_read_lock();
1468 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1469 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1470 			rcu_read_unlock();
1471 			return true;
1472 		}
1473 	}
1474 	rcu_read_unlock();
1475 
1476 	return false;
1477 }
1478 
1479 /* HCI command timer function */
1480 static void hci_cmd_timeout(struct work_struct *work)
1481 {
1482 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1483 					    cmd_timer.work);
1484 
1485 	if (hdev->req_skb) {
1486 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1487 
1488 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1489 
1490 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1491 	} else {
1492 		bt_dev_err(hdev, "command tx timeout");
1493 	}
1494 
1495 	if (hdev->cmd_timeout)
1496 		hdev->cmd_timeout(hdev);
1497 
1498 	atomic_set(&hdev->cmd_cnt, 1);
1499 	queue_work(hdev->workqueue, &hdev->cmd_work);
1500 }
1501 
1502 /* HCI ncmd timer function */
1503 static void hci_ncmd_timeout(struct work_struct *work)
1504 {
1505 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1506 					    ncmd_timer.work);
1507 
1508 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1509 
1510 	/* No events can be injected if the ncmd timer triggers during the
1511 	 * HCI_INIT phase, since that procedure has its own timeout handling.
1512 	 */
1513 	if (test_bit(HCI_INIT, &hdev->flags))
1514 		return;
1515 
1516 	/* This is an irrecoverable state, inject hardware error event */
1517 	hci_reset_dev(hdev);
1518 }
1519 
1520 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1521 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1522 {
1523 	struct oob_data *data;
1524 
1525 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1526 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1527 			continue;
1528 		if (data->bdaddr_type != bdaddr_type)
1529 			continue;
1530 		return data;
1531 	}
1532 
1533 	return NULL;
1534 }
1535 
1536 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1537 			       u8 bdaddr_type)
1538 {
1539 	struct oob_data *data;
1540 
1541 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1542 	if (!data)
1543 		return -ENOENT;
1544 
1545 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1546 
1547 	list_del(&data->list);
1548 	kfree(data);
1549 
1550 	return 0;
1551 }
1552 
1553 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1554 {
1555 	struct oob_data *data, *n;
1556 
1557 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1558 		list_del(&data->list);
1559 		kfree(data);
1560 	}
1561 }
1562 
1563 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1564 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1565 			    u8 *hash256, u8 *rand256)
1566 {
1567 	struct oob_data *data;
1568 
1569 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1570 	if (!data) {
1571 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1572 		if (!data)
1573 			return -ENOMEM;
1574 
1575 		bacpy(&data->bdaddr, bdaddr);
1576 		data->bdaddr_type = bdaddr_type;
1577 		list_add(&data->list, &hdev->remote_oob_data);
1578 	}
1579 
1580 	if (hash192 && rand192) {
1581 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1582 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1583 		if (hash256 && rand256)
1584 			data->present = 0x03;
1585 	} else {
1586 		memset(data->hash192, 0, sizeof(data->hash192));
1587 		memset(data->rand192, 0, sizeof(data->rand192));
1588 		if (hash256 && rand256)
1589 			data->present = 0x02;
1590 		else
1591 			data->present = 0x00;
1592 	}
1593 
1594 	if (hash256 && rand256) {
1595 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1596 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1597 	} else {
1598 		memset(data->hash256, 0, sizeof(data->hash256));
1599 		memset(data->rand256, 0, sizeof(data->rand256));
1600 		if (hash192 && rand192)
1601 			data->present = 0x01;
1602 	}
1603 
1604 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1605 
1606 	return 0;
1607 }
1608 
1609 /* This function requires the caller holds hdev->lock */
1610 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1611 {
1612 	struct adv_info *adv_instance;
1613 
1614 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1615 		if (adv_instance->instance == instance)
1616 			return adv_instance;
1617 	}
1618 
1619 	return NULL;
1620 }
1621 
1622 /* This function requires the caller holds hdev->lock */
1623 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1624 {
1625 	struct adv_info *cur_instance;
1626 
1627 	cur_instance = hci_find_adv_instance(hdev, instance);
1628 	if (!cur_instance)
1629 		return NULL;
1630 
1631 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1632 					    struct adv_info, list))
1633 		return list_first_entry(&hdev->adv_instances,
1634 						 struct adv_info, list);
1635 	else
1636 		return list_next_entry(cur_instance, list);
1637 }
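/* Editor's note: this wraps from the last entry of hdev->adv_instances
 * back to the first, which drives the round-robin rotation when several
 * advertising instances are active. The order is list order as built by
 * list_add() in hci_add_adv_instance(), not numeric instance order.
 */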
1638 
1639 /* This function requires the caller holds hdev->lock */
1640 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1641 {
1642 	struct adv_info *adv_instance;
1643 
1644 	adv_instance = hci_find_adv_instance(hdev, instance);
1645 	if (!adv_instance)
1646 		return -ENOENT;
1647 
1648 	BT_DBG("%s removing instance %d", hdev->name, instance);
1649 
1650 	if (hdev->cur_adv_instance == instance) {
1651 		if (hdev->adv_instance_timeout) {
1652 			cancel_delayed_work(&hdev->adv_instance_expire);
1653 			hdev->adv_instance_timeout = 0;
1654 		}
1655 		hdev->cur_adv_instance = 0x00;
1656 	}
1657 
1658 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1659 
1660 	list_del(&adv_instance->list);
1661 	kfree(adv_instance);
1662 
1663 	hdev->adv_instance_cnt--;
1664 
1665 	return 0;
1666 }
1667 
1668 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1669 {
1670 	struct adv_info *adv_instance, *n;
1671 
1672 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1673 		adv_instance->rpa_expired = rpa_expired;
1674 }
1675 
1676 /* This function requires the caller holds hdev->lock */
1677 void hci_adv_instances_clear(struct hci_dev *hdev)
1678 {
1679 	struct adv_info *adv_instance, *n;
1680 
1681 	if (hdev->adv_instance_timeout) {
1682 		cancel_delayed_work(&hdev->adv_instance_expire);
1683 		hdev->adv_instance_timeout = 0;
1684 	}
1685 
1686 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1687 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1688 		list_del(&adv_instance->list);
1689 		kfree(adv_instance);
1690 	}
1691 
1692 	hdev->adv_instance_cnt = 0;
1693 	hdev->cur_adv_instance = 0x00;
1694 }
1695 
1696 static void adv_instance_rpa_expired(struct work_struct *work)
1697 {
1698 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1699 						     rpa_expired_cb.work);
1700 
1701 	BT_DBG("");
1702 
1703 	adv_instance->rpa_expired = true;
1704 }
1705 
1706 /* This function requires the caller holds hdev->lock */
1707 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1708 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1709 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1710 				      u16 timeout, u16 duration, s8 tx_power,
1711 				      u32 min_interval, u32 max_interval,
1712 				      u8 mesh_handle)
1713 {
1714 	struct adv_info *adv;
1715 
1716 	adv = hci_find_adv_instance(hdev, instance);
1717 	if (adv) {
1718 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1719 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1720 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1721 	} else {
1722 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1723 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1724 			return ERR_PTR(-EOVERFLOW);
1725 
1726 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1727 		if (!adv)
1728 			return ERR_PTR(-ENOMEM);
1729 
1730 		adv->pending = true;
1731 		adv->instance = instance;
1732 
1733 		/* If the controller supports only one set and the instance is
1734 		 * set to 1 then there is no option other than using handle 0x00.
1735 		 */
1736 		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1737 			adv->handle = 0x00;
1738 		else
1739 			adv->handle = instance;
1740 
1741 		list_add(&adv->list, &hdev->adv_instances);
1742 		hdev->adv_instance_cnt++;
1743 	}
1744 
1745 	adv->flags = flags;
1746 	adv->min_interval = min_interval;
1747 	adv->max_interval = max_interval;
1748 	adv->tx_power = tx_power;
1749 	/* Defining a mesh_handle changes the timing units to ms,
1750 	 * rather than seconds, and ties the instance to the requested
1751 	 * mesh_tx queue.
1752 	 */
1753 	adv->mesh = mesh_handle;
1754 
1755 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1756 				  scan_rsp_len, scan_rsp_data);
1757 
1758 	adv->timeout = timeout;
1759 	adv->remaining_time = timeout;
1760 
1761 	if (duration == 0)
1762 		adv->duration = hdev->def_multi_adv_rotation_duration;
1763 	else
1764 		adv->duration = duration;
1765 
1766 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1767 
1768 	BT_DBG("%s for instance %d", hdev->name, instance);
1769 
1770 	return adv;
1771 }
1772 
1773 /* This function requires the caller holds hdev->lock */
1774 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1775 				      u32 flags, u8 data_len, u8 *data,
1776 				      u32 min_interval, u32 max_interval)
1777 {
1778 	struct adv_info *adv;
1779 
1780 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1781 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1782 				   min_interval, max_interval, 0);
1783 	if (IS_ERR(adv))
1784 		return adv;
1785 
1786 	adv->periodic = true;
1787 	adv->per_adv_data_len = data_len;
1788 
1789 	if (data)
1790 		memcpy(adv->per_adv_data, data, data_len);
1791 
1792 	return adv;
1793 }
1794 
1795 /* This function requires the caller holds hdev->lock */
1796 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1797 			      u16 adv_data_len, u8 *adv_data,
1798 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1799 {
1800 	struct adv_info *adv;
1801 
1802 	adv = hci_find_adv_instance(hdev, instance);
1803 
1804 	/* If the advertisement doesn't exist, we can't modify its data */
1805 	if (!adv)
1806 		return -ENOENT;
1807 
1808 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1809 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1810 		memcpy(adv->adv_data, adv_data, adv_data_len);
1811 		adv->adv_data_len = adv_data_len;
1812 		adv->adv_data_changed = true;
1813 	}
1814 
1815 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1816 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1817 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1818 		adv->scan_rsp_len = scan_rsp_len;
1819 		adv->scan_rsp_changed = true;
1820 	}
1821 
1822 	/* Mark as changed if there are flags which would affect it */
1823 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1824 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1825 		adv->scan_rsp_changed = true;
1826 
1827 	return 0;
1828 }
1829 
1830 /* This function requires the caller holds hdev->lock */
1831 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1832 {
1833 	u32 flags;
1834 	struct adv_info *adv;
1835 
1836 	if (instance == 0x00) {
1837 		/* Instance 0 always manages the "Tx Power" and "Flags"
1838 		 * fields
1839 		 */
1840 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1841 
1842 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1843 		 * corresponds to the "connectable" instance flag.
1844 		 */
1845 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1846 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1847 
1848 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1849 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1850 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1851 			flags |= MGMT_ADV_FLAG_DISCOV;
1852 
1853 		return flags;
1854 	}
1855 
1856 	adv = hci_find_adv_instance(hdev, instance);
1857 
1858 	/* Return 0 when we got an invalid instance identifier. */
1859 	if (!adv)
1860 		return 0;
1861 
1862 	return adv->flags;
1863 }
1864 
1865 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1866 {
1867 	struct adv_info *adv;
1868 
1869 	/* Instance 0x00 always set local name */
1870 	if (instance == 0x00)
1871 		return true;
1872 
1873 	adv = hci_find_adv_instance(hdev, instance);
1874 	if (!adv)
1875 		return false;
1876 
1877 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1878 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1879 		return true;
1880 
1881 	return adv->scan_rsp_len ? true : false;
1882 }
1883 
1884 /* This function requires the caller holds hdev->lock */
1885 void hci_adv_monitors_clear(struct hci_dev *hdev)
1886 {
1887 	struct adv_monitor *monitor;
1888 	int handle;
1889 
1890 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1891 		hci_free_adv_monitor(hdev, monitor);
1892 
1893 	idr_destroy(&hdev->adv_monitors_idr);
1894 }
1895 
1896 /* Frees the monitor structure and does some bookkeeping.
1897  * This function requires the caller holds hdev->lock.
1898  */
1899 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1900 {
1901 	struct adv_pattern *pattern;
1902 	struct adv_pattern *tmp;
1903 
1904 	if (!monitor)
1905 		return;
1906 
1907 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1908 		list_del(&pattern->list);
1909 		kfree(pattern);
1910 	}
1911 
1912 	if (monitor->handle)
1913 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1914 
1915 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1916 		hdev->adv_monitors_cnt--;
1917 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1918 	}
1919 
1920 	kfree(monitor);
1921 }
1922 
1923 /* Assigns a handle to a monitor and, if offloading is supported and
1924  * power is on, also attempts to forward the request to the controller.
1925  * This function requires the caller holds hci_req_sync_lock.
1926  */
1927 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1928 {
1929 	int min, max, handle;
1930 	int status = 0;
1931 
1932 	if (!monitor)
1933 		return -EINVAL;
1934 
1935 	hci_dev_lock(hdev);
1936 
1937 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1938 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1939 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1940 			   GFP_KERNEL);
1941 
1942 	hci_dev_unlock(hdev);
1943 
1944 	if (handle < 0)
1945 		return handle;
1946 
1947 	monitor->handle = handle;
1948 
1949 	if (!hdev_is_powered(hdev))
1950 		return status;
1951 
1952 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1953 	case HCI_ADV_MONITOR_EXT_NONE:
1954 		bt_dev_dbg(hdev, "add monitor %d status %d",
1955 			   monitor->handle, status);
1956 		/* Message was not forwarded to controller - not an error */
1957 		break;
1958 
1959 	case HCI_ADV_MONITOR_EXT_MSFT:
1960 		status = msft_add_monitor_pattern(hdev, monitor);
1961 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1962 			   handle, status);
1963 		break;
1964 	}
1965 
1966 	return status;
1967 }
1968 
1969 /* Attempts to tell the controller to remove the monitor and frees it.
1970  * If the controller has no corresponding handle, remove it anyway.
1971  * This function requires the caller holds hci_req_sync_lock.
1972  */
1973 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1974 				  struct adv_monitor *monitor)
1975 {
1976 	int status = 0;
1977 	int handle;
1978 
1979 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1980 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1981 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1982 			   monitor->handle, status);
1983 		goto free_monitor;
1984 
1985 	case HCI_ADV_MONITOR_EXT_MSFT:
1986 		handle = monitor->handle;
1987 		status = msft_remove_monitor(hdev, monitor);
1988 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1989 			   handle, status);
1990 		break;
1991 	}
1992 
1993 	/* In case no matching handle is registered, just free the monitor */
1994 	if (status == -ENOENT)
1995 		goto free_monitor;
1996 
1997 	return status;
1998 
1999 free_monitor:
2000 	if (status == -ENOENT)
2001 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2002 			    monitor->handle);
2003 	hci_free_adv_monitor(hdev, monitor);
2004 
2005 	return status;
2006 }
2007 
2008 /* This function requires the caller holds hci_req_sync_lock */
2009 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2010 {
2011 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2012 
2013 	if (!monitor)
2014 		return -EINVAL;
2015 
2016 	return hci_remove_adv_monitor(hdev, monitor);
2017 }
2018 
2019 /* This function requires the caller holds hci_req_sync_lock */
2020 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2021 {
2022 	struct adv_monitor *monitor;
2023 	int idr_next_id = 0;
2024 	int status = 0;
2025 
2026 	while (1) {
2027 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2028 		if (!monitor)
2029 			break;
2030 
2031 		status = hci_remove_adv_monitor(hdev, monitor);
2032 		if (status)
2033 			return status;
2034 
2035 		idr_next_id++;
2036 	}
2037 
2038 	return status;
2039 }
2040 
2041 /* This function requires the caller holds hdev->lock */
2042 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2043 {
2044 	return !idr_is_empty(&hdev->adv_monitors_idr);
2045 }
2046 
2047 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2048 {
2049 	if (msft_monitor_supported(hdev))
2050 		return HCI_ADV_MONITOR_EXT_MSFT;
2051 
2052 	return HCI_ADV_MONITOR_EXT_NONE;
2053 }
2054 
2055 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2056 					 bdaddr_t *bdaddr, u8 type)
2057 {
2058 	struct bdaddr_list *b;
2059 
2060 	list_for_each_entry(b, bdaddr_list, list) {
2061 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2062 			return b;
2063 	}
2064 
2065 	return NULL;
2066 }
2067 
2068 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2069 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2070 				u8 type)
2071 {
2072 	struct bdaddr_list_with_irk *b;
2073 
2074 	list_for_each_entry(b, bdaddr_list, list) {
2075 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2076 			return b;
2077 	}
2078 
2079 	return NULL;
2080 }
2081 
2082 struct bdaddr_list_with_flags *
2083 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2084 				  bdaddr_t *bdaddr, u8 type)
2085 {
2086 	struct bdaddr_list_with_flags *b;
2087 
2088 	list_for_each_entry(b, bdaddr_list, list) {
2089 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2090 			return b;
2091 	}
2092 
2093 	return NULL;
2094 }
2095 
2096 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2097 {
2098 	struct bdaddr_list *b, *n;
2099 
2100 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2101 		list_del(&b->list);
2102 		kfree(b);
2103 	}
2104 }
2105 
2106 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2107 {
2108 	struct bdaddr_list *entry;
2109 
2110 	if (!bacmp(bdaddr, BDADDR_ANY))
2111 		return -EBADF;
2112 
2113 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2114 		return -EEXIST;
2115 
2116 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2117 	if (!entry)
2118 		return -ENOMEM;
2119 
2120 	bacpy(&entry->bdaddr, bdaddr);
2121 	entry->bdaddr_type = type;
2122 
2123 	list_add(&entry->list, list);
2124 
2125 	return 0;
2126 }
2127 
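/* Editorial sketch (not part of the original file): round trip through the
 * bdaddr list helpers using hdev->reject_list as an example; the address
 * value is made up. hci_bdaddr_list_add() rejects BDADDR_ANY with -EBADF
 * and duplicate entries with -EEXIST.
 */
static void example_bdaddr_list(struct hci_dev *hdev)
{
	bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };

	if (hci_bdaddr_list_add(&hdev->reject_list, &peer, BDADDR_BREDR))
		return;

	/* The entry is now discoverable by address and type... */
	WARN_ON(!hci_bdaddr_list_lookup(&hdev->reject_list, &peer,
					BDADDR_BREDR));

	/* ...and can be removed again (BDADDR_ANY would clear the list) */
	hci_bdaddr_list_del(&hdev->reject_list, &peer, BDADDR_BREDR);
}
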
2128 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2129 					u8 type, u8 *peer_irk, u8 *local_irk)
2130 {
2131 	struct bdaddr_list_with_irk *entry;
2132 
2133 	if (!bacmp(bdaddr, BDADDR_ANY))
2134 		return -EBADF;
2135 
2136 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2137 		return -EEXIST;
2138 
2139 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2140 	if (!entry)
2141 		return -ENOMEM;
2142 
2143 	bacpy(&entry->bdaddr, bdaddr);
2144 	entry->bdaddr_type = type;
2145 
2146 	if (peer_irk)
2147 		memcpy(entry->peer_irk, peer_irk, 16);
2148 
2149 	if (local_irk)
2150 		memcpy(entry->local_irk, local_irk, 16);
2151 
2152 	list_add(&entry->list, list);
2153 
2154 	return 0;
2155 }
2156 
2157 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2158 				   u8 type, u32 flags)
2159 {
2160 	struct bdaddr_list_with_flags *entry;
2161 
2162 	if (!bacmp(bdaddr, BDADDR_ANY))
2163 		return -EBADF;
2164 
2165 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2166 		return -EEXIST;
2167 
2168 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2169 	if (!entry)
2170 		return -ENOMEM;
2171 
2172 	bacpy(&entry->bdaddr, bdaddr);
2173 	entry->bdaddr_type = type;
2174 	entry->flags = flags;
2175 
2176 	list_add(&entry->list, list);
2177 
2178 	return 0;
2179 }
2180 
2181 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2182 {
2183 	struct bdaddr_list *entry;
2184 
2185 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2186 		hci_bdaddr_list_clear(list);
2187 		return 0;
2188 	}
2189 
2190 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2191 	if (!entry)
2192 		return -ENOENT;
2193 
2194 	list_del(&entry->list);
2195 	kfree(entry);
2196 
2197 	return 0;
2198 }
2199 
2200 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2201 							u8 type)
2202 {
2203 	struct bdaddr_list_with_irk *entry;
2204 
2205 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2206 		hci_bdaddr_list_clear(list);
2207 		return 0;
2208 	}
2209 
2210 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2211 	if (!entry)
2212 		return -ENOENT;
2213 
2214 	list_del(&entry->list);
2215 	kfree(entry);
2216 
2217 	return 0;
2218 }
2219 
2220 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2221 				   u8 type)
2222 {
2223 	struct bdaddr_list_with_flags *entry;
2224 
2225 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2226 		hci_bdaddr_list_clear(list);
2227 		return 0;
2228 	}
2229 
2230 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2231 	if (!entry)
2232 		return -ENOENT;
2233 
2234 	list_del(&entry->list);
2235 	kfree(entry);
2236 
2237 	return 0;
2238 }
2239 
2240 /* This function requires the caller holds hdev->lock */
2241 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2242 					       bdaddr_t *addr, u8 addr_type)
2243 {
2244 	struct hci_conn_params *params;
2245 
2246 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2247 		if (bacmp(&params->addr, addr) == 0 &&
2248 		    params->addr_type == addr_type) {
2249 			return params;
2250 		}
2251 	}
2252 
2253 	return NULL;
2254 }
2255 
2256 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2257 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2258 						  bdaddr_t *addr, u8 addr_type)
2259 {
2260 	struct hci_conn_params *param;
2261 
2262 	rcu_read_lock();
2263 
2264 	list_for_each_entry_rcu(param, list, action) {
2265 		if (bacmp(&param->addr, addr) == 0 &&
2266 		    param->addr_type == addr_type) {
2267 			rcu_read_unlock();
2268 			return param;
2269 		}
2270 	}
2271 
2272 	rcu_read_unlock();
2273 
2274 	return NULL;
2275 }
2276 
2277 /* This function requires the caller holds hdev->lock */
2278 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2279 {
2280 	if (list_empty(&param->action))
2281 		return;
2282 
2283 	list_del_rcu(&param->action);
2284 	synchronize_rcu();
2285 	INIT_LIST_HEAD(&param->action);
2286 }
2287 
2288 /* This function requires the caller holds hdev->lock */
2289 void hci_pend_le_list_add(struct hci_conn_params *param,
2290 			  struct list_head *list)
2291 {
2292 	list_add_rcu(&param->action, list);
2293 }
2294 
2295 /* This function requires the caller holds hdev->lock */
2296 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2297 					    bdaddr_t *addr, u8 addr_type)
2298 {
2299 	struct hci_conn_params *params;
2300 
2301 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2302 	if (params)
2303 		return params;
2304 
2305 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2306 	if (!params) {
2307 		bt_dev_err(hdev, "out of memory");
2308 		return NULL;
2309 	}
2310 
2311 	bacpy(&params->addr, addr);
2312 	params->addr_type = addr_type;
2313 
2314 	list_add(&params->list, &hdev->le_conn_params);
2315 	INIT_LIST_HEAD(&params->action);
2316 
2317 	params->conn_min_interval = hdev->le_conn_min_interval;
2318 	params->conn_max_interval = hdev->le_conn_max_interval;
2319 	params->conn_latency = hdev->le_conn_latency;
2320 	params->supervision_timeout = hdev->le_supv_timeout;
2321 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2322 
2323 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2324 
2325 	return params;
2326 }
2327 
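/* Editorial sketch (not part of the original file): hci_conn_params_add()
 * is effectively "lookup or create" - a second call for the same address
 * simply returns the existing entry, pre-filled with the hdev-wide LE
 * defaults. The HCI_AUTO_CONN_ALWAYS choice here is arbitrary.
 */
static void example_conn_params(struct hci_dev *hdev, bdaddr_t *peer)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_add(hdev, peer, ADDR_LE_DEV_PUBLIC);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	hci_dev_unlock(hdev);
}
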
2328 void hci_conn_params_free(struct hci_conn_params *params)
2329 {
2330 	hci_pend_le_list_del_init(params);
2331 
2332 	if (params->conn) {
2333 		hci_conn_drop(params->conn);
2334 		hci_conn_put(params->conn);
2335 	}
2336 
2337 	list_del(&params->list);
2338 	kfree(params);
2339 }
2340 
2341 /* This function requires the caller holds hdev->lock */
2342 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2343 {
2344 	struct hci_conn_params *params;
2345 
2346 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2347 	if (!params)
2348 		return;
2349 
2350 	hci_conn_params_free(params);
2351 
2352 	hci_update_passive_scan(hdev);
2353 
2354 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2355 }
2356 
2357 /* This function requires the caller holds hdev->lock */
2358 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2359 {
2360 	struct hci_conn_params *params, *tmp;
2361 
2362 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2363 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2364 			continue;
2365 
2366 		/* If trying a one-time connection to a disabled device, keep
2367 		 * the params but mark them explicit-connect (one-shot) only.
2368 		 */
2369 		if (params->explicit_connect) {
2370 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2371 			continue;
2372 		}
2373 
2374 		hci_conn_params_free(params);
2375 	}
2376 
2377 	BT_DBG("All LE disabled connection parameters were removed");
2378 }
2379 
2380 /* This function requires the caller holds hdev->lock */
2381 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2382 {
2383 	struct hci_conn_params *params, *tmp;
2384 
2385 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2386 		hci_conn_params_free(params);
2387 
2388 	BT_DBG("All LE connection parameters were removed");
2389 }
2390 
2391 /* Copy the Identity Address of the controller.
2392  *
2393  * If the controller has a public BD_ADDR, then by default use that one.
2394  * If this is a LE only controller without a public address, default to
2395  * the static random address.
2396  *
2397  * For debugging purposes it is possible to force controllers with a
2398  * public address to use the static random address instead.
2399  *
2400  * In case BR/EDR has been disabled on a dual-mode controller and
2401  * userspace has configured a static address, then that address
2402  * becomes the identity address instead of the public BR/EDR address.
2403  */
2404 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405 			       u8 *bdaddr_type)
2406 {
2407 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2408 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2409 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2410 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2411 		bacpy(bdaddr, &hdev->static_addr);
2412 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2413 	} else {
2414 		bacpy(bdaddr, &hdev->bdaddr);
2415 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2416 	}
2417 }
2418 
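/* Editorial note (not part of the original file): the selection above, read
 * as a priority list -
 *   1. HCI_FORCE_STATIC_ADDR set               -> static random address
 *   2. no public BD_ADDR (LE-only controller)  -> static random address
 *   3. BR/EDR disabled and static address set  -> static random address
 *   4. otherwise                               -> public BD_ADDR
 */
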
2419 static void hci_clear_wake_reason(struct hci_dev *hdev)
2420 {
2421 	hci_dev_lock(hdev);
2422 
2423 	hdev->wake_reason = 0;
2424 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2425 	hdev->wake_addr_type = 0;
2426 
2427 	hci_dev_unlock(hdev);
2428 }
2429 
2430 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2431 				void *data)
2432 {
2433 	struct hci_dev *hdev =
2434 		container_of(nb, struct hci_dev, suspend_notifier);
2435 	int ret = 0;
2436 
2437 	/* Userspace has full control of this device. Do nothing. */
2438 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2439 		return NOTIFY_DONE;
2440 
2441 	/* Hold the device to avoid racing with hci_unregister_dev(). */
2442 	hci_dev_hold(hdev);
2443 
2444 	if (action == PM_SUSPEND_PREPARE)
2445 		ret = hci_suspend_dev(hdev);
2446 	else if (action == PM_POST_SUSPEND)
2447 		ret = hci_resume_dev(hdev);
2448 
2449 	if (ret)
2450 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2451 			   action, ret);
2452 
2453 	hci_dev_put(hdev);
2454 	return NOTIFY_DONE;
2455 }
2456 
2457 /* Alloc HCI device */
2458 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2459 {
2460 	struct hci_dev *hdev;
2461 	unsigned int alloc_size;
2462 
2463 	alloc_size = sizeof(*hdev);
2464 	if (sizeof_priv) {
2465 		/* FIXME: may need ALIGN-ment? */
2466 		alloc_size += sizeof_priv;
2467 	}
2468 
2469 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2470 	if (!hdev)
2471 		return NULL;
2472 
2473 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2474 	hdev->esco_type = (ESCO_HV1);
2475 	hdev->link_mode = (HCI_LM_ACCEPT);
2476 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2477 	hdev->io_capability = 0x03;	/* No Input No Output */
2478 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2479 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2480 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2481 	hdev->adv_instance_cnt = 0;
2482 	hdev->cur_adv_instance = 0x00;
2483 	hdev->adv_instance_timeout = 0;
2484 
2485 	hdev->advmon_allowlist_duration = 300;
2486 	hdev->advmon_no_filter_duration = 500;
2487 	hdev->enable_advmon_interleave_scan = 0x00;	/* Disabled by default */
2488 
2489 	hdev->sniff_max_interval = 800;
2490 	hdev->sniff_min_interval = 80;
2491 
2492 	hdev->le_adv_channel_map = 0x07;
2493 	hdev->le_adv_min_interval = 0x0800;
2494 	hdev->le_adv_max_interval = 0x0800;
2495 	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2496 	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2497 	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2498 	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2499 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2500 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2501 	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2502 	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2503 	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2504 	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2505 	hdev->le_conn_min_interval = 0x0018;
2506 	hdev->le_conn_max_interval = 0x0028;
2507 	hdev->le_conn_latency = 0x0000;
2508 	hdev->le_supv_timeout = 0x002a;
2509 	hdev->le_def_tx_len = 0x001b;
2510 	hdev->le_def_tx_time = 0x0148;
2511 	hdev->le_max_tx_len = 0x001b;
2512 	hdev->le_max_tx_time = 0x0148;
2513 	hdev->le_max_rx_len = 0x001b;
2514 	hdev->le_max_rx_time = 0x0148;
2515 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2516 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2517 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2518 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2519 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2520 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2521 	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2522 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2523 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2524 
2525 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2526 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2527 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2528 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2529 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2530 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2531 
2532 	/* default 1.28 sec page scan */
2533 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2534 	hdev->def_page_scan_int = 0x0800;
2535 	hdev->def_page_scan_window = 0x0012;
2536 
2537 	mutex_init(&hdev->lock);
2538 	mutex_init(&hdev->req_lock);
2539 
2540 	ida_init(&hdev->unset_handle_ida);
2541 
2542 	INIT_LIST_HEAD(&hdev->mesh_pending);
2543 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2544 	INIT_LIST_HEAD(&hdev->reject_list);
2545 	INIT_LIST_HEAD(&hdev->accept_list);
2546 	INIT_LIST_HEAD(&hdev->uuids);
2547 	INIT_LIST_HEAD(&hdev->link_keys);
2548 	INIT_LIST_HEAD(&hdev->long_term_keys);
2549 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2550 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2551 	INIT_LIST_HEAD(&hdev->le_accept_list);
2552 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2553 	INIT_LIST_HEAD(&hdev->le_conn_params);
2554 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2555 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2556 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2557 	INIT_LIST_HEAD(&hdev->adv_instances);
2558 	INIT_LIST_HEAD(&hdev->blocked_keys);
2559 	INIT_LIST_HEAD(&hdev->monitored_devices);
2560 
2561 	INIT_LIST_HEAD(&hdev->local_codecs);
2562 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2563 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2564 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2565 	INIT_WORK(&hdev->power_on, hci_power_on);
2566 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2567 
2568 	hci_cmd_sync_init(hdev);
2569 
2570 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2571 
2572 	skb_queue_head_init(&hdev->rx_q);
2573 	skb_queue_head_init(&hdev->cmd_q);
2574 	skb_queue_head_init(&hdev->raw_q);
2575 
2576 	init_waitqueue_head(&hdev->req_wait_q);
2577 
2578 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2579 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2580 
2581 	hci_devcd_setup(hdev);
2582 	hci_request_setup(hdev);
2583 
2584 	hci_init_sysfs(hdev);
2585 	discovery_init(hdev);
2586 
2587 	return hdev;
2588 }
2589 EXPORT_SYMBOL(hci_alloc_dev_priv);
2590 
2591 /* Free HCI device */
2592 void hci_free_dev(struct hci_dev *hdev)
2593 {
2594 	/* will free via device release */
2595 	put_device(&hdev->dev);
2596 }
2597 EXPORT_SYMBOL(hci_free_dev);
2598 
2599 /* Register HCI device */
2600 int hci_register_dev(struct hci_dev *hdev)
2601 {
2602 	int id, error;
2603 
2604 	if (!hdev->open || !hdev->close || !hdev->send)
2605 		return -EINVAL;
2606 
2607 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2608 	if (id < 0)
2609 		return id;
2610 
2611 	error = dev_set_name(&hdev->dev, "hci%u", id);
2612 	if (error)
2613 		return error;
2614 
2615 	hdev->name = dev_name(&hdev->dev);
2616 	hdev->id = id;
2617 
2618 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2619 
2620 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2621 	if (!hdev->workqueue) {
2622 		error = -ENOMEM;
2623 		goto err;
2624 	}
2625 
2626 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2627 						      hdev->name);
2628 	if (!hdev->req_workqueue) {
2629 		destroy_workqueue(hdev->workqueue);
2630 		error = -ENOMEM;
2631 		goto err;
2632 	}
2633 
2634 	if (!IS_ERR_OR_NULL(bt_debugfs))
2635 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2636 
2637 	error = device_add(&hdev->dev);
2638 	if (error < 0)
2639 		goto err_wqueue;
2640 
2641 	hci_leds_init(hdev);
2642 
2643 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2644 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2645 				    hdev);
2646 	if (hdev->rfkill) {
2647 		if (rfkill_register(hdev->rfkill) < 0) {
2648 			rfkill_destroy(hdev->rfkill);
2649 			hdev->rfkill = NULL;
2650 		}
2651 	}
2652 
2653 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2654 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2655 
2656 	hci_dev_set_flag(hdev, HCI_SETUP);
2657 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2658 
2659 	/* Assume BR/EDR support until proven otherwise (such as
2660 	 * through reading the supported features during init).
2661 	 */
2662 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2663 
2664 	write_lock(&hci_dev_list_lock);
2665 	list_add(&hdev->list, &hci_dev_list);
2666 	write_unlock(&hci_dev_list_lock);
2667 
2668 	/* Devices that are marked for raw-only usage are unconfigured
2669 	 * and should not be included in normal operation.
2670 	 */
2671 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2672 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2673 
2674 	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
2675 	 * callback.
2676 	 */
2677 	if (hdev->wakeup)
2678 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2679 
2680 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2681 	hci_dev_hold(hdev);
2682 
2683 	error = hci_register_suspend_notifier(hdev);
2684 	if (error)
2685 		BT_WARN("register suspend notifier failed error:%d\n", error);
2686 
2687 	queue_work(hdev->req_workqueue, &hdev->power_on);
2688 
2689 	idr_init(&hdev->adv_monitors_idr);
2690 	msft_register(hdev);
2691 
2692 	return id;
2693 
2694 err_wqueue:
2695 	debugfs_remove_recursive(hdev->debugfs);
2696 	destroy_workqueue(hdev->workqueue);
2697 	destroy_workqueue(hdev->req_workqueue);
2698 err:
2699 	ida_free(&hci_index_ida, hdev->id);
2700 
2701 	return error;
2702 }
2703 EXPORT_SYMBOL(hci_register_dev);
2704 
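/* Editorial sketch (not part of the original file): the minimal driver-side
 * sequence that ends in hci_register_dev(). All example_* names are
 * hypothetical stubs; only ->open, ->close and ->send are mandatory, which
 * is exactly what hci_register_dev() checks first.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(struct device *parent)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;
	SET_HCIDEV_DEV(hdev, parent);

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
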
2705 /* Unregister HCI device */
2706 void hci_unregister_dev(struct hci_dev *hdev)
2707 {
2708 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2709 
2710 	mutex_lock(&hdev->unregister_lock);
2711 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2712 	mutex_unlock(&hdev->unregister_lock);
2713 
2714 	write_lock(&hci_dev_list_lock);
2715 	list_del(&hdev->list);
2716 	write_unlock(&hci_dev_list_lock);
2717 
2718 	cancel_work_sync(&hdev->rx_work);
2719 	cancel_work_sync(&hdev->cmd_work);
2720 	cancel_work_sync(&hdev->tx_work);
2721 	cancel_work_sync(&hdev->power_on);
2722 	cancel_work_sync(&hdev->error_reset);
2723 
2724 	hci_cmd_sync_clear(hdev);
2725 
2726 	hci_unregister_suspend_notifier(hdev);
2727 
2728 	hci_dev_do_close(hdev);
2729 
2730 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2731 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2732 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2733 		hci_dev_lock(hdev);
2734 		mgmt_index_removed(hdev);
2735 		hci_dev_unlock(hdev);
2736 	}
2737 
2738 	/* mgmt_index_removed should take care of emptying the
2739 	 * pending list */
2740 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2741 
2742 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2743 
2744 	if (hdev->rfkill) {
2745 		rfkill_unregister(hdev->rfkill);
2746 		rfkill_destroy(hdev->rfkill);
2747 	}
2748 
2749 	device_del(&hdev->dev);
2750 	/* Actual cleanup is deferred until hci_release_dev(). */
2751 	hci_dev_put(hdev);
2752 }
2753 EXPORT_SYMBOL(hci_unregister_dev);
2754 
2755 /* Release HCI device */
2756 void hci_release_dev(struct hci_dev *hdev)
2757 {
2758 	debugfs_remove_recursive(hdev->debugfs);
2759 	kfree_const(hdev->hw_info);
2760 	kfree_const(hdev->fw_info);
2761 
2762 	destroy_workqueue(hdev->workqueue);
2763 	destroy_workqueue(hdev->req_workqueue);
2764 
2765 	hci_dev_lock(hdev);
2766 	hci_bdaddr_list_clear(&hdev->reject_list);
2767 	hci_bdaddr_list_clear(&hdev->accept_list);
2768 	hci_uuids_clear(hdev);
2769 	hci_link_keys_clear(hdev);
2770 	hci_smp_ltks_clear(hdev);
2771 	hci_smp_irks_clear(hdev);
2772 	hci_remote_oob_data_clear(hdev);
2773 	hci_adv_instances_clear(hdev);
2774 	hci_adv_monitors_clear(hdev);
2775 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2776 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2777 	hci_conn_params_clear_all(hdev);
2778 	hci_discovery_filter_clear(hdev);
2779 	hci_blocked_keys_clear(hdev);
2780 	hci_codec_list_clear(&hdev->local_codecs);
2781 	msft_release(hdev);
2782 	hci_dev_unlock(hdev);
2783 
2784 	ida_destroy(&hdev->unset_handle_ida);
2785 	ida_free(&hci_index_ida, hdev->id);
2786 	kfree_skb(hdev->sent_cmd);
2787 	kfree_skb(hdev->req_skb);
2788 	kfree_skb(hdev->recv_event);
2789 	kfree(hdev);
2790 }
2791 EXPORT_SYMBOL(hci_release_dev);
2792 
2793 int hci_register_suspend_notifier(struct hci_dev *hdev)
2794 {
2795 	int ret = 0;
2796 
2797 	if (!hdev->suspend_notifier.notifier_call &&
2798 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2799 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2800 		ret = register_pm_notifier(&hdev->suspend_notifier);
2801 	}
2802 
2803 	return ret;
2804 }
2805 
2806 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2807 {
2808 	int ret = 0;
2809 
2810 	if (hdev->suspend_notifier.notifier_call) {
2811 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2812 		if (!ret)
2813 			hdev->suspend_notifier.notifier_call = NULL;
2814 	}
2815 
2816 	return ret;
2817 }
2818 
2819 /* Cancel ongoing command synchronously:
2820  *
2821  * - Cancel command timer
2822  * - Reset command counter
2823  * - Cancel command request
2824  */
2825 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2826 {
2827 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2828 
2829 	cancel_delayed_work_sync(&hdev->cmd_timer);
2830 	cancel_delayed_work_sync(&hdev->ncmd_timer);
2831 	atomic_set(&hdev->cmd_cnt, 1);
2832 
2833 	hci_cmd_sync_cancel_sync(hdev, err);
2834 }
2835 
2836 /* Suspend HCI device */
2837 int hci_suspend_dev(struct hci_dev *hdev)
2838 {
2839 	int ret;
2840 
2841 	bt_dev_dbg(hdev, "");
2842 
2843 	/* Suspend should only act when the device is powered. */
2844 	if (!hdev_is_powered(hdev) ||
2845 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2846 		return 0;
2847 
2848 	/* If powering down, don't attempt to suspend */
2849 	if (mgmt_powering_down(hdev))
2850 		return 0;
2851 
2852 	/* Cancel potentially blocking sync operation before suspend */
2853 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2854 
2855 	hci_req_sync_lock(hdev);
2856 	ret = hci_suspend_sync(hdev);
2857 	hci_req_sync_unlock(hdev);
2858 
2859 	hci_clear_wake_reason(hdev);
2860 	mgmt_suspending(hdev, hdev->suspend_state);
2861 
2862 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2863 	return ret;
2864 }
2865 EXPORT_SYMBOL(hci_suspend_dev);
2866 
2867 /* Resume HCI device */
2868 int hci_resume_dev(struct hci_dev *hdev)
2869 {
2870 	int ret;
2871 
2872 	bt_dev_dbg(hdev, "");
2873 
2874 	/* Resume should only act when the device is powered. */
2875 	if (!hdev_is_powered(hdev) ||
2876 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2877 		return 0;
2878 
2879 	/* If powering down, don't attempt to resume */
2880 	if (mgmt_powering_down(hdev))
2881 		return 0;
2882 
2883 	hci_req_sync_lock(hdev);
2884 	ret = hci_resume_sync(hdev);
2885 	hci_req_sync_unlock(hdev);
2886 
2887 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2888 		      hdev->wake_addr_type);
2889 
2890 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2891 	return ret;
2892 }
2893 EXPORT_SYMBOL(hci_resume_dev);
2894 
2895 /* Reset HCI device */
2896 int hci_reset_dev(struct hci_dev *hdev)
2897 {
2898 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2899 	struct sk_buff *skb;
2900 
2901 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2902 	if (!skb)
2903 		return -ENOMEM;
2904 
2905 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2906 	skb_put_data(skb, hw_err, 3);
2907 
2908 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2909 
2910 	/* Send Hardware Error to upper stack */
2911 	return hci_recv_frame(hdev, skb);
2912 }
2913 EXPORT_SYMBOL(hci_reset_dev);
2914 
2915 /* Receive frame from HCI drivers */
2916 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2917 {
2918 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2919 		      !test_bit(HCI_INIT, &hdev->flags))) {
2920 		kfree_skb(skb);
2921 		return -ENXIO;
2922 	}
2923 
2924 	switch (hci_skb_pkt_type(skb)) {
2925 	case HCI_EVENT_PKT:
2926 		break;
2927 	case HCI_ACLDATA_PKT:
2928 		/* Detect if ISO packet has been sent as ACL */
2929 		if (hci_conn_num(hdev, ISO_LINK)) {
2930 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2931 			__u8 type;
2932 
2933 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2934 			if (type == ISO_LINK)
2935 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2936 		}
2937 		break;
2938 	case HCI_SCODATA_PKT:
2939 		break;
2940 	case HCI_ISODATA_PKT:
2941 		break;
2942 	default:
2943 		kfree_skb(skb);
2944 		return -EINVAL;
2945 	}
2946 
2947 	/* Incoming skb */
2948 	bt_cb(skb)->incoming = 1;
2949 
2950 	/* Time stamp */
2951 	__net_timestamp(skb);
2952 
2953 	skb_queue_tail(&hdev->rx_q, skb);
2954 	queue_work(hdev->workqueue, &hdev->rx_work);
2955 
2956 	return 0;
2957 }
2958 EXPORT_SYMBOL(hci_recv_frame);
2959 
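/* Editorial sketch (not part of the original file): how a driver's interrupt
 * or URB completion path typically hands a received HCI event to the core.
 * The packet type must be set before calling hci_recv_frame(), which
 * consumes the skb on both success and failure.
 */
static int example_driver_rx_event(struct hci_dev *hdev, const void *buf,
				   size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, buf, len);

	return hci_recv_frame(hdev, skb);
}
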
2960 /* Receive diagnostic message from HCI drivers */
2961 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2962 {
2963 	/* Mark as diagnostic packet */
2964 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2965 
2966 	/* Time stamp */
2967 	__net_timestamp(skb);
2968 
2969 	skb_queue_tail(&hdev->rx_q, skb);
2970 	queue_work(hdev->workqueue, &hdev->rx_work);
2971 
2972 	return 0;
2973 }
2974 EXPORT_SYMBOL(hci_recv_diag);
2975 
2976 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2977 {
2978 	va_list vargs;
2979 
2980 	va_start(vargs, fmt);
2981 	kfree_const(hdev->hw_info);
2982 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2983 	va_end(vargs);
2984 }
2985 EXPORT_SYMBOL(hci_set_hw_info);
2986 
2987 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2988 {
2989 	va_list vargs;
2990 
2991 	va_start(vargs, fmt);
2992 	kfree_const(hdev->fw_info);
2993 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2994 	va_end(vargs);
2995 }
2996 EXPORT_SYMBOL(hci_set_fw_info);
2997 
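/* Editorial sketch (not part of the original file): drivers usually record
 * the hardware and firmware descriptors once during setup; both format
 * strings and values here are made up.
 */
static void example_set_info(struct hci_dev *hdev)
{
	hci_set_hw_info(hdev, "rev 0x%4.4x", 0x0100);
	hci_set_fw_info(hdev, "build %u", 42);
}
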
2998 /* ---- Interface to upper protocols ---- */
2999 
3000 int hci_register_cb(struct hci_cb *cb)
3001 {
3002 	BT_DBG("%p name %s", cb, cb->name);
3003 
3004 	mutex_lock(&hci_cb_list_lock);
3005 	list_add_tail(&cb->list, &hci_cb_list);
3006 	mutex_unlock(&hci_cb_list_lock);
3007 
3008 	return 0;
3009 }
3010 EXPORT_SYMBOL(hci_register_cb);
3011 
3012 int hci_unregister_cb(struct hci_cb *cb)
3013 {
3014 	BT_DBG("%p name %s", cb, cb->name);
3015 
3016 	mutex_lock(&hci_cb_list_lock);
3017 	list_del(&cb->list);
3018 	mutex_unlock(&hci_cb_list_lock);
3019 
3020 	return 0;
3021 }
3022 EXPORT_SYMBOL(hci_unregister_cb);
3023 
3024 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3025 {
3026 	int err;
3027 
3028 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3029 	       skb->len);
3030 
3031 	/* Time stamp */
3032 	__net_timestamp(skb);
3033 
3034 	/* Send copy to monitor */
3035 	hci_send_to_monitor(hdev, skb);
3036 
3037 	if (atomic_read(&hdev->promisc)) {
3038 		/* Send copy to the sockets */
3039 		hci_send_to_sock(hdev, skb);
3040 	}
3041 
3042 	/* Get rid of the skb owner prior to sending to the driver. */
3043 	skb_orphan(skb);
3044 
3045 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3046 		kfree_skb(skb);
3047 		return -EINVAL;
3048 	}
3049 
3050 	err = hdev->send(hdev, skb);
3051 	if (err < 0) {
3052 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3053 		kfree_skb(skb);
3054 		return err;
3055 	}
3056 
3057 	return 0;
3058 }
3059 
3060 /* Send HCI command */
3061 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3062 		 const void *param)
3063 {
3064 	struct sk_buff *skb;
3065 
3066 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3067 
3068 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3069 	if (!skb) {
3070 		bt_dev_err(hdev, "no memory for command");
3071 		return -ENOMEM;
3072 	}
3073 
3074 	/* Stand-alone HCI commands must be flagged as
3075 	 * single-command requests.
3076 	 */
3077 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3078 
3079 	skb_queue_tail(&hdev->cmd_q, skb);
3080 	queue_work(hdev->workqueue, &hdev->cmd_work);
3081 
3082 	return 0;
3083 }
3084 
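/* Editorial sketch (not part of the original file): queueing a parameterless
 * HCI_Read_BD_ADDR command via hci_send_cmd(). The call only enqueues the
 * command; the Command Complete event is delivered later through the event
 * path, not returned here.
 */
static int example_read_bd_addr(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
}
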
3085 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3086 		   const void *param)
3087 {
3088 	struct sk_buff *skb;
3089 
3090 	if (hci_opcode_ogf(opcode) != 0x3f) {
3091 		/* A controller receiving a command shall respond with either
3092 		 * a Command Status Event or a Command Complete Event.
3093 		 * Therefore, all standard HCI commands must be sent via the
3094 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3095 		 * Some vendors do not comply with this rule for vendor-specific
3096 		 * commands and do not return any event. We want to support
3097 		 * unresponded commands for such cases only.
3098 		 */
3099 		bt_dev_err(hdev, "unresponded command not supported");
3100 		return -EINVAL;
3101 	}
3102 
3103 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3104 	if (!skb) {
3105 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3106 			   opcode);
3107 		return -ENOMEM;
3108 	}
3109 
3110 	hci_send_frame(hdev, skb);
3111 
3112 	return 0;
3113 }
3114 EXPORT_SYMBOL(__hci_cmd_send);
3115 
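/* Editorial sketch (not part of the original file): __hci_cmd_send() is only
 * for vendor commands (OGF 0x3f) that are known not to generate a completion
 * event; the OCF 0x0001 and one-byte payload here are made up.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	const u8 param[] = { 0x01 };

	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), param);
}
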
3116 /* Get data from the previously sent command */
3117 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3118 {
3119 	struct hci_command_hdr *hdr;
3120 
3121 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3122 		return NULL;
3123 
3124 	hdr = (void *)skb->data;
3125 
3126 	if (hdr->opcode != cpu_to_le16(opcode))
3127 		return NULL;
3128 
3129 	return skb->data + HCI_COMMAND_HDR_SIZE;
3130 }
3131 
3132 /* Get data from the previously sent command */
3133 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3134 {
3135 	void *data;
3136 
3137 	/* Check if opcode matches last sent command */
3138 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3139 	if (!data)
3140 		/* Check if opcode matches last request */
3141 		data = hci_cmd_data(hdev->req_skb, opcode);
3142 
3143 	return data;
3144 }
3145 
3146 /* Get data from last received event */
3147 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3148 {
3149 	struct hci_event_hdr *hdr;
3150 	int offset;
3151 
3152 	if (!hdev->recv_event)
3153 		return NULL;
3154 
3155 	hdr = (void *)hdev->recv_event->data;
3156 	offset = sizeof(*hdr);
3157 
3158 	if (hdr->evt != event) {
3159 		/* In case of an LE meta event, check whether the subevent matches */
3160 		if (hdr->evt == HCI_EV_LE_META) {
3161 			struct hci_ev_le_meta *ev;
3162 
3163 			ev = (void *)hdev->recv_event->data + offset;
3164 			offset += sizeof(*ev);
3165 			if (ev->subevent == event)
3166 				goto found;
3167 		}
3168 		return NULL;
3169 	}
3170 
3171 found:
3172 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3173 
3174 	return hdev->recv_event->data + offset;
3175 }
3176 
3177 /* Send ACL data */
3178 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3179 {
3180 	struct hci_acl_hdr *hdr;
3181 	int len = skb->len;
3182 
3183 	skb_push(skb, HCI_ACL_HDR_SIZE);
3184 	skb_reset_transport_header(skb);
3185 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3186 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3187 	hdr->dlen   = cpu_to_le16(len);
3188 }
3189 
3190 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3191 			  struct sk_buff *skb, __u16 flags)
3192 {
3193 	struct hci_conn *conn = chan->conn;
3194 	struct hci_dev *hdev = conn->hdev;
3195 	struct sk_buff *list;
3196 
3197 	skb->len = skb_headlen(skb);
3198 	skb->data_len = 0;
3199 
3200 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3201 
3202 	hci_add_acl_hdr(skb, conn->handle, flags);
3203 
3204 	list = skb_shinfo(skb)->frag_list;
3205 	if (!list) {
3206 		/* Non fragmented */
3207 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3208 
3209 		skb_queue_tail(queue, skb);
3210 	} else {
3211 		/* Fragmented */
3212 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3213 
3214 		skb_shinfo(skb)->frag_list = NULL;
3215 
3216 		/* Queue all fragments atomically. We need to use spin_lock_bh
3217 		 * here because of 6LoWPAN links, as there this function is
3218 		 * called from softirq and using normal spin lock could cause
3219 		 * deadlocks.
3220 		 */
3221 		spin_lock_bh(&queue->lock);
3222 
3223 		__skb_queue_tail(queue, skb);
3224 
3225 		flags &= ~ACL_START;
3226 		flags |= ACL_CONT;
3227 		do {
3228 			skb = list; list = list->next;
3229 
3230 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3231 			hci_add_acl_hdr(skb, conn->handle, flags);
3232 
3233 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3234 
3235 			__skb_queue_tail(queue, skb);
3236 		} while (list);
3237 
3238 		spin_unlock_bh(&queue->lock);
3239 	}
3240 }
3241 
3242 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3243 {
3244 	struct hci_dev *hdev = chan->conn->hdev;
3245 
3246 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3247 
3248 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3249 
3250 	queue_work(hdev->workqueue, &hdev->tx_work);
3251 }
3252 
3253 /* Send SCO data */
3254 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3255 {
3256 	struct hci_dev *hdev = conn->hdev;
3257 	struct hci_sco_hdr hdr;
3258 
3259 	BT_DBG("%s len %d", hdev->name, skb->len);
3260 
3261 	hdr.handle = cpu_to_le16(conn->handle);
3262 	hdr.dlen   = skb->len;
3263 
3264 	skb_push(skb, HCI_SCO_HDR_SIZE);
3265 	skb_reset_transport_header(skb);
3266 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3267 
3268 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3269 
3270 	skb_queue_tail(&conn->data_q, skb);
3271 	queue_work(hdev->workqueue, &hdev->tx_work);
3272 }
3273 
3274 /* Send ISO data */
3275 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3276 {
3277 	struct hci_iso_hdr *hdr;
3278 	int len = skb->len;
3279 
3280 	skb_push(skb, HCI_ISO_HDR_SIZE);
3281 	skb_reset_transport_header(skb);
3282 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3283 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3284 	hdr->dlen   = cpu_to_le16(len);
3285 }
3286 
3287 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3288 			  struct sk_buff *skb)
3289 {
3290 	struct hci_dev *hdev = conn->hdev;
3291 	struct sk_buff *list;
3292 	__u16 flags;
3293 
3294 	skb->len = skb_headlen(skb);
3295 	skb->data_len = 0;
3296 
3297 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3298 
3299 	list = skb_shinfo(skb)->frag_list;
3300 
3301 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3302 	hci_add_iso_hdr(skb, conn->handle, flags);
3303 
3304 	if (!list) {
3305 		/* Non fragmented */
3306 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3307 
3308 		skb_queue_tail(queue, skb);
3309 	} else {
3310 		/* Fragmented */
3311 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3312 
3313 		skb_shinfo(skb)->frag_list = NULL;
3314 
3315 		__skb_queue_tail(queue, skb);
3316 
3317 		do {
3318 			skb = list; list = list->next;
3319 
3320 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3321 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3322 						   0x00);
3323 			hci_add_iso_hdr(skb, conn->handle, flags);
3324 
3325 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3326 
3327 			__skb_queue_tail(queue, skb);
3328 		} while (list);
3329 	}
3330 }
3331 
3332 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3333 {
3334 	struct hci_dev *hdev = conn->hdev;
3335 
3336 	BT_DBG("%s len %d", hdev->name, skb->len);
3337 
3338 	hci_queue_iso(conn, &conn->data_q, skb);
3339 
3340 	queue_work(hdev->workqueue, &hdev->tx_work);
3341 }
3342 
3343 /* ---- HCI TX task (outgoing data) ---- */
3344 
3345 /* HCI Connection scheduler */
3346 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3347 {
3348 	struct hci_dev *hdev;
3349 	int cnt, q;
3350 
3351 	if (!conn) {
3352 		*quote = 0;
3353 		return;
3354 	}
3355 
3356 	hdev = conn->hdev;
3357 
3358 	switch (conn->type) {
3359 	case ACL_LINK:
3360 		cnt = hdev->acl_cnt;
3361 		break;
3362 	case SCO_LINK:
3363 	case ESCO_LINK:
3364 		cnt = hdev->sco_cnt;
3365 		break;
3366 	case LE_LINK:
3367 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3368 		break;
3369 	case ISO_LINK:
3370 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3371 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3372 		break;
3373 	default:
3374 		cnt = 0;
3375 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3376 	}
3377 
3378 	q = cnt / num;
3379 	*quote = q ? q : 1;
3380 }
3381 
3382 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
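/* Editorial note (not part of the original file): a worked example of the
 * quota above - with cnt = 8 free controller buffers shared by num = 3
 * ready connections, q = 8 / 3 = 2, so one scheduling round sends at most
 * two packets for the picked connection; when cnt < num, the "q ? q : 1"
 * fallback still grants one packet, so no connection is starved outright.
 */
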
3383 				     int *quote)
3384 {
3385 	struct hci_conn_hash *h = &hdev->conn_hash;
3386 	struct hci_conn *conn = NULL, *c;
3387 	unsigned int num = 0, min = ~0;
3388 
3389 	/* We don't have to lock device here. Connections are always
3390 	 * added and removed with TX task disabled. */
3391 
3392 	rcu_read_lock();
3393 
3394 	list_for_each_entry_rcu(c, &h->list, list) {
3395 		if (c->type != type || skb_queue_empty(&c->data_q))
3396 			continue;
3397 
3398 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3399 			continue;
3400 
3401 		num++;
3402 
3403 		if (c->sent < min) {
3404 			min  = c->sent;
3405 			conn = c;
3406 		}
3407 
3408 		if (hci_conn_num(hdev, type) == num)
3409 			break;
3410 	}
3411 
3412 	rcu_read_unlock();
3413 
3414 	hci_quote_sent(conn, num, quote);
3415 
3416 	BT_DBG("conn %p quote %d", conn, *quote);
3417 	return conn;
3418 }
3419 
3420 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3421 {
3422 	struct hci_conn_hash *h = &hdev->conn_hash;
3423 	struct hci_conn *c;
3424 
3425 	bt_dev_err(hdev, "link tx timeout");
3426 
3427 	rcu_read_lock();
3428 
3429 	/* Kill stalled connections */
3430 	list_for_each_entry_rcu(c, &h->list, list) {
3431 		if (c->type == type && c->sent) {
3432 			bt_dev_err(hdev, "killing stalled connection %pMR",
3433 				   &c->dst);
3434 			/* hci_disconnect might sleep, so, we have to release
3435 			 * the RCU read lock before calling it.
3436 			 */
3437 			rcu_read_unlock();
3438 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3439 			rcu_read_lock();
3440 		}
3441 	}
3442 
3443 	rcu_read_unlock();
3444 }
3445 
3446 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3447 				      int *quote)
3448 {
3449 	struct hci_conn_hash *h = &hdev->conn_hash;
3450 	struct hci_chan *chan = NULL;
3451 	unsigned int num = 0, min = ~0, cur_prio = 0;
3452 	struct hci_conn *conn;
3453 	int conn_num = 0;
3454 
3455 	BT_DBG("%s", hdev->name);
3456 
3457 	rcu_read_lock();
3458 
3459 	list_for_each_entry_rcu(conn, &h->list, list) {
3460 		struct hci_chan *tmp;
3461 
3462 		if (conn->type != type)
3463 			continue;
3464 
3465 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3466 			continue;
3467 
3468 		conn_num++;
3469 
3470 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3471 			struct sk_buff *skb;
3472 
3473 			if (skb_queue_empty(&tmp->data_q))
3474 				continue;
3475 
3476 			skb = skb_peek(&tmp->data_q);
3477 			if (skb->priority < cur_prio)
3478 				continue;
3479 
3480 			if (skb->priority > cur_prio) {
3481 				num = 0;
3482 				min = ~0;
3483 				cur_prio = skb->priority;
3484 			}
3485 
3486 			num++;
3487 
3488 			if (conn->sent < min) {
3489 				min  = conn->sent;
3490 				chan = tmp;
3491 			}
3492 		}
3493 
3494 		if (hci_conn_num(hdev, type) == conn_num)
3495 			break;
3496 	}
3497 
3498 	rcu_read_unlock();
3499 
3500 	if (!chan)
3501 		return NULL;
3502 
3503 	hci_quote_sent(chan->conn, num, quote);
3504 
3505 	BT_DBG("chan %p quote %d", chan, *quote);
3506 	return chan;
3507 }
3508 
3509 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3510 {
3511 	struct hci_conn_hash *h = &hdev->conn_hash;
3512 	struct hci_conn *conn;
3513 	int num = 0;
3514 
3515 	BT_DBG("%s", hdev->name);
3516 
3517 	rcu_read_lock();
3518 
3519 	list_for_each_entry_rcu(conn, &h->list, list) {
3520 		struct hci_chan *chan;
3521 
3522 		if (conn->type != type)
3523 			continue;
3524 
3525 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3526 			continue;
3527 
3528 		num++;
3529 
3530 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3531 			struct sk_buff *skb;
3532 
3533 			if (chan->sent) {
3534 				chan->sent = 0;
3535 				continue;
3536 			}
3537 
3538 			if (skb_queue_empty(&chan->data_q))
3539 				continue;
3540 
3541 			skb = skb_peek(&chan->data_q);
3542 			if (skb->priority >= HCI_PRIO_MAX - 1)
3543 				continue;
3544 
3545 			skb->priority = HCI_PRIO_MAX - 1;
3546 
3547 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3548 			       skb->priority);
3549 		}
3550 
3551 		if (hci_conn_num(hdev, type) == num)
3552 			break;
3553 	}
3554 
3555 	rcu_read_unlock();
3556 
3557 }
3558 
3559 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3560 {
3561 	unsigned long last_tx;
3562 
3563 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3564 		return;
3565 
3566 	switch (type) {
3567 	case LE_LINK:
3568 		last_tx = hdev->le_last_tx;
3569 		break;
3570 	default:
3571 		last_tx = hdev->acl_last_tx;
3572 		break;
3573 	}
3574 
3575 	/* The TX timeout must be longer than the maximum link supervision
3576 	 * timeout (40.9 seconds).
3577 	 */
3578 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3579 		hci_link_tx_to(hdev, type);
3580 }
3581 
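/* Editorial note (not part of the original file): the 40.9 s figure above is
 * the worst-case link supervision timeout, 0xffff slots * 0.625 ms/slot =
 * 40.959 s. HCI_ACL_TX_TIMEOUT (45 s in mainline hci.h) is deliberately
 * larger, so a link is only declared stalled after the controller itself
 * must have given up on it.
 */
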
3582 /* Schedule SCO */
3583 static void hci_sched_sco(struct hci_dev *hdev)
3584 {
3585 	struct hci_conn *conn;
3586 	struct sk_buff *skb;
3587 	int quote;
3588 
3589 	BT_DBG("%s", hdev->name);
3590 
3591 	if (!hci_conn_num(hdev, SCO_LINK))
3592 		return;
3593 
3594 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3595 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3596 			BT_DBG("skb %p len %d", skb, skb->len);
3597 			hci_send_frame(hdev, skb);
3598 
3599 			conn->sent++;
3600 			if (conn->sent == ~0)
3601 				conn->sent = 0;
3602 		}
3603 	}
3604 }
3605 
3606 static void hci_sched_esco(struct hci_dev *hdev)
3607 {
3608 	struct hci_conn *conn;
3609 	struct sk_buff *skb;
3610 	int quote;
3611 
3612 	BT_DBG("%s", hdev->name);
3613 
3614 	if (!hci_conn_num(hdev, ESCO_LINK))
3615 		return;
3616 
3617 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3618 						     &quote))) {
3619 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3620 			BT_DBG("skb %p len %d", skb, skb->len);
3621 			hci_send_frame(hdev, skb);
3622 
3623 			conn->sent++;
3624 			if (conn->sent == ~0)
3625 				conn->sent = 0;
3626 		}
3627 	}
3628 }
3629 
3630 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3631 {
3632 	unsigned int cnt = hdev->acl_cnt;
3633 	struct hci_chan *chan;
3634 	struct sk_buff *skb;
3635 	int quote;
3636 
3637 	__check_timeout(hdev, cnt, ACL_LINK);
3638 
3639 	while (hdev->acl_cnt &&
3640 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3641 		u32 priority = (skb_peek(&chan->data_q))->priority;
3642 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3643 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3644 			       skb->len, skb->priority);
3645 
3646 			/* Stop if priority has changed */
3647 			if (skb->priority < priority)
3648 				break;
3649 
3650 			skb = skb_dequeue(&chan->data_q);
3651 
3652 			hci_conn_enter_active_mode(chan->conn,
3653 						   bt_cb(skb)->force_active);
3654 
3655 			hci_send_frame(hdev, skb);
3656 			hdev->acl_last_tx = jiffies;
3657 
3658 			hdev->acl_cnt--;
3659 			chan->sent++;
3660 			chan->conn->sent++;
3661 
3662 			/* Send pending SCO packets right away */
3663 			hci_sched_sco(hdev);
3664 			hci_sched_esco(hdev);
3665 		}
3666 	}
3667 
3668 	if (cnt != hdev->acl_cnt)
3669 		hci_prio_recalculate(hdev, ACL_LINK);
3670 }
3671 
3672 static void hci_sched_acl(struct hci_dev *hdev)
3673 {
3674 	BT_DBG("%s", hdev->name);
3675 
3676 	/* No ACL link over BR/EDR controller */
3677 	if (!hci_conn_num(hdev, ACL_LINK))
3678 		return;
3679 
3680 	hci_sched_acl_pkt(hdev);
3681 }
3682 
3683 static void hci_sched_le(struct hci_dev *hdev)
3684 {
3685 	struct hci_chan *chan;
3686 	struct sk_buff *skb;
3687 	int quote, cnt, tmp;
3688 
3689 	BT_DBG("%s", hdev->name);
3690 
3691 	if (!hci_conn_num(hdev, LE_LINK))
3692 		return;
3693 
3694 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3695 
3696 	__check_timeout(hdev, cnt, LE_LINK);
3697 
3698 	tmp = cnt;
3699 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3700 		u32 priority = (skb_peek(&chan->data_q))->priority;
3701 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3702 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3703 			       skb->len, skb->priority);
3704 
3705 			/* Stop if priority has changed */
3706 			if (skb->priority < priority)
3707 				break;
3708 
3709 			skb = skb_dequeue(&chan->data_q);
3710 
3711 			hci_send_frame(hdev, skb);
3712 			hdev->le_last_tx = jiffies;
3713 
3714 			cnt--;
3715 			chan->sent++;
3716 			chan->conn->sent++;
3717 
3718 			/* Send pending SCO packets right away */
3719 			hci_sched_sco(hdev);
3720 			hci_sched_esco(hdev);
3721 		}
3722 	}
3723 
3724 	if (hdev->le_pkts)
3725 		hdev->le_cnt = cnt;
3726 	else
3727 		hdev->acl_cnt = cnt;
3728 
3729 	if (cnt != tmp)
3730 		hci_prio_recalculate(hdev, LE_LINK);
3731 }
3732 
3733 /* Schedule CIS */
3734 static void hci_sched_iso(struct hci_dev *hdev)
3735 {
3736 	struct hci_conn *conn;
3737 	struct sk_buff *skb;
3738 	int quote, *cnt;
3739 
3740 	BT_DBG("%s", hdev->name);
3741 
3742 	if (!hci_conn_num(hdev, ISO_LINK))
3743 		return;
3744 
3745 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3746 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3747 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3748 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3749 			BT_DBG("skb %p len %d", skb, skb->len);
3750 			hci_send_frame(hdev, skb);
3751 
3752 			conn->sent++;
3753 			if (conn->sent == ~0)
3754 				conn->sent = 0;
3755 			(*cnt)--;
3756 		}
3757 	}
3758 }
3759 
3760 static void hci_tx_work(struct work_struct *work)
3761 {
3762 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3763 	struct sk_buff *skb;
3764 
3765 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3766 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3767 
3768 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3769 		/* Schedule queues and send stuff to HCI driver */
3770 		hci_sched_sco(hdev);
3771 		hci_sched_esco(hdev);
3772 		hci_sched_iso(hdev);
3773 		hci_sched_acl(hdev);
3774 		hci_sched_le(hdev);
3775 	}
3776 
3777 	/* Send next queued raw (unknown type) packet */
3778 	while ((skb = skb_dequeue(&hdev->raw_q)))
3779 		hci_send_frame(hdev, skb);
3780 }
3781 
3782 /* ----- HCI RX task (incoming data processing) ----- */
3783 
3784 /* ACL data packet */
3785 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3786 {
3787 	struct hci_acl_hdr *hdr = (void *) skb->data;
3788 	struct hci_conn *conn;
3789 	__u16 handle, flags;
3790 
3791 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3792 
3793 	handle = __le16_to_cpu(hdr->handle);
3794 	flags  = hci_flags(handle);
3795 	handle = hci_handle(handle);
3796 
3797 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3798 	       handle, flags);
3799 
3800 	hdev->stat.acl_rx++;
3801 
3802 	hci_dev_lock(hdev);
3803 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3804 	hci_dev_unlock(hdev);
3805 
3806 	if (conn) {
3807 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3808 
3809 		/* Send to upper protocol */
3810 		l2cap_recv_acldata(conn, skb, flags);
3811 		return;
3812 	} else {
3813 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3814 			   handle);
3815 	}
3816 
3817 	kfree_skb(skb);
3818 }
3819 
3820 /* SCO data packet */
3821 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3822 {
3823 	struct hci_sco_hdr *hdr = (void *) skb->data;
3824 	struct hci_conn *conn;
3825 	__u16 handle, flags;
3826 
3827 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3828 
3829 	handle = __le16_to_cpu(hdr->handle);
3830 	flags  = hci_flags(handle);
3831 	handle = hci_handle(handle);
3832 
3833 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3834 	       handle, flags);
3835 
3836 	hdev->stat.sco_rx++;
3837 
3838 	hci_dev_lock(hdev);
3839 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3840 	hci_dev_unlock(hdev);
3841 
3842 	if (conn) {
3843 		/* Send to upper protocol */
3844 		hci_skb_pkt_status(skb) = flags & 0x03;
3845 		sco_recv_scodata(conn, skb);
3846 		return;
3847 	} else {
3848 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3849 				       handle);
3850 	}
3851 
3852 	kfree_skb(skb);
3853 }
3854 
3855 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3856 {
3857 	struct hci_iso_hdr *hdr;
3858 	struct hci_conn *conn;
3859 	__u16 handle, flags;
3860 
3861 	hdr = skb_pull_data(skb, sizeof(*hdr));
3862 	if (!hdr) {
3863 		bt_dev_err(hdev, "ISO packet too small");
3864 		goto drop;
3865 	}
3866 
3867 	handle = __le16_to_cpu(hdr->handle);
3868 	flags  = hci_flags(handle);
3869 	handle = hci_handle(handle);
3870 
3871 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3872 		   handle, flags);
3873 
3874 	hci_dev_lock(hdev);
3875 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3876 	hci_dev_unlock(hdev);
3877 
3878 	if (!conn) {
3879 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3880 			   handle);
3881 		goto drop;
3882 	}
3883 
3884 	/* Send to upper protocol */
3885 	iso_recv(conn, skb, flags);
3886 	return;
3887 
3888 drop:
3889 	kfree_skb(skb);
3890 }
3891 
3892 static bool hci_req_is_complete(struct hci_dev *hdev)
3893 {
3894 	struct sk_buff *skb;
3895 
3896 	skb = skb_peek(&hdev->cmd_q);
3897 	if (!skb)
3898 		return true;
3899 
3900 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3901 }
3902 
3903 static void hci_resend_last(struct hci_dev *hdev)
3904 {
3905 	struct hci_command_hdr *sent;
3906 	struct sk_buff *skb;
3907 	u16 opcode;
3908 
3909 	if (!hdev->sent_cmd)
3910 		return;
3911 
3912 	sent = (void *) hdev->sent_cmd->data;
3913 	opcode = __le16_to_cpu(sent->opcode);
3914 	if (opcode == HCI_OP_RESET)
3915 		return;
3916 
3917 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3918 	if (!skb)
3919 		return;
3920 
3921 	skb_queue_head(&hdev->cmd_q, skb);
3922 	queue_work(hdev->workqueue, &hdev->cmd_work);
3923 }
3924 
3925 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3926 			  hci_req_complete_t *req_complete,
3927 			  hci_req_complete_skb_t *req_complete_skb)
3928 {
3929 	struct sk_buff *skb;
3930 	unsigned long flags;
3931 
3932 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3933 
3934 	/* If the completed command doesn't match the last one that was
3935 	 * sent, we need to do special handling of it.
3936 	 */
3937 	if (!hci_sent_cmd_data(hdev, opcode)) {
3938 		/* Some CSR based controllers generate a spontaneous
3939 		 * reset complete event during init and any pending
3940 		 * command will never be completed. In such a case we
3941 		 * need to resend whatever was the last sent
3942 		 * command.
3943 		 */
3944 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3945 			hci_resend_last(hdev);
3946 
3947 		return;
3948 	}
3949 
3950 	/* If we reach this point this event matches the last command sent */
3951 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3952 
3953 	/* If the command succeeded and there's still more commands in
3954 	 * this request the request is not yet complete.
3955 	 */
3956 	if (!status && !hci_req_is_complete(hdev))
3957 		return;
3958 
3959 	skb = hdev->req_skb;
3960 
3961 	/* If this was the last command in a request the complete
3962 	 * callback would be found in hdev->req_skb instead of the
3963 	 * command queue (hdev->cmd_q).
3964 	 */
3965 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3966 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3967 		return;
3968 	}
3969 
3970 	if (skb && bt_cb(skb)->hci.req_complete) {
3971 		*req_complete = bt_cb(skb)->hci.req_complete;
3972 		return;
3973 	}
3974 
3975 	/* Remove all pending commands belonging to this request */
3976 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3977 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3978 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3979 			__skb_queue_head(&hdev->cmd_q, skb);
3980 			break;
3981 		}
3982 
3983 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3984 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3985 		else
3986 			*req_complete = bt_cb(skb)->hci.req_complete;
3987 		dev_kfree_skb_irq(skb);
3988 	}
3989 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3990 }
3991 
3992 static void hci_rx_work(struct work_struct *work)
3993 {
3994 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3995 	struct sk_buff *skb;
3996 
3997 	BT_DBG("%s", hdev->name);
3998 
3999 	/* The kcov_remote functions are used to collect packet parsing
4000 	 * coverage information from this background thread and to associate
4001 	 * the coverage with the syscall's thread which originally injected
4002 	 * the packet. This helps with fuzzing the kernel.
4003 	 */
4004 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4005 		kcov_remote_start_common(skb_get_kcov_handle(skb));
4006 
4007 		/* Send copy to monitor */
4008 		hci_send_to_monitor(hdev, skb);
4009 
4010 		if (atomic_read(&hdev->promisc)) {
4011 			/* Send copy to the sockets */
4012 			hci_send_to_sock(hdev, skb);
4013 		}
4014 
4015 		/* If the device has been opened in HCI_USER_CHANNEL,
4016 		 * userspace has exclusive access to the device.
4017 		 * While the device is in HCI_INIT, we still need to pass
4018 		 * the data packets to the driver in order
4019 		 * to complete its setup().
4020 		 */
4021 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4022 		    !test_bit(HCI_INIT, &hdev->flags)) {
4023 			kfree_skb(skb);
4024 			continue;
4025 		}
4026 
4027 		if (test_bit(HCI_INIT, &hdev->flags)) {
4028 			/* Don't process data packets in these states. */
4029 			switch (hci_skb_pkt_type(skb)) {
4030 			case HCI_ACLDATA_PKT:
4031 			case HCI_SCODATA_PKT:
4032 			case HCI_ISODATA_PKT:
4033 				kfree_skb(skb);
4034 				continue;
4035 			}
4036 		}
4037 
4038 		/* Process frame */
4039 		switch (hci_skb_pkt_type(skb)) {
4040 		case HCI_EVENT_PKT:
4041 			BT_DBG("%s Event packet", hdev->name);
4042 			hci_event_packet(hdev, skb);
4043 			break;
4044 
4045 		case HCI_ACLDATA_PKT:
4046 			BT_DBG("%s ACL data packet", hdev->name);
4047 			hci_acldata_packet(hdev, skb);
4048 			break;
4049 
4050 		case HCI_SCODATA_PKT:
4051 			BT_DBG("%s SCO data packet", hdev->name);
4052 			hci_scodata_packet(hdev, skb);
4053 			break;
4054 
4055 		case HCI_ISODATA_PKT:
4056 			BT_DBG("%s ISO data packet", hdev->name);
4057 			hci_isodata_packet(hdev, skb);
4058 			break;
4059 
4060 		default:
4061 			kfree_skb(skb);
4062 			break;
4063 		}
4064 	}
4065 }
4066 
4067 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4068 {
4069 	int err;
4070 
4071 	bt_dev_dbg(hdev, "skb %p", skb);
4072 
4073 	kfree_skb(hdev->sent_cmd);
4074 
4075 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4076 	if (!hdev->sent_cmd) {
4077 		skb_queue_head(&hdev->cmd_q, skb);
4078 		queue_work(hdev->workqueue, &hdev->cmd_work);
4079 		return;
4080 	}
4081 
4082 	err = hci_send_frame(hdev, skb);
4083 	if (err < 0) {
4084 		hci_cmd_sync_cancel_sync(hdev, -err);
4085 		return;
4086 	}
4087 
4088 	if (hci_req_status_pend(hdev) &&
4089 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4090 		kfree_skb(hdev->req_skb);
4091 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4092 	}
4093 
4094 	atomic_dec(&hdev->cmd_cnt);
4095 }
4096 
4097 static void hci_cmd_work(struct work_struct *work)
4098 {
4099 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4100 	struct sk_buff *skb;
4101 
4102 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4103 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4104 
4105 	/* Send queued commands */
4106 	if (atomic_read(&hdev->cmd_cnt)) {
4107 		skb = skb_dequeue(&hdev->cmd_q);
4108 		if (!skb)
4109 			return;
4110 
4111 		hci_send_cmd_sync(hdev, skb);
4112 
4113 		rcu_read_lock();
4114 		if (test_bit(HCI_RESET, &hdev->flags) ||
4115 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4116 			cancel_delayed_work(&hdev->cmd_timer);
4117 		else
4118 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4119 					   HCI_CMD_TIMEOUT);
4120 		rcu_read_unlock();
4121 	}
4122 }
4123