xref: /linux/net/bluetooth/hci_core.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI core. */
26 
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30 
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47 
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51 
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 
55 #define AUTO_OFF_TIMEOUT 2000
56 
57 int enable_hs;
58 
59 static void hci_cmd_task(unsigned long arg);
60 static void hci_rx_task(unsigned long arg);
61 static void hci_tx_task(unsigned long arg);
62 
63 static DEFINE_RWLOCK(hci_task_lock);
64 
65 /* HCI device list */
66 LIST_HEAD(hci_dev_list);
67 DEFINE_RWLOCK(hci_dev_list_lock);
68 
69 /* HCI callback list */
70 LIST_HEAD(hci_cb_list);
71 DEFINE_RWLOCK(hci_cb_list_lock);
72 
73 /* HCI protocols */
74 #define HCI_MAX_PROTO	2
75 struct hci_proto *hci_proto[HCI_MAX_PROTO];
76 
77 /* HCI notifiers list */
78 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
79 
80 /* ---- HCI notifications ---- */
81 
/* Add @nb to the atomic notifier chain that receives HCI device
 * events (HCI_DEV_REG, HCI_DEV_UP, ...). Returns 0 on success. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
86 
/* Remove @nb from the HCI event notifier chain. Returns 0 on success. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
91 
/* Broadcast @event for @hdev to every registered HCI notifier. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
96 
97 /* ---- HCI requests ---- */
98 
/* Complete a pending synchronous request: record @result and wake the
 * thread sleeping in __hci_request(). @cmd is the opcode that finished. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only hand the result over while someone is actually waiting. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115 
/* Abort a pending synchronous request with error @err (positive errno)
 * and wake the waiter in __hci_request(); used e.g. on device close. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
126 
127 /* Execute request and wait for completion. */
128 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
129 					unsigned long opt, __u32 timeout)
130 {
131 	DECLARE_WAITQUEUE(wait, current);
132 	int err = 0;
133 
134 	BT_DBG("%s start", hdev->name);
135 
136 	hdev->req_status = HCI_REQ_PEND;
137 
138 	add_wait_queue(&hdev->req_wait_q, &wait);
139 	set_current_state(TASK_INTERRUPTIBLE);
140 
141 	req(hdev, opt);
142 	schedule_timeout(timeout);
143 
144 	remove_wait_queue(&hdev->req_wait_q, &wait);
145 
146 	if (signal_pending(current))
147 		return -EINTR;
148 
149 	switch (hdev->req_status) {
150 	case HCI_REQ_DONE:
151 		err = -bt_to_errno(hdev->req_result);
152 		break;
153 
154 	case HCI_REQ_CANCELED:
155 		err = -hdev->req_result;
156 		break;
157 
158 	default:
159 		err = -ETIMEDOUT;
160 		break;
161 	}
162 
163 	hdev->req_status = hdev->req_result = 0;
164 
165 	BT_DBG("%s end: err %d", hdev->name, err);
166 
167 	return err;
168 }
169 
170 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
171 					unsigned long opt, __u32 timeout)
172 {
173 	int ret;
174 
175 	if (!test_bit(HCI_UP, &hdev->flags))
176 		return -ENETDOWN;
177 
178 	/* Serialize all requests */
179 	hci_req_lock(hdev);
180 	ret = __hci_request(hdev, req, opt, timeout);
181 	hci_req_unlock(hdev);
182 
183 	return ret;
184 }
185 
/* Request callback: issue an HCI Reset to the controller. The
 * HCI_RESET flag is consumed by the command-complete handling. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194 
/* Request callback: run the controller bring-up sequence. Queues the
 * driver-provided warm-up commands first, then the mandatory reads
 * (features, version, buffer sizes, address, ...) and a few optional
 * settings. Results are consumed by the event handlers; command order
 * matters and must not be changed. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all wildcard) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
259 
/* Request callback: LE-specific bring-up, run after hci_init_req()
 * on LE-capable controllers. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
267 
/* Request callback: set scan enable; @opt carries the SCAN_* bitmask. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
277 
/* Request callback: enable/disable authentication; @opt is the mode byte. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
287 
/* Request callback: set the encryption mode; @opt is the mode byte. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
297 
/* Request callback: set the default link policy; @opt holds the
 * policy bits, converted to little endian for the wire. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
307 
308 /* Get HCI device by index.
309  * Device is held on return. */
310 struct hci_dev *hci_dev_get(int index)
311 {
312 	struct hci_dev *hdev = NULL, *d;
313 
314 	BT_DBG("%d", index);
315 
316 	if (index < 0)
317 		return NULL;
318 
319 	read_lock(&hci_dev_list_lock);
320 	list_for_each_entry(d, &hci_dev_list, list) {
321 		if (d->id == index) {
322 			hdev = hci_dev_hold(d);
323 			break;
324 		}
325 	}
326 	read_unlock(&hci_dev_list_lock);
327 	return hdev;
328 }
329 
330 /* ---- Inquiry support ---- */
331 static void inquiry_cache_flush(struct hci_dev *hdev)
332 {
333 	struct inquiry_cache *cache = &hdev->inq_cache;
334 	struct inquiry_entry *next  = cache->list, *e;
335 
336 	BT_DBG("cache %p", cache);
337 
338 	cache->list = NULL;
339 	while ((e = next)) {
340 		next = e->next;
341 		kfree(e);
342 	}
343 }
344 
345 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346 {
347 	struct inquiry_cache *cache = &hdev->inq_cache;
348 	struct inquiry_entry *e;
349 
350 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351 
352 	for (e = cache->list; e; e = e->next)
353 		if (!bacmp(&e->data.bdaddr, bdaddr))
354 			break;
355 	return e;
356 }
357 
358 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
359 {
360 	struct inquiry_cache *cache = &hdev->inq_cache;
361 	struct inquiry_entry *ie;
362 
363 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
364 
365 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
366 	if (!ie) {
367 		/* Entry not in the cache. Add new one. */
368 		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
369 		if (!ie)
370 			return;
371 
372 		ie->next = cache->list;
373 		cache->list = ie;
374 	}
375 
376 	memcpy(&ie->data, data, sizeof(*data));
377 	ie->timestamp = jiffies;
378 	cache->timestamp = jiffies;
379 }
380 
381 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
382 {
383 	struct inquiry_cache *cache = &hdev->inq_cache;
384 	struct inquiry_info *info = (struct inquiry_info *) buf;
385 	struct inquiry_entry *e;
386 	int copied = 0;
387 
388 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
389 		struct inquiry_data *data = &e->data;
390 		bacpy(&info->bdaddr, &data->bdaddr);
391 		info->pscan_rep_mode	= data->pscan_rep_mode;
392 		info->pscan_period_mode	= data->pscan_period_mode;
393 		info->pscan_mode	= data->pscan_mode;
394 		memcpy(info->dev_class, data->dev_class, 3);
395 		info->clock_offset	= data->clock_offset;
396 		info++;
397 	}
398 
399 	BT_DBG("cache %p, copied %d", cache, copied);
400 	return copied;
401 }
402 
403 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
404 {
405 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
406 	struct hci_cp_inquiry cp;
407 
408 	BT_DBG("%s", hdev->name);
409 
410 	if (test_bit(HCI_INQUIRY, &hdev->flags))
411 		return;
412 
413 	/* Start Inquiry */
414 	memcpy(&cp.lap, &ir->lap, 3);
415 	cp.length  = ir->length;
416 	cp.num_rsp = ir->num_rsp;
417 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
418 }
419 
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry, then copy
 * the cached results back to user space. @arg points to a
 * struct hci_inquiry_req followed by room for the responses.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Re-inquire when the cache is stale, empty, or the caller
	 * explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit gives headroom */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the responses. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
485 
486 /* ---- HCI ioctl helpers ---- */
487 
/* Bring up HCI device @dev: open the transport, run the init request
 * sequence (unless the device is raw), and mark it HCI_UP. On init
 * failure the transport is closed and all queues/tasklets are cleaned
 * up. Returns 0 on success or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill blocks the radio */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport via the driver callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Mandatory controller initialization */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* Extra LE setup for LE-capable hosts */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power state while still in setup phase */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
574 
/* Tear down an HCI device: cancel pending requests, stop tasklets and
 * delayed work, flush connection/inquiry state, reset the controller,
 * close the transport, and drop the reference taken by hci_dev_open().
 * The teardown order here is load-bearing; do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drops the reference taken when the device came up */
	hci_dev_put(hdev);
	return 0;
}
651 
652 int hci_dev_close(__u16 dev)
653 {
654 	struct hci_dev *hdev;
655 	int err;
656 
657 	hdev = hci_dev_get(dev);
658 	if (!hdev)
659 		return -ENODEV;
660 	err = hci_dev_do_close(hdev);
661 	hci_dev_put(hdev);
662 	return err;
663 }
664 
/* ioctl backend: soft-reset HCI device @dev. Flushes queues and
 * connection state, then issues an HCI Reset unless the device is
 * raw. Returns 0 on success or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiet while we rip out its queues */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control counters to their post-reset state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
705 
706 int hci_dev_reset_stat(__u16 dev)
707 {
708 	struct hci_dev *hdev;
709 	int ret = 0;
710 
711 	hdev = hci_dev_get(dev);
712 	if (!hdev)
713 		return -ENODEV;
714 
715 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
716 
717 	hci_dev_put(hdev);
718 
719 	return ret;
720 }
721 
/* Dispatch a HCISET* ioctl: copy the struct hci_dev_req from user
 * space and apply the requested device setting. Returns 0 on success
 * or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Pure host-side setting; no controller round trip */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16: high half = mtu, low = pkts */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
796 
/* HCIGETDEVLIST ioctl backend: fill a struct hci_dev_list_req with
 * up to the caller-requested number of (id, flags) pairs for all
 * registered devices. Returns 0 on success or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access disables the auto-off machinery */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy-out to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
843 
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot
 * for the device named in the request. Returns 0 on success or a
 * negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access disables the auto-off machinery */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble = bus type, high nibble = device type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
885 
886 /* ---- Interface to HCI drivers ---- */
887 
888 static int hci_rfkill_set_block(void *data, bool blocked)
889 {
890 	struct hci_dev *hdev = data;
891 
892 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
893 
894 	if (!blocked)
895 		return 0;
896 
897 	hci_dev_do_close(hdev);
898 
899 	return 0;
900 }
901 
/* rfkill integration: only the block/unblock transition is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
905 
906 /* Alloc HCI device */
907 struct hci_dev *hci_alloc_dev(void)
908 {
909 	struct hci_dev *hdev;
910 
911 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
912 	if (!hdev)
913 		return NULL;
914 
915 	hci_init_sysfs(hdev);
916 	skb_queue_head_init(&hdev->driver_init);
917 
918 	return hdev;
919 }
920 EXPORT_SYMBOL(hci_alloc_dev);
921 
/* Release an HCI device allocated with hci_alloc_dev(). The struct
 * itself is freed by the sysfs device release callback once the last
 * reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
931 
/* Deferred power-on work: bring the device up, arm the auto-off timer
 * if requested, and announce the controller to mgmt once setup ends. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless something claims the
	 * device within AUTO_OFF_TIMEOUT ms. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
948 
949 static void hci_power_off(struct work_struct *work)
950 {
951 	struct hci_dev *hdev = container_of(work, struct hci_dev,
952 							power_off.work);
953 
954 	BT_DBG("%s", hdev->name);
955 
956 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
957 
958 	hci_dev_close(hdev->id);
959 }
960 
961 static void hci_discov_off(struct work_struct *work)
962 {
963 	struct hci_dev *hdev;
964 	u8 scan = SCAN_PAGE;
965 
966 	hdev = container_of(work, struct hci_dev, discov_off.work);
967 
968 	BT_DBG("%s", hdev->name);
969 
970 	hci_dev_lock_bh(hdev);
971 
972 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
973 
974 	hdev->discov_timeout = 0;
975 
976 	hci_dev_unlock_bh(hdev);
977 }
978 
979 int hci_uuids_clear(struct hci_dev *hdev)
980 {
981 	struct list_head *p, *n;
982 
983 	list_for_each_safe(p, n, &hdev->uuids) {
984 		struct bt_uuid *uuid;
985 
986 		uuid = list_entry(p, struct bt_uuid, list);
987 
988 		list_del(p);
989 		kfree(uuid);
990 	}
991 
992 	return 0;
993 }
994 
995 int hci_link_keys_clear(struct hci_dev *hdev)
996 {
997 	struct list_head *p, *n;
998 
999 	list_for_each_safe(p, n, &hdev->link_keys) {
1000 		struct link_key *key;
1001 
1002 		key = list_entry(p, struct link_key, list);
1003 
1004 		list_del(p);
1005 		kfree(key);
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1012 {
1013 	struct link_key *k;
1014 
1015 	list_for_each_entry(k, &hdev->link_keys, list)
1016 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1017 			return k;
1018 
1019 	return NULL;
1020 }
1021 
1022 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1023 						u8 key_type, u8 old_key_type)
1024 {
1025 	/* Legacy key */
1026 	if (key_type < 0x03)
1027 		return 1;
1028 
1029 	/* Debug keys are insecure so don't store them persistently */
1030 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1031 		return 0;
1032 
1033 	/* Changed combination key and there's no previous one */
1034 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1035 		return 0;
1036 
1037 	/* Security mode 3 case */
1038 	if (!conn)
1039 		return 1;
1040 
1041 	/* Neither local nor remote side had no-bonding as requirement */
1042 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1043 		return 1;
1044 
1045 	/* Local side had dedicated bonding as requirement */
1046 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1047 		return 1;
1048 
1049 	/* Remote side had dedicated bonding as requirement */
1050 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1051 		return 1;
1052 
1053 	/* If none of the above criteria match, then don't store the key
1054 	 * persistently */
1055 	return 0;
1056 }
1057 
1058 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1059 {
1060 	struct link_key *k;
1061 
1062 	list_for_each_entry(k, &hdev->link_keys, list) {
1063 		struct key_master_id *id;
1064 
1065 		if (k->type != HCI_LK_SMP_LTK)
1066 			continue;
1067 
1068 		if (k->dlen != sizeof(*id))
1069 			continue;
1070 
1071 		id = (void *) &k->data;
1072 		if (id->ediv == ediv &&
1073 				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1074 			return k;
1075 	}
1076 
1077 	return NULL;
1078 }
1079 EXPORT_SYMBOL(hci_find_ltk);
1080 
1081 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1082 					bdaddr_t *bdaddr, u8 type)
1083 {
1084 	struct link_key *k;
1085 
1086 	list_for_each_entry(k, &hdev->link_keys, list)
1087 		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1088 			return k;
1089 
1090 	return NULL;
1091 }
1092 EXPORT_SYMBOL(hci_find_link_key_type);
1093 
/* Store (or update) the link key for @bdaddr. @new_key says whether
 * this came from a fresh pairing; in that case user space is notified
 * and non-persistent keys are dropped again after the notification.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* One-shot keys are discarded right after notifying user space */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1148 
/* Store (or update) an SMP long term key for @bdaddr. The ediv/rand
 * master id is kept in the flexible data trailer of struct link_key.
 * Notifies user space when @new_key is set. Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate with room for the key_master_id trailer */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* pin_len doubles as the encryption key size for LTKs */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1186 
1187 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1188 {
1189 	struct link_key *key;
1190 
1191 	key = hci_find_link_key(hdev, bdaddr);
1192 	if (!key)
1193 		return -ENOENT;
1194 
1195 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1196 
1197 	list_del(&key->list);
1198 	kfree(key);
1199 
1200 	return 0;
1201 }
1202 
/* HCI command timer function: fires when a sent command got no
 * response in time; re-arm the command credit and kick the command
 * tasklet so the queue does not stall. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1212 
1213 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1214 							bdaddr_t *bdaddr)
1215 {
1216 	struct oob_data *data;
1217 
1218 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1219 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1220 			return data;
1221 
1222 	return NULL;
1223 }
1224 
1225 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1226 {
1227 	struct oob_data *data;
1228 
1229 	data = hci_find_remote_oob_data(hdev, bdaddr);
1230 	if (!data)
1231 		return -ENOENT;
1232 
1233 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1234 
1235 	list_del(&data->list);
1236 	kfree(data);
1237 
1238 	return 0;
1239 }
1240 
1241 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1242 {
1243 	struct oob_data *data, *n;
1244 
1245 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1246 		list_del(&data->list);
1247 		kfree(data);
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1254 								u8 *randomizer)
1255 {
1256 	struct oob_data *data;
1257 
1258 	data = hci_find_remote_oob_data(hdev, bdaddr);
1259 
1260 	if (!data) {
1261 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1262 		if (!data)
1263 			return -ENOMEM;
1264 
1265 		bacpy(&data->bdaddr, bdaddr);
1266 		list_add(&data->list, &hdev->remote_oob_data);
1267 	}
1268 
1269 	memcpy(data->hash, hash, sizeof(data->hash));
1270 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1271 
1272 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1273 
1274 	return 0;
1275 }
1276 
1277 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1278 						bdaddr_t *bdaddr)
1279 {
1280 	struct bdaddr_list *b;
1281 
1282 	list_for_each_entry(b, &hdev->blacklist, list)
1283 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1284 			return b;
1285 
1286 	return NULL;
1287 }
1288 
1289 int hci_blacklist_clear(struct hci_dev *hdev)
1290 {
1291 	struct list_head *p, *n;
1292 
1293 	list_for_each_safe(p, n, &hdev->blacklist) {
1294 		struct bdaddr_list *b;
1295 
1296 		b = list_entry(p, struct bdaddr_list, list);
1297 
1298 		list_del(p);
1299 		kfree(b);
1300 	}
1301 
1302 	return 0;
1303 }
1304 
1305 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1306 {
1307 	struct bdaddr_list *entry;
1308 
1309 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1310 		return -EBADF;
1311 
1312 	if (hci_blacklist_lookup(hdev, bdaddr))
1313 		return -EEXIST;
1314 
1315 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1316 	if (!entry)
1317 		return -ENOMEM;
1318 
1319 	bacpy(&entry->bdaddr, bdaddr);
1320 
1321 	list_add(&entry->list, &hdev->blacklist);
1322 
1323 	return mgmt_device_blocked(hdev, bdaddr);
1324 }
1325 
1326 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1327 {
1328 	struct bdaddr_list *entry;
1329 
1330 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1331 		return hci_blacklist_clear(hdev);
1332 
1333 	entry = hci_blacklist_lookup(hdev, bdaddr);
1334 	if (!entry)
1335 		return -ENOENT;
1336 
1337 	list_del(&entry->list);
1338 	kfree(entry);
1339 
1340 	return mgmt_device_unblocked(hdev, bdaddr);
1341 }
1342 
/* Advertising-cache expiry timer callback: drop all cached LE
 * advertising entries under the device lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1353 
1354 int hci_adv_entries_clear(struct hci_dev *hdev)
1355 {
1356 	struct adv_entry *entry, *tmp;
1357 
1358 	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1359 		list_del(&entry->list);
1360 		kfree(entry);
1361 	}
1362 
1363 	BT_DBG("%s adv cache cleared", hdev->name);
1364 
1365 	return 0;
1366 }
1367 
1368 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369 {
1370 	struct adv_entry *entry;
1371 
1372 	list_for_each_entry(entry, &hdev->adv_entries, list)
1373 		if (bacmp(bdaddr, &entry->bdaddr) == 0)
1374 			return entry;
1375 
1376 	return NULL;
1377 }
1378 
1379 static inline int is_connectable_adv(u8 evt_type)
1380 {
1381 	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1382 		return 1;
1383 
1384 	return 0;
1385 }
1386 
1387 int hci_add_adv_entry(struct hci_dev *hdev,
1388 					struct hci_ev_le_advertising_info *ev)
1389 {
1390 	struct adv_entry *entry;
1391 
1392 	if (!is_connectable_adv(ev->evt_type))
1393 		return -EINVAL;
1394 
1395 	/* Only new entries should be added to adv_entries. So, if
1396 	 * bdaddr was found, don't add it. */
1397 	if (hci_find_adv_entry(hdev, &ev->bdaddr))
1398 		return 0;
1399 
1400 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1401 	if (!entry)
1402 		return -ENOMEM;
1403 
1404 	bacpy(&entry->bdaddr, &ev->bdaddr);
1405 	entry->bdaddr_type = ev->bdaddr_type;
1406 
1407 	list_add(&entry->list, &hdev->adv_entries);
1408 
1409 	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1410 				batostr(&entry->bdaddr), entry->bdaddr_type);
1411 
1412 	return 0;
1413 }
1414 
/* Register HCI device.
 *
 * Assigns the first free hci%d index, initializes all per-device state
 * (tasklets, queues, timers, lists), creates the sysfs entries, the
 * workqueue and the rfkill switch, and finally schedules power-on.
 * Returns the assigned id on success or a negative error; on error the
 * device is unlinked from the global list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the basic transport callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after @head so the list stays sorted by id */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Default flags, packet types and link policy */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Command, RX and TX processing run in tasklet context */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Detects controllers that stop answering commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* LE advertising cache with its expiry timer */
	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup mode and are auto-powered off
	 * later unless mgmt takes them over */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1541 
/* Unregister HCI device.
 *
 * Unlinks the device from the global list, shuts it down, tears down
 * mgmt state, sysfs, rfkill, timers and the workqueue, frees all
 * per-device caches and finally drops the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new users find it */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached per-device data under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the reference taken at registration time */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1593 
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend; no controller
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1601 
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume; no controller
 * state is changed here.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1609 
1610 /* Receive frame from HCI drivers */
1611 int hci_recv_frame(struct sk_buff *skb)
1612 {
1613 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1614 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1615 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1616 		kfree_skb(skb);
1617 		return -ENXIO;
1618 	}
1619 
1620 	/* Incomming skb */
1621 	bt_cb(skb)->incoming = 1;
1622 
1623 	/* Time stamp */
1624 	__net_timestamp(skb);
1625 
1626 	/* Queue frame for rx task */
1627 	skb_queue_tail(&hdev->rx_q, skb);
1628 	tasklet_schedule(&hdev->rx_task);
1629 
1630 	return 0;
1631 }
1632 EXPORT_SYMBOL(hci_recv_frame);
1633 
/* Incrementally reassemble one HCI packet of @type from driver data.
 *
 * @data/@count describe the next chunk of bytes; @index selects the
 * reassembly slot in hdev->reassembly[].  A fresh skb sized for the
 * packet type is allocated when the slot is empty.  Once the header
 * has been copied, the expected payload length is read from it; when
 * the full packet is assembled it is pushed into the stack via
 * hci_recv_frame().
 *
 * Returns the number of input bytes left unconsumed (>= 0), or a
 * negative error (-EILSEQ for bad type/index, -ENOMEM on allocation
 * failure or if the advertised payload exceeds the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer big enough
		 * for the largest packet of this type and expect the
		 * header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the bytes still expected for this stage */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When the header is complete, read the payload length
		 * from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1742 
1743 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1744 {
1745 	int rem = 0;
1746 
1747 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1748 		return -EILSEQ;
1749 
1750 	while (count) {
1751 		rem = hci_reassembly(hdev, type, data, count, type - 1);
1752 		if (rem < 0)
1753 			return rem;
1754 
1755 		data += (count - rem);
1756 		count = rem;
1757 	}
1758 
1759 	return rem;
1760 }
1761 EXPORT_SYMBOL(hci_recv_fragment);
1762 
1763 #define STREAM_REASSEMBLY 0
1764 
1765 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1766 {
1767 	int type;
1768 	int rem = 0;
1769 
1770 	while (count) {
1771 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1772 
1773 		if (!skb) {
1774 			struct { char type; } *pkt;
1775 
1776 			/* Start of the frame */
1777 			pkt = data;
1778 			type = pkt->type;
1779 
1780 			data++;
1781 			count--;
1782 		} else
1783 			type = bt_cb(skb)->pkt_type;
1784 
1785 		rem = hci_reassembly(hdev, type, data, count,
1786 							STREAM_REASSEMBLY);
1787 		if (rem < 0)
1788 			return rem;
1789 
1790 		data += (count - rem);
1791 		count = rem;
1792 	}
1793 
1794 	return rem;
1795 }
1796 EXPORT_SYMBOL(hci_recv_stream_fragment);
1797 
1798 /* ---- Interface to upper protocols ---- */
1799 
1800 /* Register/Unregister protocols.
1801  * hci_task_lock is used to ensure that no tasks are running. */
1802 int hci_register_proto(struct hci_proto *hp)
1803 {
1804 	int err = 0;
1805 
1806 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807 
1808 	if (hp->id >= HCI_MAX_PROTO)
1809 		return -EINVAL;
1810 
1811 	write_lock_bh(&hci_task_lock);
1812 
1813 	if (!hci_proto[hp->id])
1814 		hci_proto[hp->id] = hp;
1815 	else
1816 		err = -EEXIST;
1817 
1818 	write_unlock_bh(&hci_task_lock);
1819 
1820 	return err;
1821 }
1822 EXPORT_SYMBOL(hci_register_proto);
1823 
1824 int hci_unregister_proto(struct hci_proto *hp)
1825 {
1826 	int err = 0;
1827 
1828 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829 
1830 	if (hp->id >= HCI_MAX_PROTO)
1831 		return -EINVAL;
1832 
1833 	write_lock_bh(&hci_task_lock);
1834 
1835 	if (hci_proto[hp->id])
1836 		hci_proto[hp->id] = NULL;
1837 	else
1838 		err = -ENOENT;
1839 
1840 	write_unlock_bh(&hci_task_lock);
1841 
1842 	return err;
1843 }
1844 EXPORT_SYMBOL(hci_unregister_proto);
1845 
/* Add @cb to the global HCI callback list.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1857 
/* Remove @cb from the global HCI callback list.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1869 
/* Hand one outgoing frame to the driver.
 *
 * In promiscuous mode a time-stamped copy is first delivered to the
 * monitoring sockets.  Returns the driver's send result, or -ENODEV
 * (freeing the skb) when no device is attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Copy for the HCI monitoring sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1893 
/* Send HCI command.
 *
 * Builds a command packet (header + @plen bytes of @param) and queues
 * it for the command tasklet.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	/* NOTE(review): plen is __u32 but the header length field is a
	 * single byte per the HCI packet format — callers are trusted to
	 * pass parameter blocks <= 255 bytes; confirm no caller exceeds
	 * this. */
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init so
	 * the init sequence can be resumed on completion */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1929 
1930 /* Get data from the previously sent command */
1931 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1932 {
1933 	struct hci_command_hdr *hdr;
1934 
1935 	if (!hdev->sent_cmd)
1936 		return NULL;
1937 
1938 	hdr = (void *) hdev->sent_cmd->data;
1939 
1940 	if (hdr->opcode != cpu_to_le16(opcode))
1941 		return NULL;
1942 
1943 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1944 
1945 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946 }
1947 
/* Send ACL data */
/* Prepend an ACL header (connection handle + packet-boundary flags,
 * and the payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1960 
/* Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue.  The first fragment keeps the caller's boundary flags; all
 * following fragments are re-marked as continuations and get their own
 * ACL header.  Fragments are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All further fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2001 
/* Send ACL data on @chan: attach the ACL header, queue the skb (with
 * fragments) on the channel's data queue and kick the TX tasklet. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2018 
/* Send SCO data */
/* Prepend a SCO header to @skb, queue it on the connection's data
 * queue and kick the TX tasklet.
 *
 * NOTE(review): hdr.dlen is assigned from skb->len; per the HCI packet
 * format the SCO length field is a single byte, so callers are trusted
 * to keep payloads small enough — confirm against the SCO MTU checks
 * in the callers.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2041 
2042 /* ---- HCI TX task (outgoing data) ---- */
2043 
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets, and compute its fair-share quota from the
 * controller's free buffer count for that link type.
 * Returns the chosen connection (or NULL) and the quota via *quote.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Select the controller buffer pool for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the buffers, but at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2098 
/* TX watchdog: disconnect every connection of @type that still has
 * unacknowledged packets, since the controller stopped returning
 * buffer credits for them. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
2115 
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head skb has the highest priority, breaking ties by
 * the fewest packets in flight on the owning connection.  The quota is
 * the fair share of the free controller buffers, at least one.
 * Returns the chosen channel or NULL.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters here */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-loaded connection at this
			 * priority level */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Select the controller buffer pool for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the buffers, but at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2193 
/* Anti-starvation pass after a scheduling round: promote the head skb
 * of every channel of @type that queued data but sent nothing this
 * round to (HCI_PRIO_MAX - 1), so it wins the next round.  Channels
 * that did send have their per-round counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel sent something: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost its head skb */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2240 
/* Drain queued ACL data: repeatedly pick the best channel via
 * hci_chan_sent() and send up to its quota while controller ACL
 * buffers remain, then rebalance priorities if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting credit count to detect progress */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2290 
/* Schedule SCO */
/* Drain queued SCO data while controller SCO buffers remain, serving
 * the least-loaded connection first via hci_low_sent(). */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Reset the per-connection counter before it wraps */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2314 
/* Drain queued eSCO data; same scheme as hci_sched_sco() but for
 * ESCO_LINK connections (the sco_cnt buffer pool is shared). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			/* Reset the per-connection counter before it wraps */
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2337 
/* Drain queued LE data.  Works like hci_sched_acl() but draws credits
 * from the LE buffer pool when the controller has one, falling back to
 * the shared ACL pool otherwise. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they
	 * were drawn from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2388 
/* TX tasklet: run all per-link-type schedulers and then flush any raw
 * (unknown type) packets straight to the driver.  Runs under the
 * hci_task_lock read lock so protocol handlers cannot unregister
 * mid-run. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2415 
2416 /* ----- HCI RX task (incoming data processing) ----- */
2417 
/* ACL data packet */
/* Deliver an incoming ACL data packet to the L2CAP protocol handler
 * for its connection; drop it if the handle is unknown or no handler
 * is registered.  Consumes the skb.
 *
 * NOTE(review): conn is looked up under the device lock but used after
 * unlock — presumably safe because connections are only torn down with
 * the RX/TX tasklets disabled; confirm.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the header field into handle and boundary flags */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Handler takes ownership of the skb */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2457 
/* SCO data packet */
/* Deliver an incoming SCO data packet to the SCO protocol handler for
 * its connection; drop it if the handle is unknown or no handler is
 * registered.  Consumes the skb. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Handler takes ownership of the skb */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2493 
/* RX tasklet: drain hdev->rx_q and dispatch each queued frame.
 *
 * arg is the struct hci_dev pointer cast through unsigned long, as
 * required by the tasklet callback signature.  The check order below
 * matters: a promiscuous-mode copy goes to the sockets first, then
 * HCI_RAW short-circuits stack processing entirely, then data packets
 * are dropped while the device is still initializing.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			/* Raw mode: the stack does not process frames;
			 * the socket copy above (if any) is the only
			 * consumer. */
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events still get through so init can proceed. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler consumes the skb. */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing can consume it. */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2548 
/* Command TX tasklet: send one queued HCI command when cmd_cnt allows.
 *
 * arg is the struct hci_dev pointer cast through unsigned long.
 * cmd_cnt acts as a send credit: it is decremented on a successful
 * send here (and presumably replenished by the event path when the
 * controller acknowledges — not visible in this file section).
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously kept command; a clone of the new
		 * one is stored so other code paths can refer back to the
		 * last command sent (consumer not visible here). */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* NOTE(review): while HCI_RESET is pending the
			 * command timer is cancelled rather than re-armed —
			 * presumably because a reset may legitimately take
			 * longer than HCI_CMD_TIMEOUT; confirm. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (atomic allocation): put the command
			 * back at the head of the queue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2579 
2580 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2581 {
2582 	/* General inquiry access code (GIAC) */
2583 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2584 	struct hci_cp_inquiry cp;
2585 
2586 	BT_DBG("%s", hdev->name);
2587 
2588 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2589 		return -EINPROGRESS;
2590 
2591 	memset(&cp, 0, sizeof(cp));
2592 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2593 	cp.length  = length;
2594 
2595 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2596 }
2597 
2598 int hci_cancel_inquiry(struct hci_dev *hdev)
2599 {
2600 	BT_DBG("%s", hdev->name);
2601 
2602 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2603 		return -EPERM;
2604 
2605 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2606 }
2607 
/* Module parameter toggling "High Speed" support, writable via sysfs
 * (mode 0644).  NOTE(review): enable_hs is defined elsewhere in this
 * file; presumably it gates the AMP/High Speed feature — confirm at
 * its definition. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");
2610