xref: /linux/net/bluetooth/hci_core.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI core. */
26 
27 #include <linux/module.h>
28 #include <linux/kmod.h>
29 
30 #include <linux/types.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/skbuff.h>
39 #include <linux/interrupt.h>
40 #include <linux/notifier.h>
41 #include <net/sock.h>
42 
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unaligned.h>
46 
47 #include <net/bluetooth/bluetooth.h>
48 #include <net/bluetooth/hci_core.h>
49 
50 #ifndef CONFIG_BT_HCI_CORE_DEBUG
51 #undef  BT_DBG
52 #define BT_DBG(D...)
53 #endif
54 
55 static void hci_cmd_task(unsigned long arg);
56 static void hci_rx_task(unsigned long arg);
57 static void hci_tx_task(unsigned long arg);
58 static void hci_notify(struct hci_dev *hdev, int event);
59 
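/*
 * hci_task_lock is taken for reading by the rx/tx tasklets and for
 * writing by hci_register_proto()/hci_unregister_proto(), so an upper
 * protocol can never disappear while a tasklet is delivering data to it.
 */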
60 static DEFINE_RWLOCK(hci_task_lock);
61 
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65 
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69 
70 /* HCI protocols */
71 #define HCI_MAX_PROTO	2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73 
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76 
77 /* ---- HCI notifications ---- */
78 
79 int hci_register_notifier(struct notifier_block *nb)
80 {
81 	return atomic_notifier_chain_register(&hci_notifier, nb);
82 }
83 
84 int hci_unregister_notifier(struct notifier_block *nb)
85 {
86 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 }
88 
89 static void hci_notify(struct hci_dev *hdev, int event)
90 {
91 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 }
93 
94 /* ---- HCI requests ---- */
95 
96 void hci_req_complete(struct hci_dev *hdev, int result)
97 {
98 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
99 
100 	if (hdev->req_status == HCI_REQ_PEND) {
101 		hdev->req_result = result;
102 		hdev->req_status = HCI_REQ_DONE;
103 		wake_up_interruptible(&hdev->req_wait_q);
104 	}
105 }
106 
107 static void hci_req_cancel(struct hci_dev *hdev, int err)
108 {
109 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
110 
111 	if (hdev->req_status == HCI_REQ_PEND) {
112 		hdev->req_result = err;
113 		hdev->req_status = HCI_REQ_CANCELED;
114 		wake_up_interruptible(&hdev->req_wait_q);
115 	}
116 }
117 
118 /* Execute request and wait for completion. */
119 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
120 				unsigned long opt, __u32 timeout)
121 {
122 	DECLARE_WAITQUEUE(wait, current);
123 	int err = 0;
124 
125 	BT_DBG("%s start", hdev->name);
126 
127 	hdev->req_status = HCI_REQ_PEND;
128 
129 	add_wait_queue(&hdev->req_wait_q, &wait);
130 	set_current_state(TASK_INTERRUPTIBLE);
131 
132 	req(hdev, opt);
133 	schedule_timeout(timeout);
134 
135 	remove_wait_queue(&hdev->req_wait_q, &wait);
136 
137 	if (signal_pending(current))
138 		return -EINTR;
139 
140 	switch (hdev->req_status) {
141 	case HCI_REQ_DONE:
142 		err = -bt_err(hdev->req_result);
143 		break;
144 
145 	case HCI_REQ_CANCELED:
146 		err = -hdev->req_result;
147 		break;
148 
149 	default:
150 		err = -ETIMEDOUT;
151 		break;
152 	}
153 
154 	hdev->req_status = hdev->req_result = 0;
155 
156 	BT_DBG("%s end: err %d", hdev->name, err);
157 
158 	return err;
159 }
160 
161 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
162 				unsigned long opt, __u32 timeout)
163 {
164 	int ret;
165 
166 	/* Serialize all requests */
167 	hci_req_lock(hdev);
168 	ret = __hci_request(hdev, req, opt, timeout);
169 	hci_req_unlock(hdev);
170 
171 	return ret;
172 }
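
/*
 * A minimal usage sketch (mirroring the HCISETAUTH path in
 * hci_dev_cmd() below): the request callback only queues HCI commands,
 * and completion is signalled from the event path via
 * hci_req_complete().
 *
 *	err = hci_request(hdev, hci_auth_req, enable,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * Here "enable" is a hypothetical __u8 flag supplied by the caller.
 */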
173 
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
175 {
176 	BT_DBG("%s %ld", hdev->name, opt);
177 
178 	/* Reset device */
179 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
180 }
181 
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
183 {
184 	struct sk_buff *skb;
185 	__le16 param;
186 
187 	BT_DBG("%s %ld", hdev->name, opt);
188 
189 	/* Driver initialization */
190 
191 	/* Special commands */
192 	while ((skb = skb_dequeue(&hdev->driver_init))) {
193 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
194 		skb->dev = (void *) hdev;
195 		skb_queue_tail(&hdev->cmd_q, skb);
196 		hci_sched_cmd(hdev);
197 	}
198 	skb_queue_purge(&hdev->driver_init);
199 
200 	/* Mandatory initialization */
201 
202 	/* Reset */
203 	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
204 		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
205 
206 	/* Read Local Supported Features */
207 	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
208 
209 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
210 	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
211 
212 #if 0
213 	/* Host buffer size */
214 	{
215 		struct hci_cp_host_buffer_size cp;
216 		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
217 		cp.sco_mtu = HCI_MAX_SCO_SIZE;
218 		cp.acl_max_pkt = __cpu_to_le16(0xffff);
219 		cp.sco_max_pkt = __cpu_to_le16(0xffff);
220 		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
221 	}
222 #endif
223 
224 	/* Read BD Address */
225 	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
226 
227 	/* Read Voice Setting */
228 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
229 
230 	/* Optional initialization */
231 
232 	/* Clear Event Filters */
233 	{
234 		struct hci_cp_set_event_flt cp;
235 		cp.flt_type  = HCI_FLT_CLEAR_ALL;
236 		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
237 	}
238 
239 	/* Page timeout: 0x8000 * 0.625 ms slots = 20.48 secs */
240 	param = __cpu_to_le16(0x8000);
241 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
242 
243 	/* Connection accept timeout: 0x7d00 * 0.625 ms slots = 20 secs */
244 	param = __cpu_to_le16(0x7d00);
245 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
246 }
247 
248 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
249 {
250 	__u8 scan = opt;
251 
252 	BT_DBG("%s %x", hdev->name, scan);
253 
254 	/* Inquiry and Page scans */
255 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
256 }
257 
258 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
259 {
260 	__u8 auth = opt;
261 
262 	BT_DBG("%s %x", hdev->name, auth);
263 
264 	/* Authentication */
265 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
266 }
267 
268 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
269 {
270 	__u8 encrypt = opt;
271 
272 	BT_DBG("%s %x", hdev->name, encrypt);
273 
274 	/* Encryption */
275 	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
276 }
277 
278 /* Get HCI device by index.
279  * Device is held on return; callers must release it with hci_dev_put(). */
280 struct hci_dev *hci_dev_get(int index)
281 {
282 	struct hci_dev *hdev = NULL;
283 	struct list_head *p;
284 
285 	BT_DBG("%d", index);
286 
287 	if (index < 0)
288 		return NULL;
289 
290 	read_lock(&hci_dev_list_lock);
291 	list_for_each(p, &hci_dev_list) {
292 		struct hci_dev *d = list_entry(p, struct hci_dev, list);
293 		if (d->id == index) {
294 			hdev = hci_dev_hold(d);
295 			break;
296 		}
297 	}
298 	read_unlock(&hci_dev_list_lock);
299 	return hdev;
300 }
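
/*
 * Every successful hci_dev_get() must be balanced by hci_dev_put();
 * see hci_inquiry() below for the usual get/use/put pattern.
 */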
301 
302 /* ---- Inquiry support ---- */
303 static void inquiry_cache_flush(struct hci_dev *hdev)
304 {
305 	struct inquiry_cache *cache = &hdev->inq_cache;
306 	struct inquiry_entry *next  = cache->list, *e;
307 
308 	BT_DBG("cache %p", cache);
309 
310 	cache->list = NULL;
311 	while ((e = next)) {
312 		next = e->next;
313 		kfree(e);
314 	}
315 }
316 
317 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
318 {
319 	struct inquiry_cache *cache = &hdev->inq_cache;
320 	struct inquiry_entry *e;
321 
322 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
323 
324 	for (e = cache->list; e; e = e->next)
325 		if (!bacmp(&e->data.bdaddr, bdaddr))
326 			break;
327 	return e;
328 }
329 
330 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
331 {
332 	struct inquiry_cache *cache = &hdev->inq_cache;
333 	struct inquiry_entry *e;
334 
335 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
336 
337 	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
338 		/* Entry not in the cache. Add new one. */
339 		if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
340 			return;
341 		memset(e, 0, sizeof(struct inquiry_entry));
342 		e->next     = cache->list;
343 		cache->list = e;
344 	}
345 
346 	memcpy(&e->data, data, sizeof(*data));
347 	e->timestamp = jiffies;
348 	cache->timestamp = jiffies;
349 }
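
/*
 * The inquiry cache is a simple singly linked list with new entries
 * added at the head.  Callers are expected to hold the device lock;
 * hci_inquiry() below takes it with hci_dev_lock_bh() around the
 * flush and dump operations.
 */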
350 
351 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
352 {
353 	struct inquiry_cache *cache = &hdev->inq_cache;
354 	struct inquiry_info *info = (struct inquiry_info *) buf;
355 	struct inquiry_entry *e;
356 	int copied = 0;
357 
358 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
359 		struct inquiry_data *data = &e->data;
360 		bacpy(&info->bdaddr, &data->bdaddr);
361 		info->pscan_rep_mode	= data->pscan_rep_mode;
362 		info->pscan_period_mode	= data->pscan_period_mode;
363 		info->pscan_mode	= data->pscan_mode;
364 		memcpy(info->dev_class, data->dev_class, 3);
365 		info->clock_offset	= data->clock_offset;
366 		info++;
367 	}
368 
369 	BT_DBG("cache %p, copied %d", cache, copied);
370 	return copied;
371 }
372 
373 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
374 {
375 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
376 	struct hci_cp_inquiry cp;
377 
378 	BT_DBG("%s", hdev->name);
379 
380 	if (test_bit(HCI_INQUIRY, &hdev->flags))
381 		return;
382 
383 	/* Start Inquiry */
384 	memcpy(&cp.lap, &ir->lap, 3);
385 	cp.length  = ir->length;
386 	cp.num_rsp = ir->num_rsp;
387 	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
388 }
389 
390 int hci_inquiry(void __user *arg)
391 {
392 	__u8 __user *ptr = arg;
393 	struct hci_inquiry_req ir;
394 	struct hci_dev *hdev;
395 	int err = 0, do_inquiry = 0, max_rsp;
396 	long timeo;
397 	__u8 *buf;
398 
399 	if (copy_from_user(&ir, ptr, sizeof(ir)))
400 		return -EFAULT;
401 
402 	if (!(hdev = hci_dev_get(ir.dev_id)))
403 		return -ENODEV;
404 
405 	hci_dev_lock_bh(hdev);
406 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
407 					inquiry_cache_empty(hdev) ||
408 					ir.flags & IREQ_CACHE_FLUSH) {
409 		inquiry_cache_flush(hdev);
410 		do_inquiry = 1;
411 	}
412 	hci_dev_unlock_bh(hdev);
413 
414 	timeo = ir.length * msecs_to_jiffies(2000);
415 	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
416 		goto done;
417 
418 	/* For an unlimited number of responses, use a buffer with 255 entries */
419 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
420 
421 	/* inquiry_cache_dump() can't sleep (it runs under the BH-disabled
422 	 * device lock), so dump into a temporary buffer first and then
423 	 * copy it to user space. */
424 	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
425 		err = -ENOMEM;
426 		goto done;
427 	}
428 
429 	hci_dev_lock_bh(hdev);
430 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
431 	hci_dev_unlock_bh(hdev);
432 
433 	BT_DBG("num_rsp %d", ir.num_rsp);
434 
435 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
436 		ptr += sizeof(ir);
437 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
438 					ir.num_rsp))
439 			err = -EFAULT;
440 	} else
441 		err = -EFAULT;
442 
443 	kfree(buf);
444 
445 done:
446 	hci_dev_put(hdev);
447 	return err;
448 }
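
/*
 * Note on the HCIINQUIRY ioctl buffer layout: a struct hci_inquiry_req
 * header immediately followed by room for the inquiry_info entries.
 * The header is written back first (with the real num_rsp), then the
 * entries, to the same user pointer.
 */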
449 
450 /* ---- HCI ioctl helpers ---- */
451 
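/*
 * Open sequence: driver open() -> controller init under HCI_INIT via
 * __hci_request(hci_init_req) -> HCI_UP plus an HCI_DEV_UP
 * notification.  Devices with HCI_QUIRK_RAW_DEVICE get HCI_RAW set and
 * skip controller initialization entirely.
 */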
452 int hci_dev_open(__u16 dev)
453 {
454 	struct hci_dev *hdev;
455 	int ret = 0;
456 
457 	if (!(hdev = hci_dev_get(dev)))
458 		return -ENODEV;
459 
460 	BT_DBG("%s %p", hdev->name, hdev);
461 
462 	hci_req_lock(hdev);
463 
464 	if (test_bit(HCI_UP, &hdev->flags)) {
465 		ret = -EALREADY;
466 		goto done;
467 	}
468 
469 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
470 		set_bit(HCI_RAW, &hdev->flags);
471 
472 	if (hdev->open(hdev)) {
473 		ret = -EIO;
474 		goto done;
475 	}
476 
477 	if (!test_bit(HCI_RAW, &hdev->flags)) {
478 		atomic_set(&hdev->cmd_cnt, 1);
479 		set_bit(HCI_INIT, &hdev->flags);
480 
481 		//__hci_request(hdev, hci_reset_req, 0, HZ);
482 		ret = __hci_request(hdev, hci_init_req, 0,
483 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
484 
485 		clear_bit(HCI_INIT, &hdev->flags);
486 	}
487 
488 	if (!ret) {
489 		hci_dev_hold(hdev);
490 		set_bit(HCI_UP, &hdev->flags);
491 		hci_notify(hdev, HCI_DEV_UP);
492 	} else {
493 		/* Init failed, cleanup */
494 		tasklet_kill(&hdev->rx_task);
495 		tasklet_kill(&hdev->tx_task);
496 		tasklet_kill(&hdev->cmd_task);
497 
498 		skb_queue_purge(&hdev->cmd_q);
499 		skb_queue_purge(&hdev->rx_q);
500 
501 		if (hdev->flush)
502 			hdev->flush(hdev);
503 
504 		if (hdev->sent_cmd) {
505 			kfree_skb(hdev->sent_cmd);
506 			hdev->sent_cmd = NULL;
507 		}
508 
509 		hdev->close(hdev);
510 		hdev->flags = 0;
511 	}
512 
513 done:
514 	hci_req_unlock(hdev);
515 	hci_dev_put(hdev);
516 	return ret;
517 }
518 
519 static int hci_dev_do_close(struct hci_dev *hdev)
520 {
521 	BT_DBG("%s %p", hdev->name, hdev);
522 
523 	hci_req_cancel(hdev, ENODEV);
524 	hci_req_lock(hdev);
525 
526 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
527 		hci_req_unlock(hdev);
528 		return 0;
529 	}
530 
531 	/* Kill RX and TX tasks */
532 	tasklet_kill(&hdev->rx_task);
533 	tasklet_kill(&hdev->tx_task);
534 
535 	hci_dev_lock_bh(hdev);
536 	inquiry_cache_flush(hdev);
537 	hci_conn_hash_flush(hdev);
538 	hci_dev_unlock_bh(hdev);
539 
540 	hci_notify(hdev, HCI_DEV_DOWN);
541 
542 	if (hdev->flush)
543 		hdev->flush(hdev);
544 
545 	/* Reset device */
546 	skb_queue_purge(&hdev->cmd_q);
547 	atomic_set(&hdev->cmd_cnt, 1);
548 	if (!test_bit(HCI_RAW, &hdev->flags)) {
549 		set_bit(HCI_INIT, &hdev->flags);
550 		__hci_request(hdev, hci_reset_req, 0,
551 					msecs_to_jiffies(250));
552 		clear_bit(HCI_INIT, &hdev->flags);
553 	}
554 
555 	/* Kill cmd task */
556 	tasklet_kill(&hdev->cmd_task);
557 
558 	/* Drop queues */
559 	skb_queue_purge(&hdev->rx_q);
560 	skb_queue_purge(&hdev->cmd_q);
561 	skb_queue_purge(&hdev->raw_q);
562 
563 	/* Drop last sent command */
564 	if (hdev->sent_cmd) {
565 		kfree_skb(hdev->sent_cmd);
566 		hdev->sent_cmd = NULL;
567 	}
568 
569 	/* After this point our queues are empty
570 	 * and no tasks are scheduled. */
571 	hdev->close(hdev);
572 
573 	/* Clear flags */
574 	hdev->flags = 0;
575 
576 	hci_req_unlock(hdev);
577 
578 	hci_dev_put(hdev);
579 	return 0;
580 }
581 
582 int hci_dev_close(__u16 dev)
583 {
584 	struct hci_dev *hdev;
585 	int err;
586 
587 	if (!(hdev = hci_dev_get(dev)))
588 		return -ENODEV;
589 	err = hci_dev_do_close(hdev);
590 	hci_dev_put(hdev);
591 	return err;
592 }
593 
594 int hci_dev_reset(__u16 dev)
595 {
596 	struct hci_dev *hdev;
597 	int ret = 0;
598 
599 	if (!(hdev = hci_dev_get(dev)))
600 		return -ENODEV;
601 
602 	hci_req_lock(hdev);
603 	tasklet_disable(&hdev->tx_task);
604 
605 	if (!test_bit(HCI_UP, &hdev->flags))
606 		goto done;
607 
608 	/* Drop queues */
609 	skb_queue_purge(&hdev->rx_q);
610 	skb_queue_purge(&hdev->cmd_q);
611 
612 	hci_dev_lock_bh(hdev);
613 	inquiry_cache_flush(hdev);
614 	hci_conn_hash_flush(hdev);
615 	hci_dev_unlock_bh(hdev);
616 
617 	if (hdev->flush)
618 		hdev->flush(hdev);
619 
620 	atomic_set(&hdev->cmd_cnt, 1);
621 	hdev->acl_cnt = 0; hdev->sco_cnt = 0;
622 
623 	if (!test_bit(HCI_RAW, &hdev->flags))
624 		ret = __hci_request(hdev, hci_reset_req, 0,
625 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
626 
627 done:
628 	tasklet_enable(&hdev->tx_task);
629 	hci_req_unlock(hdev);
630 	hci_dev_put(hdev);
631 	return ret;
632 }
633 
634 int hci_dev_reset_stat(__u16 dev)
635 {
636 	struct hci_dev *hdev;
637 	int ret = 0;
638 
639 	if (!(hdev = hci_dev_get(dev)))
640 		return -ENODEV;
641 
642 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
643 
644 	hci_dev_put(hdev);
645 
646 	return ret;
647 }
648 
649 int hci_dev_cmd(unsigned int cmd, void __user *arg)
650 {
651 	struct hci_dev *hdev;
652 	struct hci_dev_req dr;
653 	int err = 0;
654 
655 	if (copy_from_user(&dr, arg, sizeof(dr)))
656 		return -EFAULT;
657 
658 	if (!(hdev = hci_dev_get(dr.dev_id)))
659 		return -ENODEV;
660 
661 	switch (cmd) {
662 	case HCISETAUTH:
663 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
664 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
665 		break;
666 
667 	case HCISETENCRYPT:
668 		if (!lmp_encrypt_capable(hdev)) {
669 			err = -EOPNOTSUPP;
670 			break;
671 		}
672 
673 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
674 			/* Auth must be enabled first */
675 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
676 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
677 			if (err)
678 				break;
679 		}
680 
681 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
682 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
683 		break;
684 
685 	case HCISETSCAN:
686 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
687 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
688 		break;
689 
690 	case HCISETPTYPE:
691 		hdev->pkt_type = (__u16) dr.dev_opt;
692 		break;
693 
694 	case HCISETLINKPOL:
695 		hdev->link_policy = (__u16) dr.dev_opt;
696 		break;
697 
698 	case HCISETLINKMODE:
699 		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
700 		break;
701 
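	/*
	 * For HCISETACLMTU/HCISETSCOMTU, dev_opt packs two 16-bit values;
	 * the halfword loads below pick the MTU from one half and the
	 * packet count from the other in host byte order, so the split is
	 * endianness-dependent.
	 */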
702 	case HCISETACLMTU:
703 		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
704 		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
705 		break;
706 
707 	case HCISETSCOMTU:
708 		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
709 		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
710 		break;
711 
712 	default:
713 		err = -EINVAL;
714 		break;
715 	}
716 	hci_dev_put(hdev);
717 	return err;
718 }
719 
720 int hci_get_dev_list(void __user *arg)
721 {
722 	struct hci_dev_list_req *dl;
723 	struct hci_dev_req *dr;
724 	struct list_head *p;
725 	int n = 0, size, err;
726 	__u16 dev_num;
727 
728 	if (get_user(dev_num, (__u16 __user *) arg))
729 		return -EFAULT;
730 
731 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
732 		return -EINVAL;
733 
734 	size = sizeof(*dl) + dev_num * sizeof(*dr);
735 
736 	if (!(dl = kmalloc(size, GFP_KERNEL)))
737 		return -ENOMEM;
738 
739 	dr = dl->dev_req;
740 
741 	read_lock_bh(&hci_dev_list_lock);
742 	list_for_each(p, &hci_dev_list) {
743 		struct hci_dev *hdev;
744 		hdev = list_entry(p, struct hci_dev, list);
745 		(dr + n)->dev_id  = hdev->id;
746 		(dr + n)->dev_opt = hdev->flags;
747 		if (++n >= dev_num)
748 			break;
749 	}
750 	read_unlock_bh(&hci_dev_list_lock);
751 
752 	dl->dev_num = n;
753 	size = sizeof(*dl) + n * sizeof(*dr);
754 
755 	err = copy_to_user(arg, dl, size);
756 	kfree(dl);
757 
758 	return err ? -EFAULT : 0;
759 }
760 
761 int hci_get_dev_info(void __user *arg)
762 {
763 	struct hci_dev *hdev;
764 	struct hci_dev_info di;
765 	int err = 0;
766 
767 	if (copy_from_user(&di, arg, sizeof(di)))
768 		return -EFAULT;
769 
770 	if (!(hdev = hci_dev_get(di.dev_id)))
771 		return -ENODEV;
772 
773 	strcpy(di.name, hdev->name);
774 	di.bdaddr   = hdev->bdaddr;
775 	di.type     = hdev->type;
776 	di.flags    = hdev->flags;
777 	di.pkt_type = hdev->pkt_type;
778 	di.acl_mtu  = hdev->acl_mtu;
779 	di.acl_pkts = hdev->acl_pkts;
780 	di.sco_mtu  = hdev->sco_mtu;
781 	di.sco_pkts = hdev->sco_pkts;
782 	di.link_policy = hdev->link_policy;
783 	di.link_mode   = hdev->link_mode;
784 
785 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
786 	memcpy(&di.features, &hdev->features, sizeof(di.features));
787 
788 	if (copy_to_user(arg, &di, sizeof(di)))
789 		err = -EFAULT;
790 
791 	hci_dev_put(hdev);
792 
793 	return err;
794 }
795 
796 /* ---- Interface to HCI drivers ---- */
797 
798 /* Alloc HCI device */
799 struct hci_dev *hci_alloc_dev(void)
800 {
801 	struct hci_dev *hdev;
802 
803 	hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
804 	if (!hdev)
805 		return NULL;
806 
807 	memset(hdev, 0, sizeof(struct hci_dev));
808 
809 	skb_queue_head_init(&hdev->driver_init);
810 
811 	return hdev;
812 }
813 EXPORT_SYMBOL(hci_alloc_dev);
814 
815 /* Free HCI device */
816 void hci_free_dev(struct hci_dev *hdev)
817 {
818 	skb_queue_purge(&hdev->driver_init);
819 
820 	/* will free via device release */
821 	put_device(&hdev->dev);
822 }
823 EXPORT_SYMBOL(hci_free_dev);
824 
825 /* Register HCI device */
826 int hci_register_dev(struct hci_dev *hdev)
827 {
828 	struct list_head *head = &hci_dev_list, *p;
829 	int id = 0;
830 
831 	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
832 
833 	if (!hdev->open || !hdev->close || !hdev->destruct)
834 		return -EINVAL;
835 
836 	write_lock_bh(&hci_dev_list_lock);
837 
838 	/* Find first available device id */
839 	list_for_each(p, &hci_dev_list) {
840 		if (list_entry(p, struct hci_dev, list)->id != id)
841 			break;
842 		head = p; id++;
843 	}
844 
845 	sprintf(hdev->name, "hci%d", id);
846 	hdev->id = id;
847 	list_add(&hdev->list, head);
848 
849 	atomic_set(&hdev->refcnt, 1);
850 	spin_lock_init(&hdev->lock);
851 
852 	hdev->flags = 0;
853 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
854 	hdev->link_mode = (HCI_LM_ACCEPT);
855 
856 	hdev->idle_timeout = 0;
857 	hdev->sniff_max_interval = 800;
858 	hdev->sniff_min_interval = 80;
859 
860 	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
861 	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
862 	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
863 
864 	skb_queue_head_init(&hdev->rx_q);
865 	skb_queue_head_init(&hdev->cmd_q);
866 	skb_queue_head_init(&hdev->raw_q);
867 
868 	init_waitqueue_head(&hdev->req_wait_q);
869 	init_MUTEX(&hdev->req_lock);
870 
871 	inquiry_cache_init(hdev);
872 
873 	hci_conn_hash_init(hdev);
874 
875 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
876 
877 	atomic_set(&hdev->promisc, 0);
878 
879 	write_unlock_bh(&hci_dev_list_lock);
880 
881 	hci_register_sysfs(hdev);
882 
883 	hci_notify(hdev, HCI_DEV_REG);
884 
885 	return id;
886 }
887 EXPORT_SYMBOL(hci_register_dev);
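
/*
 * Driver-side registration sketch (hypothetical mydrv_* hooks; the
 * open/close/destruct callbacks are mandatory per the check above, and
 * send is what hci_send_frame() ends up calling):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open     = mydrv_open;
 *	hdev->close    = mydrv_close;
 *	hdev->send     = mydrv_send;
 *	hdev->destruct = mydrv_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */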
888 
889 /* Unregister HCI device */
890 int hci_unregister_dev(struct hci_dev *hdev)
891 {
892 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
893 
894 	hci_unregister_sysfs(hdev);
895 
896 	write_lock_bh(&hci_dev_list_lock);
897 	list_del(&hdev->list);
898 	write_unlock_bh(&hci_dev_list_lock);
899 
900 	hci_dev_do_close(hdev);
901 
902 	hci_notify(hdev, HCI_DEV_UNREG);
903 
904 	__hci_dev_put(hdev);
905 	return 0;
906 }
907 EXPORT_SYMBOL(hci_unregister_dev);
908 
909 /* Suspend HCI device */
910 int hci_suspend_dev(struct hci_dev *hdev)
911 {
912 	hci_notify(hdev, HCI_DEV_SUSPEND);
913 	return 0;
914 }
915 EXPORT_SYMBOL(hci_suspend_dev);
916 
917 /* Resume HCI device */
918 int hci_resume_dev(struct hci_dev *hdev)
919 {
920 	hci_notify(hdev, HCI_DEV_RESUME);
921 	return 0;
922 }
923 EXPORT_SYMBOL(hci_resume_dev);
924 
925 /* ---- Interface to upper protocols ---- */
926 
927 /* Register/Unregister protocols.
928  * hci_task_lock is used to ensure that no tasks are running. */
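/*
 * Only two protocol slots exist: HCI_PROTO_L2CAP for ACL data and
 * HCI_PROTO_SCO for SCO data; see hci_acldata_packet() and
 * hci_scodata_packet() below for the dispatch.
 */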
929 int hci_register_proto(struct hci_proto *hp)
930 {
931 	int err = 0;
932 
933 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
934 
935 	if (hp->id >= HCI_MAX_PROTO)
936 		return -EINVAL;
937 
938 	write_lock_bh(&hci_task_lock);
939 
940 	if (!hci_proto[hp->id])
941 		hci_proto[hp->id] = hp;
942 	else
943 		err = -EEXIST;
944 
945 	write_unlock_bh(&hci_task_lock);
946 
947 	return err;
948 }
949 EXPORT_SYMBOL(hci_register_proto);
950 
951 int hci_unregister_proto(struct hci_proto *hp)
952 {
953 	int err = 0;
954 
955 	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
956 
957 	if (hp->id >= HCI_MAX_PROTO)
958 		return -EINVAL;
959 
960 	write_lock_bh(&hci_task_lock);
961 
962 	if (hci_proto[hp->id])
963 		hci_proto[hp->id] = NULL;
964 	else
965 		err = -ENOENT;
966 
967 	write_unlock_bh(&hci_task_lock);
968 
969 	return err;
970 }
971 EXPORT_SYMBOL(hci_unregister_proto);
972 
973 int hci_register_cb(struct hci_cb *cb)
974 {
975 	BT_DBG("%p name %s", cb, cb->name);
976 
977 	write_lock_bh(&hci_cb_list_lock);
978 	list_add(&cb->list, &hci_cb_list);
979 	write_unlock_bh(&hci_cb_list_lock);
980 
981 	return 0;
982 }
983 EXPORT_SYMBOL(hci_register_cb);
984 
985 int hci_unregister_cb(struct hci_cb *cb)
986 {
987 	BT_DBG("%p name %s", cb, cb->name);
988 
989 	write_lock_bh(&hci_cb_list_lock);
990 	list_del(&cb->list);
991 	write_unlock_bh(&hci_cb_list_lock);
992 
993 	return 0;
994 }
995 EXPORT_SYMBOL(hci_unregister_cb);
996 
997 static int hci_send_frame(struct sk_buff *skb)
998 {
999 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1000 
1001 	if (!hdev) {
1002 		kfree_skb(skb);
1003 		return -ENODEV;
1004 	}
1005 
1006 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1007 
1008 	if (atomic_read(&hdev->promisc)) {
1009 		/* Time stamp */
1010 		__net_timestamp(skb);
1011 
1012 		hci_send_to_sock(hdev, skb);
1013 	}
1014 
1015 	/* Get rid of the skb owner prior to sending to the driver. */
1016 	skb_orphan(skb);
1017 
1018 	return hdev->send(skb);
1019 }
1020 
1021 /* Send HCI command */
1022 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
1023 {
1024 	int len = HCI_COMMAND_HDR_SIZE + plen;
1025 	struct hci_command_hdr *hdr;
1026 	struct sk_buff *skb;
1027 
1028 	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
1029 
1030 	skb = bt_skb_alloc(len, GFP_ATOMIC);
1031 	if (!skb) {
1032 		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
1033 		return -ENOMEM;
1034 	}
1035 
1036 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1037 	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
1038 	hdr->plen   = plen;
1039 
1040 	if (plen)
1041 		memcpy(skb_put(skb, plen), param, plen);
1042 
1043 	BT_DBG("skb len %d", skb->len);
1044 
1045 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1046 	skb->dev = (void *) hdev;
1047 	skb_queue_tail(&hdev->cmd_q, skb);
1048 	hci_sched_cmd(hdev);
1049 
1050 	return 0;
1051 }
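
/*
 * hci_send_cmd() only queues the command; it actually leaves the host
 * in hci_cmd_task() below, one outstanding command at a time, paced by
 * hdev->cmd_cnt (replenished when the controller reports completion).
 */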
1052 
1053 /* Get data from the previously sent command */
1054 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1055 {
1056 	struct hci_command_hdr *hdr;
1057 
1058 	if (!hdev->sent_cmd)
1059 		return NULL;
1060 
1061 	hdr = (void *) hdev->sent_cmd->data;
1062 
1063 	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
1064 		return NULL;
1065 
1066 	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1067 
1068 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1069 }
1070 
1071 /* Send ACL data */
1072 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1073 {
1074 	struct hci_acl_hdr *hdr;
1075 	int len = skb->len;
1076 
1077 	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
1078 	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
1079 	hdr->dlen   = __cpu_to_le16(len);
1080 
1081 	skb->h.raw = (void *) hdr;
1082 }
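
/*
 * The 16-bit ACL handle field carries the 12-bit connection handle in
 * its low bits and the packet-boundary/broadcast flags in the top
 * bits; hci_handle_pack() above and hci_handle()/hci_flags() in the RX
 * path below do the packing and unpacking.
 */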
1083 
1084 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1085 {
1086 	struct hci_dev *hdev = conn->hdev;
1087 	struct sk_buff *list;
1088 
1089 	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1090 
1091 	skb->dev = (void *) hdev;
1092 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1093 	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1094 
1095 	if (!(list = skb_shinfo(skb)->frag_list)) {
1096 		/* Non fragmented */
1097 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1098 
1099 		skb_queue_tail(&conn->data_q, skb);
1100 	} else {
1101 		/* Fragmented */
1102 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1103 
1104 		skb_shinfo(skb)->frag_list = NULL;
1105 
1106 		/* Queue all fragments atomically */
1107 		spin_lock_bh(&conn->data_q.lock);
1108 
1109 		__skb_queue_tail(&conn->data_q, skb);
1110 		do {
1111 			skb = list; list = list->next;
1112 
1113 			skb->dev = (void *) hdev;
1114 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1115 			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1116 
1117 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1118 
1119 			__skb_queue_tail(&conn->data_q, skb);
1120 		} while (list);
1121 
1122 		spin_unlock_bh(&conn->data_q.lock);
1123 	}
1124 
1125 	hci_sched_tx(hdev);
1126 	return 0;
1127 }
1128 EXPORT_SYMBOL(hci_send_acl);
1129 
1130 /* Send SCO data */
1131 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1132 {
1133 	struct hci_dev *hdev = conn->hdev;
1134 	struct hci_sco_hdr hdr;
1135 
1136 	BT_DBG("%s len %d", hdev->name, skb->len);
1137 
1138 	if (skb->len > hdev->sco_mtu) {
1139 		kfree_skb(skb);
1140 		return -EINVAL;
1141 	}
1142 
1143 	hdr.handle = __cpu_to_le16(conn->handle);
1144 	hdr.dlen   = skb->len;
1145 
1146 	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
1147 	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);
1148 
1149 	skb->dev = (void *) hdev;
1150 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1151 	skb_queue_tail(&conn->data_q, skb);
1152 	hci_sched_tx(hdev);
1153 	return 0;
1154 }
1155 EXPORT_SYMBOL(hci_send_sco);
1156 
1157 /* ---- HCI TX task (outgoing data) ---- */
1158 
1159 /* HCI Connection scheduler */
1160 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1161 {
1162 	struct hci_conn_hash *h = &hdev->conn_hash;
1163 	struct hci_conn  *conn = NULL;
1164 	int num = 0, min = ~0;
1165 	struct list_head *p;
1166 
1167 	/* We don't have to lock device here. Connections are always
1168 	 * added and removed with TX task disabled. */
1169 	list_for_each(p, &h->list) {
1170 		struct hci_conn *c;
1171 		c = list_entry(p, struct hci_conn, list);
1172 
1173 		if (c->type != type || c->state != BT_CONNECTED
1174 				|| skb_queue_empty(&c->data_q))
1175 			continue;
1176 		num++;
1177 
1178 		if (c->sent < min) {
1179 			min  = c->sent;
1180 			conn = c;
1181 		}
1182 	}
1183 
1184 	if (conn) {
1185 		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1186 		int q = cnt / num;
1187 		*quote = q ? q : 1;
1188 	} else
1189 		*quote = 0;
1190 
1191 	BT_DBG("conn %p quote %d", conn, *quote);
1192 	return conn;
1193 }
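
/*
 * Worked example: with hdev->acl_cnt == 5 and two ACL connections that
 * both have queued data, the connection with fewer packets in flight
 * is returned with *quote = 5 / 2 = 2, so no single connection can
 * drain all of the controller's buffer credits in one pass.
 */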
1194 
1195 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1196 {
1197 	struct hci_conn_hash *h = &hdev->conn_hash;
1198 	struct list_head *p;
1199 	struct hci_conn  *c;
1200 
1201 	BT_ERR("%s ACL tx timeout", hdev->name);
1202 
1203 	/* Kill stalled connections */
1204 	list_for_each(p, &h->list) {
1205 		c = list_entry(p, struct hci_conn, list);
1206 		if (c->type == ACL_LINK && c->sent) {
1207 			BT_ERR("%s killing stalled ACL connection %s",
1208 				hdev->name, batostr(&c->dst));
1209 			hci_acl_disconn(c, 0x13); /* Remote User Terminated Connection */
1210 		}
1211 	}
1212 }
1213 
1214 static inline void hci_sched_acl(struct hci_dev *hdev)
1215 {
1216 	struct hci_conn *conn;
1217 	struct sk_buff *skb;
1218 	int quote;
1219 
1220 	BT_DBG("%s", hdev->name);
1221 
1222 	if (!test_bit(HCI_RAW, &hdev->flags)) {
1223 		/* ACL tx timeout must be longer than maximum
1224 		 * link supervision timeout (40.9 seconds) */
1225 		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1226 			hci_acl_tx_to(hdev);
1227 	}
1228 
1229 	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1230 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1231 			BT_DBG("skb %p len %d", skb, skb->len);
1232 
1233 			hci_conn_enter_active_mode(conn);
1234 
1235 			hci_send_frame(skb);
1236 			hdev->acl_last_tx = jiffies;
1237 
1238 			hdev->acl_cnt--;
1239 			conn->sent++;
1240 		}
1241 	}
1242 }
1243 
1244 /* Schedule SCO */
1245 static inline void hci_sched_sco(struct hci_dev *hdev)
1246 {
1247 	struct hci_conn *conn;
1248 	struct sk_buff *skb;
1249 	int quote;
1250 
1251 	BT_DBG("%s", hdev->name);
1252 
1253 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1254 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1255 			BT_DBG("skb %p len %d", skb, skb->len);
1256 			hci_send_frame(skb);
1257 
1258 			conn->sent++;
1259 			if (conn->sent == ~0)
1260 				conn->sent = 0;
1261 		}
1262 	}
1263 }
1264 
1265 static void hci_tx_task(unsigned long arg)
1266 {
1267 	struct hci_dev *hdev = (struct hci_dev *) arg;
1268 	struct sk_buff *skb;
1269 
1270 	read_lock(&hci_task_lock);
1271 
1272 	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1273 
1274 	/* Schedule queues and send stuff to HCI driver */
1275 
1276 	hci_sched_acl(hdev);
1277 
1278 	hci_sched_sco(hdev);
1279 
1280 	/* Send next queued raw (unknown type) packet */
1281 	while ((skb = skb_dequeue(&hdev->raw_q)))
1282 		hci_send_frame(skb);
1283 
1284 	read_unlock(&hci_task_lock);
1285 }
1286 
1287 /* ----- HCI RX task (incoming data processing) ----- */
1288 
1289 /* ACL data packet */
1290 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1291 {
1292 	struct hci_acl_hdr *hdr = (void *) skb->data;
1293 	struct hci_conn *conn;
1294 	__u16 handle, flags;
1295 
1296 	skb_pull(skb, HCI_ACL_HDR_SIZE);
1297 
1298 	handle = __le16_to_cpu(hdr->handle);
1299 	flags  = hci_flags(handle);
1300 	handle = hci_handle(handle);
1301 
1302 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1303 
1304 	hdev->stat.acl_rx++;
1305 
1306 	hci_dev_lock(hdev);
1307 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1308 	hci_dev_unlock(hdev);
1309 
1310 	if (conn) {
1311 		register struct hci_proto *hp;
1312 
1313 		hci_conn_enter_active_mode(conn);
1314 
1315 		/* Send to upper protocol */
1316 		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1317 			hp->recv_acldata(conn, skb, flags);
1318 			return;
1319 		}
1320 	} else {
1321 		BT_ERR("%s ACL packet for unknown connection handle %d",
1322 			hdev->name, handle);
1323 	}
1324 
1325 	kfree_skb(skb);
1326 }
1327 
1328 /* SCO data packet */
1329 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1330 {
1331 	struct hci_sco_hdr *hdr = (void *) skb->data;
1332 	struct hci_conn *conn;
1333 	__u16 handle;
1334 
1335 	skb_pull(skb, HCI_SCO_HDR_SIZE);
1336 
1337 	handle = __le16_to_cpu(hdr->handle);
1338 
1339 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1340 
1341 	hdev->stat.sco_rx++;
1342 
1343 	hci_dev_lock(hdev);
1344 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1345 	hci_dev_unlock(hdev);
1346 
1347 	if (conn) {
1348 		register struct hci_proto *hp;
1349 
1350 		/* Send to upper protocol */
1351 		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1352 			hp->recv_scodata(conn, skb);
1353 			return;
1354 		}
1355 	} else {
1356 		BT_ERR("%s SCO packet for unknown connection handle %d",
1357 			hdev->name, handle);
1358 	}
1359 
1360 	kfree_skb(skb);
1361 }
1362 
1363 static void hci_rx_task(unsigned long arg)
1364 {
1365 	struct hci_dev *hdev = (struct hci_dev *) arg;
1366 	struct sk_buff *skb;
1367 
1368 	BT_DBG("%s", hdev->name);
1369 
1370 	read_lock(&hci_task_lock);
1371 
1372 	while ((skb = skb_dequeue(&hdev->rx_q))) {
1373 		if (atomic_read(&hdev->promisc)) {
1374 			/* Send copy to the sockets */
1375 			hci_send_to_sock(hdev, skb);
1376 		}
1377 
1378 		if (test_bit(HCI_RAW, &hdev->flags)) {
1379 			kfree_skb(skb);
1380 			continue;
1381 		}
1382 
1383 		if (test_bit(HCI_INIT, &hdev->flags)) {
1384 			/* Don't process data packets in this state. */
1385 			switch (bt_cb(skb)->pkt_type) {
1386 			case HCI_ACLDATA_PKT:
1387 			case HCI_SCODATA_PKT:
1388 				kfree_skb(skb);
1389 				continue;
1390 			}
1391 		}
1392 
1393 		/* Process frame */
1394 		switch (bt_cb(skb)->pkt_type) {
1395 		case HCI_EVENT_PKT:
1396 			hci_event_packet(hdev, skb);
1397 			break;
1398 
1399 		case HCI_ACLDATA_PKT:
1400 			BT_DBG("%s ACL data packet", hdev->name);
1401 			hci_acldata_packet(hdev, skb);
1402 			break;
1403 
1404 		case HCI_SCODATA_PKT:
1405 			BT_DBG("%s SCO data packet", hdev->name);
1406 			hci_scodata_packet(hdev, skb);
1407 			break;
1408 
1409 		default:
1410 			kfree_skb(skb);
1411 			break;
1412 		}
1413 	}
1414 
1415 	read_unlock(&hci_task_lock);
1416 }
1417 
1418 static void hci_cmd_task(unsigned long arg)
1419 {
1420 	struct hci_dev *hdev = (struct hci_dev *) arg;
1421 	struct sk_buff *skb;
1422 
1423 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1424 
1425 	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1426 		BT_ERR("%s command tx timeout", hdev->name);
1427 		atomic_set(&hdev->cmd_cnt, 1);
1428 	}
1429 
1430 	/* Send queued commands */
1431 	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1432 		if (hdev->sent_cmd)
1433 			kfree_skb(hdev->sent_cmd);
1434 
1435 		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1436 			atomic_dec(&hdev->cmd_cnt);
1437 			hci_send_frame(skb);
1438 			hdev->cmd_last_tx = jiffies;
1439 		} else {
1440 			skb_queue_head(&hdev->cmd_q, skb);
1441 			hci_sched_cmd(hdev);
1442 		}
1443 	}
1444 }
1445