xref: /linux/net/bluetooth/hci_core.c (revision 2dbf708448c836754d25fe6108c5bfe1f5697c95)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31 
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47 
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50 
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53 
54 #define AUTO_OFF_TIMEOUT 2000
55 
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
59 
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63 
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67 
68 /* ---- HCI notifications ---- */
69 
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 	hci_sock_dev_event(hdev, event);
73 }
74 
75 /* ---- HCI requests ---- */
76 
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78 {
79 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80 
81 	/* If this is the init phase check if the completed command matches
82 	 * the last init command, and if not just return.
83 	 */
84 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 		struct sk_buff *skb;
87 
88 		/* Some CSR based controllers generate a spontaneous
89 		 * reset complete event during init and any pending
90 		 * command will never be completed. In such a case we
91 		 * need to resend whatever was the last sent
92 		 * command.
93 		 */
94 
95 		if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96 			return;
97 
98 		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99 		if (skb) {
100 			skb_queue_head(&hdev->cmd_q, skb);
101 			queue_work(hdev->workqueue, &hdev->cmd_work);
102 		}
103 
104 		return;
105 	}
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		wake_up_interruptible(&hdev->req_wait_q);
111 	}
112 }
113 
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
117 
118 	if (hdev->req_status == HCI_REQ_PEND) {
119 		hdev->req_result = err;
120 		hdev->req_status = HCI_REQ_CANCELED;
121 		wake_up_interruptible(&hdev->req_wait_q);
122 	}
123 }
124 
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 					unsigned long opt, __u32 timeout)
128 {
129 	DECLARE_WAITQUEUE(wait, current);
130 	int err = 0;
131 
132 	BT_DBG("%s start", hdev->name);
133 
134 	hdev->req_status = HCI_REQ_PEND;
135 
136 	add_wait_queue(&hdev->req_wait_q, &wait);
137 	set_current_state(TASK_INTERRUPTIBLE);
138 
139 	req(hdev, opt);
140 	schedule_timeout(timeout);
141 
142 	remove_wait_queue(&hdev->req_wait_q, &wait);
143 
144 	if (signal_pending(current))
145 		return -EINTR;
146 
147 	switch (hdev->req_status) {
148 	case HCI_REQ_DONE:
149 		err = -bt_to_errno(hdev->req_result);
150 		break;
151 
152 	case HCI_REQ_CANCELED:
153 		err = -hdev->req_result;
154 		break;
155 
156 	default:
157 		err = -ETIMEDOUT;
158 		break;
159 	}
160 
161 	hdev->req_status = hdev->req_result = 0;
162 
163 	BT_DBG("%s end: err %d", hdev->name, err);
164 
165 	return err;
166 }
167 
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 					unsigned long opt, __u32 timeout)
170 {
171 	int ret;
172 
173 	if (!test_bit(HCI_UP, &hdev->flags))
174 		return -ENETDOWN;
175 
176 	/* Serialize all requests */
177 	hci_req_lock(hdev);
178 	ret = __hci_request(hdev, req, opt, timeout);
179 	hci_req_unlock(hdev);
180 
181 	return ret;
182 }
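
/* Hedged example (not part of the original file): a minimal request
 * callback and its synchronous invocation.  The callback only queues an
 * HCI command; __hci_request() above sleeps until hci_req_complete()
 * reports the result or the timeout fires.  The command chosen here
 * (Read Local Name) is purely illustrative.
 */
static void example_read_name_req(struct hci_dev *hdev, unsigned long opt)
{
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}

static int example_read_local_name(struct hci_dev *hdev)
{
	/* Fails with -ENETDOWN when the device is not HCI_UP. */
	return hci_request(hdev, example_read_name_req, 0,
			   msecs_to_jiffies(HCI_INIT_TIMEOUT));
}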
183 
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186 	BT_DBG("%s %ld", hdev->name, opt);
187 
188 	/* Reset device */
189 	set_bit(HCI_RESET, &hdev->flags);
190 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192 
193 static void bredr_init(struct hci_dev *hdev)
194 {
195 	struct hci_cp_delete_stored_link_key cp;
196 	__le16 param;
197 	__u8 flt_type;
198 
199 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200 
201 	/* Mandatory initialization */
202 
203 	/* Reset */
204 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
205 		set_bit(HCI_RESET, &hdev->flags);
206 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
207 	}
208 
209 	/* Read Local Supported Features */
210 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
211 
212 	/* Read Local Version */
213 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
214 
215 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
217 
218 	/* Read BD Address */
219 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
220 
221 	/* Read Class of Device */
222 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
223 
224 	/* Read Local Name */
225 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
226 
227 	/* Read Voice Setting */
228 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
229 
230 	/* Optional initialization */
231 
232 	/* Clear Event Filters */
233 	flt_type = HCI_FLT_CLEAR_ALL;
234 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
235 
236 	/* Connection accept timeout ~20 secs */
237 	param = cpu_to_le16(0x7d00);
238 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
239 
240 	bacpy(&cp.bdaddr, BDADDR_ANY);
241 	cp.delete_all = 1;
242 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
243 }
244 
245 static void amp_init(struct hci_dev *hdev)
246 {
247 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
248 
249 	/* Reset */
250 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
251 
252 	/* Read Local Version */
253 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
254 }
255 
256 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
257 {
258 	struct sk_buff *skb;
259 
260 	BT_DBG("%s %ld", hdev->name, opt);
261 
262 	/* Driver initialization */
263 
264 	/* Special commands */
265 	while ((skb = skb_dequeue(&hdev->driver_init))) {
266 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
267 		skb->dev = (void *) hdev;
268 
269 		skb_queue_tail(&hdev->cmd_q, skb);
270 		queue_work(hdev->workqueue, &hdev->cmd_work);
271 	}
272 	skb_queue_purge(&hdev->driver_init);
273 
274 	switch (hdev->dev_type) {
275 	case HCI_BREDR:
276 		bredr_init(hdev);
277 		break;
278 
279 	case HCI_AMP:
280 		amp_init(hdev);
281 		break;
282 
283 	default:
284 		BT_ERR("Unknown device type %d", hdev->dev_type);
285 		break;
286 	}
287 
288 }
289 
290 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
291 {
292 	BT_DBG("%s", hdev->name);
293 
294 	/* Read LE buffer size */
295 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
296 }
297 
298 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299 {
300 	__u8 scan = opt;
301 
302 	BT_DBG("%s %x", hdev->name, scan);
303 
304 	/* Inquiry and Page scans */
305 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
306 }
307 
308 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309 {
310 	__u8 auth = opt;
311 
312 	BT_DBG("%s %x", hdev->name, auth);
313 
314 	/* Authentication */
315 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
316 }
317 
318 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319 {
320 	__u8 encrypt = opt;
321 
322 	BT_DBG("%s %x", hdev->name, encrypt);
323 
324 	/* Encryption */
325 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
326 }
327 
328 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
329 {
330 	__le16 policy = cpu_to_le16(opt);
331 
332 	BT_DBG("%s %x", hdev->name, policy);
333 
334 	/* Default link policy */
335 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
336 }
337 
338 /* Get HCI device by index.
339  * Device is held on return. */
340 struct hci_dev *hci_dev_get(int index)
341 {
342 	struct hci_dev *hdev = NULL, *d;
343 
344 	BT_DBG("%d", index);
345 
346 	if (index < 0)
347 		return NULL;
348 
349 	read_lock(&hci_dev_list_lock);
350 	list_for_each_entry(d, &hci_dev_list, list) {
351 		if (d->id == index) {
352 			hdev = hci_dev_hold(d);
353 			break;
354 		}
355 	}
356 	read_unlock(&hci_dev_list_lock);
357 	return hdev;
358 }
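
/* Illustrative hold/put pairing (not from the original source): every
 * successful hci_dev_get() raises the device refcount and must be
 * balanced with hci_dev_put() once the caller is done with the device.
 */
static int example_use_dev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("using %s", hdev->name);

	hci_dev_put(hdev);
	return 0;
}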
359 
360 /* ---- Inquiry support ---- */
361 
362 bool hci_discovery_active(struct hci_dev *hdev)
363 {
364 	struct discovery_state *discov = &hdev->discovery;
365 
366 	switch (discov->state) {
367 	case DISCOVERY_FINDING:
368 	case DISCOVERY_RESOLVING:
369 		return true;
370 
371 	default:
372 		return false;
373 	}
374 }
375 
376 void hci_discovery_set_state(struct hci_dev *hdev, int state)
377 {
378 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
379 
380 	if (hdev->discovery.state == state)
381 		return;
382 
383 	switch (state) {
384 	case DISCOVERY_STOPPED:
385 		if (hdev->discovery.state != DISCOVERY_STARTING)
386 			mgmt_discovering(hdev, 0);
387 		hdev->discovery.type = 0;
388 		break;
389 	case DISCOVERY_STARTING:
390 		break;
391 	case DISCOVERY_FINDING:
392 		mgmt_discovering(hdev, 1);
393 		break;
394 	case DISCOVERY_RESOLVING:
395 		break;
396 	case DISCOVERY_STOPPING:
397 		break;
398 	}
399 
400 	hdev->discovery.state = state;
401 }
402 
403 static void inquiry_cache_flush(struct hci_dev *hdev)
404 {
405 	struct discovery_state *cache = &hdev->discovery;
406 	struct inquiry_entry *p, *n;
407 
408 	list_for_each_entry_safe(p, n, &cache->all, all) {
409 		list_del(&p->all);
410 		kfree(p);
411 	}
412 
413 	INIT_LIST_HEAD(&cache->unknown);
414 	INIT_LIST_HEAD(&cache->resolve);
415 }
416 
417 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418 {
419 	struct discovery_state *cache = &hdev->discovery;
420 	struct inquiry_entry *e;
421 
422 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423 
424 	list_for_each_entry(e, &cache->all, all) {
425 		if (!bacmp(&e->data.bdaddr, bdaddr))
426 			return e;
427 	}
428 
429 	return NULL;
430 }
431 
432 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
433 						       bdaddr_t *bdaddr)
434 {
435 	struct discovery_state *cache = &hdev->discovery;
436 	struct inquiry_entry *e;
437 
438 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439 
440 	list_for_each_entry(e, &cache->unknown, list) {
441 		if (!bacmp(&e->data.bdaddr, bdaddr))
442 			return e;
443 	}
444 
445 	return NULL;
446 }
447 
448 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
449 						       bdaddr_t *bdaddr,
450 						       int state)
451 {
452 	struct discovery_state *cache = &hdev->discovery;
453 	struct inquiry_entry *e;
454 
455 	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
456 
457 	list_for_each_entry(e, &cache->resolve, list) {
458 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459 			return e;
460 		if (!bacmp(&e->data.bdaddr, bdaddr))
461 			return e;
462 	}
463 
464 	return NULL;
465 }
466 
467 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
468 				      struct inquiry_entry *ie)
469 {
470 	struct discovery_state *cache = &hdev->discovery;
471 	struct list_head *pos = &cache->resolve;
472 	struct inquiry_entry *p;
473 
474 	list_del(&ie->list);
475 
476 	list_for_each_entry(p, &cache->resolve, list) {
477 		if (p->name_state != NAME_PENDING &&
478 				abs(p->data.rssi) >= abs(ie->data.rssi))
479 			break;
480 		pos = &p->list;
481 	}
482 
483 	list_add(&ie->list, pos);
484 }
485 
486 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
487 			      bool name_known, bool *ssp)
488 {
489 	struct discovery_state *cache = &hdev->discovery;
490 	struct inquiry_entry *ie;
491 
492 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
493 
494 	if (ssp)
495 		*ssp = data->ssp_mode;
496 
497 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
498 	if (ie) {
499 		if (ie->data.ssp_mode && ssp)
500 			*ssp = true;
501 
502 		if (ie->name_state == NAME_NEEDED &&
503 						data->rssi != ie->data.rssi) {
504 			ie->data.rssi = data->rssi;
505 			hci_inquiry_cache_update_resolve(hdev, ie);
506 		}
507 
508 		goto update;
509 	}
510 
511 	/* Entry not in the cache. Add new one. */
512 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
513 	if (!ie)
514 		return false;
515 
516 	list_add(&ie->all, &cache->all);
517 
518 	if (name_known) {
519 		ie->name_state = NAME_KNOWN;
520 	} else {
521 		ie->name_state = NAME_NOT_KNOWN;
522 		list_add(&ie->list, &cache->unknown);
523 	}
524 
525 update:
526 	if (name_known && ie->name_state != NAME_KNOWN &&
527 					ie->name_state != NAME_PENDING) {
528 		ie->name_state = NAME_KNOWN;
529 		list_del(&ie->list);
530 	}
531 
532 	memcpy(&ie->data, data, sizeof(*data));
533 	ie->timestamp = jiffies;
534 	cache->timestamp = jiffies;
535 
536 	if (ie->name_state == NAME_NOT_KNOWN)
537 		return false;
538 
539 	return true;
540 }
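
/* Hedged sketch of how the update helper above is typically driven from
 * an inquiry result handler (the real callers live in hci_event.c): the
 * caller holds hdev->lock, fills an inquiry_data record, and uses the
 * return value to decide whether a remote name request is still needed.
 */
static void example_handle_inquiry_result(struct hci_dev *hdev,
					  struct inquiry_data *data)
{
	bool name_needed, ssp;

	name_needed = !hci_inquiry_cache_update(hdev, data, false, &ssp);
	if (name_needed)
		BT_DBG("%s name resolution pending for %s", hdev->name,
		       batostr(&data->bdaddr));
}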
541 
542 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
543 {
544 	struct discovery_state *cache = &hdev->discovery;
545 	struct inquiry_info *info = (struct inquiry_info *) buf;
546 	struct inquiry_entry *e;
547 	int copied = 0;
548 
549 	list_for_each_entry(e, &cache->all, all) {
550 		struct inquiry_data *data = &e->data;
551 
552 		if (copied >= num)
553 			break;
554 
555 		bacpy(&info->bdaddr, &data->bdaddr);
556 		info->pscan_rep_mode	= data->pscan_rep_mode;
557 		info->pscan_period_mode	= data->pscan_period_mode;
558 		info->pscan_mode	= data->pscan_mode;
559 		memcpy(info->dev_class, data->dev_class, 3);
560 		info->clock_offset	= data->clock_offset;
561 
562 		info++;
563 		copied++;
564 	}
565 
566 	BT_DBG("cache %p, copied %d", cache, copied);
567 	return copied;
568 }
569 
570 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
571 {
572 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 	struct hci_cp_inquiry cp;
574 
575 	BT_DBG("%s", hdev->name);
576 
577 	if (test_bit(HCI_INQUIRY, &hdev->flags))
578 		return;
579 
580 	/* Start Inquiry */
581 	memcpy(&cp.lap, &ir->lap, 3);
582 	cp.length  = ir->length;
583 	cp.num_rsp = ir->num_rsp;
584 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
585 }
586 
587 int hci_inquiry(void __user *arg)
588 {
589 	__u8 __user *ptr = arg;
590 	struct hci_inquiry_req ir;
591 	struct hci_dev *hdev;
592 	int err = 0, do_inquiry = 0, max_rsp;
593 	long timeo;
594 	__u8 *buf;
595 
596 	if (copy_from_user(&ir, ptr, sizeof(ir)))
597 		return -EFAULT;
598 
599 	hdev = hci_dev_get(ir.dev_id);
600 	if (!hdev)
601 		return -ENODEV;
602 
603 	hci_dev_lock(hdev);
604 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
605 				inquiry_cache_empty(hdev) ||
606 				ir.flags & IREQ_CACHE_FLUSH) {
607 		inquiry_cache_flush(hdev);
608 		do_inquiry = 1;
609 	}
610 	hci_dev_unlock(hdev);
611 
612 	timeo = ir.length * msecs_to_jiffies(2000);
613 
614 	if (do_inquiry) {
615 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
616 		if (err < 0)
617 			goto done;
618 	}
619 
620 	/* For an unlimited number of responses we use a buffer with 255 entries */
621 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
622 
623 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
624 	 * and then copy it to user space.
625 	 */
626 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
627 	if (!buf) {
628 		err = -ENOMEM;
629 		goto done;
630 	}
631 
632 	hci_dev_lock(hdev);
633 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
634 	hci_dev_unlock(hdev);
635 
636 	BT_DBG("num_rsp %d", ir.num_rsp);
637 
638 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
639 		ptr += sizeof(ir);
640 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641 					ir.num_rsp))
642 			err = -EFAULT;
643 	} else
644 		err = -EFAULT;
645 
646 	kfree(buf);
647 
648 done:
649 	hci_dev_put(hdev);
650 	return err;
651 }
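
/* Userspace-side sketch (hedged; not kernel code) of driving the
 * HCIINQUIRY ioctl that hci_inquiry() above implements.  The ioctl buffer
 * must hold the request header followed by room for num_rsp inquiry_info
 * records; values such as the 8 * 1.28 s length are illustrative.
 */
#if 0	/* example only, built against the userspace BlueZ headers */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int hci_sock, int dev_id)
{
	int max_rsp = 255;
	struct hci_inquiry_req *ir;
	int err;

	ir = calloc(1, sizeof(*ir) + max_rsp * sizeof(inquiry_info));
	if (!ir)
		return -1;

	ir->dev_id  = dev_id;
	ir->flags   = IREQ_CACHE_FLUSH;
	ir->lap[0]  = 0x33;		/* GIAC 0x9e8b33, LSB first */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;		/* 8 * 1.28 s */
	ir->num_rsp = max_rsp;

	err = ioctl(hci_sock, HCIINQUIRY, ir);
	free(ir);
	return err;
}
#endif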
652 
653 /* ---- HCI ioctl helpers ---- */
654 
655 int hci_dev_open(__u16 dev)
656 {
657 	struct hci_dev *hdev;
658 	int ret = 0;
659 
660 	hdev = hci_dev_get(dev);
661 	if (!hdev)
662 		return -ENODEV;
663 
664 	BT_DBG("%s %p", hdev->name, hdev);
665 
666 	hci_req_lock(hdev);
667 
668 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
669 		ret = -ENODEV;
670 		goto done;
671 	}
672 
673 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
674 		ret = -ERFKILL;
675 		goto done;
676 	}
677 
678 	if (test_bit(HCI_UP, &hdev->flags)) {
679 		ret = -EALREADY;
680 		goto done;
681 	}
682 
683 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
684 		set_bit(HCI_RAW, &hdev->flags);
685 
686 	/* Treat all non-BR/EDR controllers as raw devices if
687 	   enable_hs is not set */
688 	if (hdev->dev_type != HCI_BREDR && !enable_hs)
689 		set_bit(HCI_RAW, &hdev->flags);
690 
691 	if (hdev->open(hdev)) {
692 		ret = -EIO;
693 		goto done;
694 	}
695 
696 	if (!test_bit(HCI_RAW, &hdev->flags)) {
697 		atomic_set(&hdev->cmd_cnt, 1);
698 		set_bit(HCI_INIT, &hdev->flags);
699 		hdev->init_last_cmd = 0;
700 
701 		ret = __hci_request(hdev, hci_init_req, 0,
702 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
703 
704 		if (lmp_host_le_capable(hdev))
705 			ret = __hci_request(hdev, hci_le_init_req, 0,
706 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
707 
708 		clear_bit(HCI_INIT, &hdev->flags);
709 	}
710 
711 	if (!ret) {
712 		hci_dev_hold(hdev);
713 		set_bit(HCI_UP, &hdev->flags);
714 		hci_notify(hdev, HCI_DEV_UP);
715 		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
716 			hci_dev_lock(hdev);
717 			mgmt_powered(hdev, 1);
718 			hci_dev_unlock(hdev);
719 		}
720 	} else {
721 		/* Init failed, cleanup */
722 		flush_work(&hdev->tx_work);
723 		flush_work(&hdev->cmd_work);
724 		flush_work(&hdev->rx_work);
725 
726 		skb_queue_purge(&hdev->cmd_q);
727 		skb_queue_purge(&hdev->rx_q);
728 
729 		if (hdev->flush)
730 			hdev->flush(hdev);
731 
732 		if (hdev->sent_cmd) {
733 			kfree_skb(hdev->sent_cmd);
734 			hdev->sent_cmd = NULL;
735 		}
736 
737 		hdev->close(hdev);
738 		hdev->flags = 0;
739 	}
740 
741 done:
742 	hci_req_unlock(hdev);
743 	hci_dev_put(hdev);
744 	return ret;
745 }
746 
747 static int hci_dev_do_close(struct hci_dev *hdev)
748 {
749 	BT_DBG("%s %p", hdev->name, hdev);
750 
751 	cancel_work_sync(&hdev->le_scan);
752 
753 	hci_req_cancel(hdev, ENODEV);
754 	hci_req_lock(hdev);
755 
756 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
757 		del_timer_sync(&hdev->cmd_timer);
758 		hci_req_unlock(hdev);
759 		return 0;
760 	}
761 
762 	/* Flush RX and TX works */
763 	flush_work(&hdev->tx_work);
764 	flush_work(&hdev->rx_work);
765 
766 	if (hdev->discov_timeout > 0) {
767 		cancel_delayed_work(&hdev->discov_off);
768 		hdev->discov_timeout = 0;
769 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
770 	}
771 
772 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
773 		cancel_delayed_work(&hdev->service_cache);
774 
775 	cancel_delayed_work_sync(&hdev->le_scan_disable);
776 
777 	hci_dev_lock(hdev);
778 	inquiry_cache_flush(hdev);
779 	hci_conn_hash_flush(hdev);
780 	hci_dev_unlock(hdev);
781 
782 	hci_notify(hdev, HCI_DEV_DOWN);
783 
784 	if (hdev->flush)
785 		hdev->flush(hdev);
786 
787 	/* Reset device */
788 	skb_queue_purge(&hdev->cmd_q);
789 	atomic_set(&hdev->cmd_cnt, 1);
790 	if (!test_bit(HCI_RAW, &hdev->flags) &&
791 				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
792 		set_bit(HCI_INIT, &hdev->flags);
793 		__hci_request(hdev, hci_reset_req, 0,
794 					msecs_to_jiffies(250));
795 		clear_bit(HCI_INIT, &hdev->flags);
796 	}
797 
798 	/* Flush cmd work */
799 	flush_work(&hdev->cmd_work);
800 
801 	/* Drop queues */
802 	skb_queue_purge(&hdev->rx_q);
803 	skb_queue_purge(&hdev->cmd_q);
804 	skb_queue_purge(&hdev->raw_q);
805 
806 	/* Drop last sent command */
807 	if (hdev->sent_cmd) {
808 		del_timer_sync(&hdev->cmd_timer);
809 		kfree_skb(hdev->sent_cmd);
810 		hdev->sent_cmd = NULL;
811 	}
812 
813 	/* After this point our queues are empty
814 	 * and no tasks are scheduled. */
815 	hdev->close(hdev);
816 
817 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
818 		hci_dev_lock(hdev);
819 		mgmt_powered(hdev, 0);
820 		hci_dev_unlock(hdev);
821 	}
822 
823 	/* Clear flags */
824 	hdev->flags = 0;
825 
826 	memset(hdev->eir, 0, sizeof(hdev->eir));
827 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
828 
829 	hci_req_unlock(hdev);
830 
831 	hci_dev_put(hdev);
832 	return 0;
833 }
834 
835 int hci_dev_close(__u16 dev)
836 {
837 	struct hci_dev *hdev;
838 	int err;
839 
840 	hdev = hci_dev_get(dev);
841 	if (!hdev)
842 		return -ENODEV;
843 
844 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
845 		cancel_delayed_work(&hdev->power_off);
846 
847 	err = hci_dev_do_close(hdev);
848 
849 	hci_dev_put(hdev);
850 	return err;
851 }
852 
853 int hci_dev_reset(__u16 dev)
854 {
855 	struct hci_dev *hdev;
856 	int ret = 0;
857 
858 	hdev = hci_dev_get(dev);
859 	if (!hdev)
860 		return -ENODEV;
861 
862 	hci_req_lock(hdev);
863 
864 	if (!test_bit(HCI_UP, &hdev->flags))
865 		goto done;
866 
867 	/* Drop queues */
868 	skb_queue_purge(&hdev->rx_q);
869 	skb_queue_purge(&hdev->cmd_q);
870 
871 	hci_dev_lock(hdev);
872 	inquiry_cache_flush(hdev);
873 	hci_conn_hash_flush(hdev);
874 	hci_dev_unlock(hdev);
875 
876 	if (hdev->flush)
877 		hdev->flush(hdev);
878 
879 	atomic_set(&hdev->cmd_cnt, 1);
880 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
881 
882 	if (!test_bit(HCI_RAW, &hdev->flags))
883 		ret = __hci_request(hdev, hci_reset_req, 0,
884 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
885 
886 done:
887 	hci_req_unlock(hdev);
888 	hci_dev_put(hdev);
889 	return ret;
890 }
891 
892 int hci_dev_reset_stat(__u16 dev)
893 {
894 	struct hci_dev *hdev;
895 	int ret = 0;
896 
897 	hdev = hci_dev_get(dev);
898 	if (!hdev)
899 		return -ENODEV;
900 
901 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
902 
903 	hci_dev_put(hdev);
904 
905 	return ret;
906 }
907 
908 int hci_dev_cmd(unsigned int cmd, void __user *arg)
909 {
910 	struct hci_dev *hdev;
911 	struct hci_dev_req dr;
912 	int err = 0;
913 
914 	if (copy_from_user(&dr, arg, sizeof(dr)))
915 		return -EFAULT;
916 
917 	hdev = hci_dev_get(dr.dev_id);
918 	if (!hdev)
919 		return -ENODEV;
920 
921 	switch (cmd) {
922 	case HCISETAUTH:
923 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
925 		break;
926 
927 	case HCISETENCRYPT:
928 		if (!lmp_encrypt_capable(hdev)) {
929 			err = -EOPNOTSUPP;
930 			break;
931 		}
932 
933 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
934 			/* Auth must be enabled first */
935 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
936 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
937 			if (err)
938 				break;
939 		}
940 
941 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
942 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
943 		break;
944 
945 	case HCISETSCAN:
946 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
947 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
948 		break;
949 
950 	case HCISETLINKPOL:
951 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
952 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
953 		break;
954 
955 	case HCISETLINKMODE:
956 		hdev->link_mode = ((__u16) dr.dev_opt) &
957 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
958 		break;
959 
960 	case HCISETPTYPE:
961 		hdev->pkt_type = (__u16) dr.dev_opt;
962 		break;
963 
964 	case HCISETACLMTU:
965 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
966 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
967 		break;
968 
969 	case HCISETSCOMTU:
970 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
971 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
972 		break;
973 
974 	default:
975 		err = -EINVAL;
976 		break;
977 	}
978 
979 	hci_dev_put(hdev);
980 	return err;
981 }
982 
983 int hci_get_dev_list(void __user *arg)
984 {
985 	struct hci_dev *hdev;
986 	struct hci_dev_list_req *dl;
987 	struct hci_dev_req *dr;
988 	int n = 0, size, err;
989 	__u16 dev_num;
990 
991 	if (get_user(dev_num, (__u16 __user *) arg))
992 		return -EFAULT;
993 
994 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
995 		return -EINVAL;
996 
997 	size = sizeof(*dl) + dev_num * sizeof(*dr);
998 
999 	dl = kzalloc(size, GFP_KERNEL);
1000 	if (!dl)
1001 		return -ENOMEM;
1002 
1003 	dr = dl->dev_req;
1004 
1005 	read_lock(&hci_dev_list_lock);
1006 	list_for_each_entry(hdev, &hci_dev_list, list) {
1007 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1008 			cancel_delayed_work(&hdev->power_off);
1009 
1010 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1011 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1012 
1013 		(dr + n)->dev_id  = hdev->id;
1014 		(dr + n)->dev_opt = hdev->flags;
1015 
1016 		if (++n >= dev_num)
1017 			break;
1018 	}
1019 	read_unlock(&hci_dev_list_lock);
1020 
1021 	dl->dev_num = n;
1022 	size = sizeof(*dl) + n * sizeof(*dr);
1023 
1024 	err = copy_to_user(arg, dl, size);
1025 	kfree(dl);
1026 
1027 	return err ? -EFAULT : 0;
1028 }
1029 
1030 int hci_get_dev_info(void __user *arg)
1031 {
1032 	struct hci_dev *hdev;
1033 	struct hci_dev_info di;
1034 	int err = 0;
1035 
1036 	if (copy_from_user(&di, arg, sizeof(di)))
1037 		return -EFAULT;
1038 
1039 	hdev = hci_dev_get(di.dev_id);
1040 	if (!hdev)
1041 		return -ENODEV;
1042 
1043 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1044 		cancel_delayed_work_sync(&hdev->power_off);
1045 
1046 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1047 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1048 
1049 	strcpy(di.name, hdev->name);
1050 	di.bdaddr   = hdev->bdaddr;
1051 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1052 	di.flags    = hdev->flags;
1053 	di.pkt_type = hdev->pkt_type;
1054 	di.acl_mtu  = hdev->acl_mtu;
1055 	di.acl_pkts = hdev->acl_pkts;
1056 	di.sco_mtu  = hdev->sco_mtu;
1057 	di.sco_pkts = hdev->sco_pkts;
1058 	di.link_policy = hdev->link_policy;
1059 	di.link_mode   = hdev->link_mode;
1060 
1061 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1062 	memcpy(&di.features, &hdev->features, sizeof(di.features));
1063 
1064 	if (copy_to_user(arg, &di, sizeof(di)))
1065 		err = -EFAULT;
1066 
1067 	hci_dev_put(hdev);
1068 
1069 	return err;
1070 }
1071 
1072 /* ---- Interface to HCI drivers ---- */
1073 
1074 static int hci_rfkill_set_block(void *data, bool blocked)
1075 {
1076 	struct hci_dev *hdev = data;
1077 
1078 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1079 
1080 	if (!blocked)
1081 		return 0;
1082 
1083 	hci_dev_do_close(hdev);
1084 
1085 	return 0;
1086 }
1087 
1088 static const struct rfkill_ops hci_rfkill_ops = {
1089 	.set_block = hci_rfkill_set_block,
1090 };
1091 
1092 /* Alloc HCI device */
1093 struct hci_dev *hci_alloc_dev(void)
1094 {
1095 	struct hci_dev *hdev;
1096 
1097 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1098 	if (!hdev)
1099 		return NULL;
1100 
1101 	hci_init_sysfs(hdev);
1102 	skb_queue_head_init(&hdev->driver_init);
1103 
1104 	return hdev;
1105 }
1106 EXPORT_SYMBOL(hci_alloc_dev);
1107 
1108 /* Free HCI device */
1109 void hci_free_dev(struct hci_dev *hdev)
1110 {
1111 	skb_queue_purge(&hdev->driver_init);
1112 
1113 	/* will free via device release */
1114 	put_device(&hdev->dev);
1115 }
1116 EXPORT_SYMBOL(hci_free_dev);
1117 
1118 static void hci_power_on(struct work_struct *work)
1119 {
1120 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1121 
1122 	BT_DBG("%s", hdev->name);
1123 
1124 	if (hci_dev_open(hdev->id) < 0)
1125 		return;
1126 
1127 	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1128 		schedule_delayed_work(&hdev->power_off,
1129 					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1130 
1131 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1132 		mgmt_index_added(hdev);
1133 }
1134 
1135 static void hci_power_off(struct work_struct *work)
1136 {
1137 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1138 							power_off.work);
1139 
1140 	BT_DBG("%s", hdev->name);
1141 
1142 	hci_dev_do_close(hdev);
1143 }
1144 
1145 static void hci_discov_off(struct work_struct *work)
1146 {
1147 	struct hci_dev *hdev;
1148 	u8 scan = SCAN_PAGE;
1149 
1150 	hdev = container_of(work, struct hci_dev, discov_off.work);
1151 
1152 	BT_DBG("%s", hdev->name);
1153 
1154 	hci_dev_lock(hdev);
1155 
1156 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1157 
1158 	hdev->discov_timeout = 0;
1159 
1160 	hci_dev_unlock(hdev);
1161 }
1162 
1163 int hci_uuids_clear(struct hci_dev *hdev)
1164 {
1165 	struct list_head *p, *n;
1166 
1167 	list_for_each_safe(p, n, &hdev->uuids) {
1168 		struct bt_uuid *uuid;
1169 
1170 		uuid = list_entry(p, struct bt_uuid, list);
1171 
1172 		list_del(p);
1173 		kfree(uuid);
1174 	}
1175 
1176 	return 0;
1177 }
1178 
1179 int hci_link_keys_clear(struct hci_dev *hdev)
1180 {
1181 	struct list_head *p, *n;
1182 
1183 	list_for_each_safe(p, n, &hdev->link_keys) {
1184 		struct link_key *key;
1185 
1186 		key = list_entry(p, struct link_key, list);
1187 
1188 		list_del(p);
1189 		kfree(key);
1190 	}
1191 
1192 	return 0;
1193 }
1194 
1195 int hci_smp_ltks_clear(struct hci_dev *hdev)
1196 {
1197 	struct smp_ltk *k, *tmp;
1198 
1199 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1200 		list_del(&k->list);
1201 		kfree(k);
1202 	}
1203 
1204 	return 0;
1205 }
1206 
1207 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1208 {
1209 	struct link_key *k;
1210 
1211 	list_for_each_entry(k, &hdev->link_keys, list)
1212 		if (bacmp(bdaddr, &k->bdaddr) == 0)
1213 			return k;
1214 
1215 	return NULL;
1216 }
1217 
1218 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1219 						u8 key_type, u8 old_key_type)
1220 {
1221 	/* Legacy key */
1222 	if (key_type < 0x03)
1223 		return 1;
1224 
1225 	/* Debug keys are insecure so don't store them persistently */
1226 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1227 		return 0;
1228 
1229 	/* Changed combination key and there's no previous one */
1230 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1231 		return 0;
1232 
1233 	/* Security mode 3 case */
1234 	if (!conn)
1235 		return 1;
1236 
1237 	/* Neither the local nor the remote side had no-bonding as a requirement */
1238 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1239 		return 1;
1240 
1241 	/* Local side had dedicated bonding as requirement */
1242 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1243 		return 1;
1244 
1245 	/* Remote side had dedicated bonding as requirement */
1246 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1247 		return 1;
1248 
1249 	/* If none of the above criteria match, then don't store the key
1250 	 * persistently */
1251 	return 0;
1252 }
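
/* Worked example of the policy above (illustrative): a legacy Combination
 * Key (0x00) is always stored; a Debug Combination Key (0x03) never is;
 * an Unauthenticated Combination Key (0x04) from Secure Simple Pairing is
 * kept only when both sides requested some form of bonding, or when either
 * side requested dedicated bonding; otherwise it is dropped after use.
 */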
1253 
1254 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1255 {
1256 	struct smp_ltk *k;
1257 
1258 	list_for_each_entry(k, &hdev->long_term_keys, list) {
1259 		if (k->ediv != ediv ||
1260 				memcmp(rand, k->rand, sizeof(k->rand)))
1261 			continue;
1262 
1263 		return k;
1264 	}
1265 
1266 	return NULL;
1267 }
1268 EXPORT_SYMBOL(hci_find_ltk);
1269 
1270 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1271 				     u8 addr_type)
1272 {
1273 	struct smp_ltk *k;
1274 
1275 	list_for_each_entry(k, &hdev->long_term_keys, list)
1276 		if (addr_type == k->bdaddr_type &&
1277 					bacmp(bdaddr, &k->bdaddr) == 0)
1278 			return k;
1279 
1280 	return NULL;
1281 }
1282 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1283 
1284 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1285 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1286 {
1287 	struct link_key *key, *old_key;
1288 	u8 old_key_type, persistent;
1289 
1290 	old_key = hci_find_link_key(hdev, bdaddr);
1291 	if (old_key) {
1292 		old_key_type = old_key->type;
1293 		key = old_key;
1294 	} else {
1295 		old_key_type = conn ? conn->key_type : 0xff;
1296 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1297 		if (!key)
1298 			return -ENOMEM;
1299 		list_add(&key->list, &hdev->link_keys);
1300 	}
1301 
1302 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1303 
1304 	/* Some buggy controller combinations generate a changed
1305 	 * combination key for legacy pairing even when there's no
1306 	 * previous key */
1307 	if (type == HCI_LK_CHANGED_COMBINATION &&
1308 					(!conn || conn->remote_auth == 0xff) &&
1309 					old_key_type == 0xff) {
1310 		type = HCI_LK_COMBINATION;
1311 		if (conn)
1312 			conn->key_type = type;
1313 	}
1314 
1315 	bacpy(&key->bdaddr, bdaddr);
1316 	memcpy(key->val, val, 16);
1317 	key->pin_len = pin_len;
1318 
1319 	if (type == HCI_LK_CHANGED_COMBINATION)
1320 		key->type = old_key_type;
1321 	else
1322 		key->type = type;
1323 
1324 	if (!new_key)
1325 		return 0;
1326 
1327 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1328 
1329 	mgmt_new_link_key(hdev, key, persistent);
1330 
1331 	if (!persistent) {
1332 		list_del(&key->list);
1333 		kfree(key);
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1340 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1341 		ediv, u8 rand[8])
1342 {
1343 	struct smp_ltk *key, *old_key;
1344 
1345 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1346 		return 0;
1347 
1348 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1349 	if (old_key)
1350 		key = old_key;
1351 	else {
1352 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1353 		if (!key)
1354 			return -ENOMEM;
1355 		list_add(&key->list, &hdev->long_term_keys);
1356 	}
1357 
1358 	bacpy(&key->bdaddr, bdaddr);
1359 	key->bdaddr_type = addr_type;
1360 	memcpy(key->val, tk, sizeof(key->val));
1361 	key->authenticated = authenticated;
1362 	key->ediv = ediv;
1363 	key->enc_size = enc_size;
1364 	key->type = type;
1365 	memcpy(key->rand, rand, sizeof(key->rand));
1366 
1367 	if (!new_key)
1368 		return 0;
1369 
1370 	if (type & HCI_SMP_LTK)
1371 		mgmt_new_ltk(hdev, key, 1);
1372 
1373 	return 0;
1374 }
1375 
1376 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1377 {
1378 	struct link_key *key;
1379 
1380 	key = hci_find_link_key(hdev, bdaddr);
1381 	if (!key)
1382 		return -ENOENT;
1383 
1384 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1385 
1386 	list_del(&key->list);
1387 	kfree(key);
1388 
1389 	return 0;
1390 }
1391 
1392 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393 {
1394 	struct smp_ltk *k, *tmp;
1395 
1396 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1397 		if (bacmp(bdaddr, &k->bdaddr))
1398 			continue;
1399 
1400 		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1401 
1402 		list_del(&k->list);
1403 		kfree(k);
1404 	}
1405 
1406 	return 0;
1407 }
1408 
1409 /* HCI command timer function */
1410 static void hci_cmd_timer(unsigned long arg)
1411 {
1412 	struct hci_dev *hdev = (void *) arg;
1413 
1414 	BT_ERR("%s command tx timeout", hdev->name);
1415 	atomic_set(&hdev->cmd_cnt, 1);
1416 	queue_work(hdev->workqueue, &hdev->cmd_work);
1417 }
1418 
1419 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1420 					  bdaddr_t *bdaddr)
1421 {
1422 	struct oob_data *data;
1423 
1424 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1425 		if (bacmp(bdaddr, &data->bdaddr) == 0)
1426 			return data;
1427 
1428 	return NULL;
1429 }
1430 
1431 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1432 {
1433 	struct oob_data *data;
1434 
1435 	data = hci_find_remote_oob_data(hdev, bdaddr);
1436 	if (!data)
1437 		return -ENOENT;
1438 
1439 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1440 
1441 	list_del(&data->list);
1442 	kfree(data);
1443 
1444 	return 0;
1445 }
1446 
1447 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1448 {
1449 	struct oob_data *data, *n;
1450 
1451 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1452 		list_del(&data->list);
1453 		kfree(data);
1454 	}
1455 
1456 	return 0;
1457 }
1458 
1459 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1460 			    u8 *randomizer)
1461 {
1462 	struct oob_data *data;
1463 
1464 	data = hci_find_remote_oob_data(hdev, bdaddr);
1465 
1466 	if (!data) {
1467 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1468 		if (!data)
1469 			return -ENOMEM;
1470 
1471 		bacpy(&data->bdaddr, bdaddr);
1472 		list_add(&data->list, &hdev->remote_oob_data);
1473 	}
1474 
1475 	memcpy(data->hash, hash, sizeof(data->hash));
1476 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1477 
1478 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1479 
1480 	return 0;
1481 }
1482 
1483 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1484 {
1485 	struct bdaddr_list *b;
1486 
1487 	list_for_each_entry(b, &hdev->blacklist, list)
1488 		if (bacmp(bdaddr, &b->bdaddr) == 0)
1489 			return b;
1490 
1491 	return NULL;
1492 }
1493 
1494 int hci_blacklist_clear(struct hci_dev *hdev)
1495 {
1496 	struct list_head *p, *n;
1497 
1498 	list_for_each_safe(p, n, &hdev->blacklist) {
1499 		struct bdaddr_list *b;
1500 
1501 		b = list_entry(p, struct bdaddr_list, list);
1502 
1503 		list_del(p);
1504 		kfree(b);
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1511 {
1512 	struct bdaddr_list *entry;
1513 
1514 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1515 		return -EBADF;
1516 
1517 	if (hci_blacklist_lookup(hdev, bdaddr))
1518 		return -EEXIST;
1519 
1520 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1521 	if (!entry)
1522 		return -ENOMEM;
1523 
1524 	bacpy(&entry->bdaddr, bdaddr);
1525 
1526 	list_add(&entry->list, &hdev->blacklist);
1527 
1528 	return mgmt_device_blocked(hdev, bdaddr, type);
1529 }
1530 
1531 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1532 {
1533 	struct bdaddr_list *entry;
1534 
1535 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1536 		return hci_blacklist_clear(hdev);
1537 
1538 	entry = hci_blacklist_lookup(hdev, bdaddr);
1539 	if (!entry)
1540 		return -ENOENT;
1541 
1542 	list_del(&entry->list);
1543 	kfree(entry);
1544 
1545 	return mgmt_device_unblocked(hdev, bdaddr, type);
1546 }
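
/* Hedged usage sketch (not from the original file): the blacklist helpers
 * above expect the caller to hold hdev->lock, and passing BDADDR_ANY to
 * hci_blacklist_del() clears the whole list.  The address type value 0 is
 * illustrative (BR/EDR).
 */
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, 0);
	hci_dev_unlock(hdev);

	return err;
}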
1547 
1548 static void hci_clear_adv_cache(struct work_struct *work)
1549 {
1550 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1551 					    adv_work.work);
1552 
1553 	hci_dev_lock(hdev);
1554 
1555 	hci_adv_entries_clear(hdev);
1556 
1557 	hci_dev_unlock(hdev);
1558 }
1559 
1560 int hci_adv_entries_clear(struct hci_dev *hdev)
1561 {
1562 	struct adv_entry *entry, *tmp;
1563 
1564 	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1565 		list_del(&entry->list);
1566 		kfree(entry);
1567 	}
1568 
1569 	BT_DBG("%s adv cache cleared", hdev->name);
1570 
1571 	return 0;
1572 }
1573 
1574 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1575 {
1576 	struct adv_entry *entry;
1577 
1578 	list_for_each_entry(entry, &hdev->adv_entries, list)
1579 		if (bacmp(bdaddr, &entry->bdaddr) == 0)
1580 			return entry;
1581 
1582 	return NULL;
1583 }
1584 
1585 static inline int is_connectable_adv(u8 evt_type)
1586 {
1587 	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1588 		return 1;
1589 
1590 	return 0;
1591 }
1592 
1593 int hci_add_adv_entry(struct hci_dev *hdev,
1594 					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
1595 		return -EINVAL;
1596 
1597 	/* Only new entries should be added to adv_entries. So, if
1598 	 * bdaddr was found, don't add it. */
1599 	if (hci_find_adv_entry(hdev, &ev->bdaddr))
1600 		return 0;
1601 
1602 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1603 	if (!entry)
1604 		return -ENOMEM;
1605 
1606 	bacpy(&entry->bdaddr, &ev->bdaddr);
1607 	entry->bdaddr_type = ev->bdaddr_type;
1608 
1609 	list_add(&entry->list, &hdev->adv_entries);
1610 
1611 	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1612 				batostr(&entry->bdaddr), entry->bdaddr_type);
1613 
1614 	return 0;
1615 }
1616 
1617 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1618 {
1619 	struct le_scan_params *param =  (struct le_scan_params *) opt;
1620 	struct hci_cp_le_set_scan_param cp;
1621 
1622 	memset(&cp, 0, sizeof(cp));
1623 	cp.type = param->type;
1624 	cp.interval = cpu_to_le16(param->interval);
1625 	cp.window = cpu_to_le16(param->window);
1626 
1627 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1628 }
1629 
1630 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1631 {
1632 	struct hci_cp_le_set_scan_enable cp;
1633 
1634 	memset(&cp, 0, sizeof(cp));
1635 	cp.enable = 1;
1636 
1637 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1638 }
1639 
1640 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1641 			  u16 window, int timeout)
1642 {
1643 	long timeo = msecs_to_jiffies(3000);
1644 	struct le_scan_params param;
1645 	int err;
1646 
1647 	BT_DBG("%s", hdev->name);
1648 
1649 	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1650 		return -EINPROGRESS;
1651 
1652 	param.type = type;
1653 	param.interval = interval;
1654 	param.window = window;
1655 
1656 	hci_req_lock(hdev);
1657 
1658 	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1659 			    timeo);
1660 	if (!err)
1661 		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1662 
1663 	hci_req_unlock(hdev);
1664 
1665 	if (err < 0)
1666 		return err;
1667 
1668 	schedule_delayed_work(&hdev->le_scan_disable,
1669 			      msecs_to_jiffies(timeout));
1670 
1671 	return 0;
1672 }
1673 
1674 static void le_scan_disable_work(struct work_struct *work)
1675 {
1676 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1677 					    le_scan_disable.work);
1678 	struct hci_cp_le_set_scan_enable cp;
1679 
1680 	BT_DBG("%s", hdev->name);
1681 
1682 	memset(&cp, 0, sizeof(cp));
1683 
1684 	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1685 }
1686 
1687 static void le_scan_work(struct work_struct *work)
1688 {
1689 	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1690 	struct le_scan_params *param = &hdev->le_scan_params;
1691 
1692 	BT_DBG("%s", hdev->name);
1693 
1694 	hci_do_le_scan(hdev, param->type, param->interval, param->window,
1695 		       param->timeout);
1696 }
1697 
1698 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1699 		int timeout)
1700 {
1701 	struct le_scan_params *param = &hdev->le_scan_params;
1702 
1703 	BT_DBG("%s", hdev->name);
1704 
1705 	if (work_busy(&hdev->le_scan))
1706 		return -EINPROGRESS;
1707 
1708 	param->type = type;
1709 	param->interval = interval;
1710 	param->window = window;
1711 	param->timeout = timeout;
1712 
1713 	queue_work(system_long_wq, &hdev->le_scan);
1714 
1715 	return 0;
1716 }
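
/* Illustrative entry into the deferred LE scan machinery above; all
 * numeric values are examples only (active scan, 10 ms interval and
 * window expressed in 0.625 ms units, 10.24 s overall timeout).  The
 * scan itself runs later from le_scan_work() on system_long_wq.
 */
static int example_start_le_scan(struct hci_dev *hdev)
{
	return hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10240);
}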
1717 
1718 /* Register HCI device */
1719 int hci_register_dev(struct hci_dev *hdev)
1720 {
1721 	struct list_head *head = &hci_dev_list, *p;
1722 	int i, id, error;
1723 
1724 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1725 
1726 	if (!hdev->open || !hdev->close)
1727 		return -EINVAL;
1728 
1729 	/* Do not allow HCI_AMP devices to register at index 0,
1730 	 * so the index can be used as the AMP controller ID.
1731 	 */
1732 	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1733 
1734 	write_lock(&hci_dev_list_lock);
1735 
1736 	/* Find first available device id */
1737 	list_for_each(p, &hci_dev_list) {
1738 		if (list_entry(p, struct hci_dev, list)->id != id)
1739 			break;
1740 		head = p; id++;
1741 	}
1742 
1743 	sprintf(hdev->name, "hci%d", id);
1744 	hdev->id = id;
1745 	list_add_tail(&hdev->list, head);
1746 
1747 	mutex_init(&hdev->lock);
1748 
1749 	hdev->flags = 0;
1750 	hdev->dev_flags = 0;
1751 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1752 	hdev->esco_type = (ESCO_HV1);
1753 	hdev->link_mode = (HCI_LM_ACCEPT);
1754 	hdev->io_capability = 0x03; /* No Input No Output */
1755 
1756 	hdev->idle_timeout = 0;
1757 	hdev->sniff_max_interval = 800;
1758 	hdev->sniff_min_interval = 80;
1759 
1760 	INIT_WORK(&hdev->rx_work, hci_rx_work);
1761 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1762 	INIT_WORK(&hdev->tx_work, hci_tx_work);
1763 
1764 
1765 	skb_queue_head_init(&hdev->rx_q);
1766 	skb_queue_head_init(&hdev->cmd_q);
1767 	skb_queue_head_init(&hdev->raw_q);
1768 
1769 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1770 
1771 	for (i = 0; i < NUM_REASSEMBLY; i++)
1772 		hdev->reassembly[i] = NULL;
1773 
1774 	init_waitqueue_head(&hdev->req_wait_q);
1775 	mutex_init(&hdev->req_lock);
1776 
1777 	discovery_init(hdev);
1778 
1779 	hci_conn_hash_init(hdev);
1780 
1781 	INIT_LIST_HEAD(&hdev->mgmt_pending);
1782 
1783 	INIT_LIST_HEAD(&hdev->blacklist);
1784 
1785 	INIT_LIST_HEAD(&hdev->uuids);
1786 
1787 	INIT_LIST_HEAD(&hdev->link_keys);
1788 	INIT_LIST_HEAD(&hdev->long_term_keys);
1789 
1790 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1791 
1792 	INIT_LIST_HEAD(&hdev->adv_entries);
1793 
1794 	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1795 	INIT_WORK(&hdev->power_on, hci_power_on);
1796 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1797 
1798 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1799 
1800 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1801 
1802 	atomic_set(&hdev->promisc, 0);
1803 
1804 	INIT_WORK(&hdev->le_scan, le_scan_work);
1805 
1806 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1807 
1808 	write_unlock(&hci_dev_list_lock);
1809 
1810 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1811 							WQ_MEM_RECLAIM, 1);
1812 	if (!hdev->workqueue) {
1813 		error = -ENOMEM;
1814 		goto err;
1815 	}
1816 
1817 	error = hci_add_sysfs(hdev);
1818 	if (error < 0)
1819 		goto err_wqueue;
1820 
1821 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1822 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1823 	if (hdev->rfkill) {
1824 		if (rfkill_register(hdev->rfkill) < 0) {
1825 			rfkill_destroy(hdev->rfkill);
1826 			hdev->rfkill = NULL;
1827 		}
1828 	}
1829 
1830 	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1831 	set_bit(HCI_SETUP, &hdev->dev_flags);
1832 	schedule_work(&hdev->power_on);
1833 
1834 	hci_notify(hdev, HCI_DEV_REG);
1835 	hci_dev_hold(hdev);
1836 
1837 	return id;
1838 
1839 err_wqueue:
1840 	destroy_workqueue(hdev->workqueue);
1841 err:
1842 	write_lock(&hci_dev_list_lock);
1843 	list_del(&hdev->list);
1844 	write_unlock(&hci_dev_list_lock);
1845 
1846 	return error;
1847 }
1848 EXPORT_SYMBOL(hci_register_dev);
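
/* Hedged sketch of the driver-side contract checked above: a transport
 * driver allocates an hci_dev, provides at least open, close and send,
 * and registers it.  All example_* names are hypothetical; a real driver
 * would hand the skb to its hardware instead of freeing it.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *example_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return NULL;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->send  = example_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}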
1849 
1850 /* Unregister HCI device */
1851 void hci_unregister_dev(struct hci_dev *hdev)
1852 {
1853 	int i;
1854 
1855 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1856 
1857 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1858 
1859 	write_lock(&hci_dev_list_lock);
1860 	list_del(&hdev->list);
1861 	write_unlock(&hci_dev_list_lock);
1862 
1863 	hci_dev_do_close(hdev);
1864 
1865 	for (i = 0; i < NUM_REASSEMBLY; i++)
1866 		kfree_skb(hdev->reassembly[i]);
1867 
1868 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1869 				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1870 		hci_dev_lock(hdev);
1871 		mgmt_index_removed(hdev);
1872 		hci_dev_unlock(hdev);
1873 	}
1874 
1875 	/* mgmt_index_removed should take care of emptying the
1876 	 * pending list */
1877 	BUG_ON(!list_empty(&hdev->mgmt_pending));
1878 
1879 	hci_notify(hdev, HCI_DEV_UNREG);
1880 
1881 	if (hdev->rfkill) {
1882 		rfkill_unregister(hdev->rfkill);
1883 		rfkill_destroy(hdev->rfkill);
1884 	}
1885 
1886 	hci_del_sysfs(hdev);
1887 
1888 	cancel_delayed_work_sync(&hdev->adv_work);
1889 
1890 	destroy_workqueue(hdev->workqueue);
1891 
1892 	hci_dev_lock(hdev);
1893 	hci_blacklist_clear(hdev);
1894 	hci_uuids_clear(hdev);
1895 	hci_link_keys_clear(hdev);
1896 	hci_smp_ltks_clear(hdev);
1897 	hci_remote_oob_data_clear(hdev);
1898 	hci_adv_entries_clear(hdev);
1899 	hci_dev_unlock(hdev);
1900 
1901 	hci_dev_put(hdev);
1902 }
1903 EXPORT_SYMBOL(hci_unregister_dev);
1904 
1905 /* Suspend HCI device */
1906 int hci_suspend_dev(struct hci_dev *hdev)
1907 {
1908 	hci_notify(hdev, HCI_DEV_SUSPEND);
1909 	return 0;
1910 }
1911 EXPORT_SYMBOL(hci_suspend_dev);
1912 
1913 /* Resume HCI device */
1914 int hci_resume_dev(struct hci_dev *hdev)
1915 {
1916 	hci_notify(hdev, HCI_DEV_RESUME);
1917 	return 0;
1918 }
1919 EXPORT_SYMBOL(hci_resume_dev);
1920 
1921 /* Receive frame from HCI drivers */
1922 int hci_recv_frame(struct sk_buff *skb)
1923 {
1924 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1925 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1926 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1927 		kfree_skb(skb);
1928 		return -ENXIO;
1929 	}
1930 
1931 	/* Incoming skb */
1932 	bt_cb(skb)->incoming = 1;
1933 
1934 	/* Time stamp */
1935 	__net_timestamp(skb);
1936 
1937 	skb_queue_tail(&hdev->rx_q, skb);
1938 	queue_work(hdev->workqueue, &hdev->rx_work);
1939 
1940 	return 0;
1941 }
1942 EXPORT_SYMBOL(hci_recv_frame);
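
/* Hedged driver RX sketch: wrap one complete HCI packet received from the
 * transport in an skb, tag its type and owning device, and feed it to
 * hci_recv_frame() above.  Buffer and length are assumed to come from the
 * driver's interrupt or URB completion path.
 */
static int example_rx_packet(struct hci_dev *hdev, __u8 pkt_type,
			     const void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = pkt_type;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}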
1943 
1944 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1945 						  int count, __u8 index)
1946 {
1947 	int len = 0;
1948 	int hlen = 0;
1949 	int remain = count;
1950 	struct sk_buff *skb;
1951 	struct bt_skb_cb *scb;
1952 
1953 	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1954 				index >= NUM_REASSEMBLY)
1955 		return -EILSEQ;
1956 
1957 	skb = hdev->reassembly[index];
1958 
1959 	if (!skb) {
1960 		switch (type) {
1961 		case HCI_ACLDATA_PKT:
1962 			len = HCI_MAX_FRAME_SIZE;
1963 			hlen = HCI_ACL_HDR_SIZE;
1964 			break;
1965 		case HCI_EVENT_PKT:
1966 			len = HCI_MAX_EVENT_SIZE;
1967 			hlen = HCI_EVENT_HDR_SIZE;
1968 			break;
1969 		case HCI_SCODATA_PKT:
1970 			len = HCI_MAX_SCO_SIZE;
1971 			hlen = HCI_SCO_HDR_SIZE;
1972 			break;
1973 		}
1974 
1975 		skb = bt_skb_alloc(len, GFP_ATOMIC);
1976 		if (!skb)
1977 			return -ENOMEM;
1978 
1979 		scb = (void *) skb->cb;
1980 		scb->expect = hlen;
1981 		scb->pkt_type = type;
1982 
1983 		skb->dev = (void *) hdev;
1984 		hdev->reassembly[index] = skb;
1985 	}
1986 
1987 	while (count) {
1988 		scb = (void *) skb->cb;
1989 		len = min_t(uint, scb->expect, count);
1990 
1991 		memcpy(skb_put(skb, len), data, len);
1992 
1993 		count -= len;
1994 		data += len;
1995 		scb->expect -= len;
1996 		remain = count;
1997 
1998 		switch (type) {
1999 		case HCI_EVENT_PKT:
2000 			if (skb->len == HCI_EVENT_HDR_SIZE) {
2001 				struct hci_event_hdr *h = hci_event_hdr(skb);
2002 				scb->expect = h->plen;
2003 
2004 				if (skb_tailroom(skb) < scb->expect) {
2005 					kfree_skb(skb);
2006 					hdev->reassembly[index] = NULL;
2007 					return -ENOMEM;
2008 				}
2009 			}
2010 			break;
2011 
2012 		case HCI_ACLDATA_PKT:
2013 			if (skb->len  == HCI_ACL_HDR_SIZE) {
2014 				struct hci_acl_hdr *h = hci_acl_hdr(skb);
2015 				scb->expect = __le16_to_cpu(h->dlen);
2016 
2017 				if (skb_tailroom(skb) < scb->expect) {
2018 					kfree_skb(skb);
2019 					hdev->reassembly[index] = NULL;
2020 					return -ENOMEM;
2021 				}
2022 			}
2023 			break;
2024 
2025 		case HCI_SCODATA_PKT:
2026 			if (skb->len == HCI_SCO_HDR_SIZE) {
2027 				struct hci_sco_hdr *h = hci_sco_hdr(skb);
2028 				scb->expect = h->dlen;
2029 
2030 				if (skb_tailroom(skb) < scb->expect) {
2031 					kfree_skb(skb);
2032 					hdev->reassembly[index] = NULL;
2033 					return -ENOMEM;
2034 				}
2035 			}
2036 			break;
2037 		}
2038 
2039 		if (scb->expect == 0) {
2040 			/* Complete frame */
2041 
2042 			bt_cb(skb)->pkt_type = type;
2043 			hci_recv_frame(skb);
2044 
2045 			hdev->reassembly[index] = NULL;
2046 			return remain;
2047 		}
2048 	}
2049 
2050 	return remain;
2051 }
2052 
2053 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2054 {
2055 	int rem = 0;
2056 
2057 	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2058 		return -EILSEQ;
2059 
2060 	while (count) {
2061 		rem = hci_reassembly(hdev, type, data, count, type - 1);
2062 		if (rem < 0)
2063 			return rem;
2064 
2065 		data += (count - rem);
2066 		count = rem;
2067 	}
2068 
2069 	return rem;
2070 }
2071 EXPORT_SYMBOL(hci_recv_fragment);
2072 
2073 #define STREAM_REASSEMBLY 0
2074 
2075 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2076 {
2077 	int type;
2078 	int rem = 0;
2079 
2080 	while (count) {
2081 		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2082 
2083 		if (!skb) {
2084 			struct { char type; } *pkt;
2085 
2086 			/* Start of the frame */
2087 			pkt = data;
2088 			type = pkt->type;
2089 
2090 			data++;
2091 			count--;
2092 		} else
2093 			type = bt_cb(skb)->pkt_type;
2094 
2095 		rem = hci_reassembly(hdev, type, data, count,
2096 							STREAM_REASSEMBLY);
2097 		if (rem < 0)
2098 			return rem;
2099 
2100 		data += (count - rem);
2101 		count = rem;
2102 	}
2103 
2104 	return rem;
2105 }
2106 EXPORT_SYMBOL(hci_recv_stream_fragment);
2107 
2108 /* ---- Interface to upper protocols ---- */
2109 
2110 int hci_register_cb(struct hci_cb *cb)
2111 {
2112 	BT_DBG("%p name %s", cb, cb->name);
2113 
2114 	write_lock(&hci_cb_list_lock);
2115 	list_add(&cb->list, &hci_cb_list);
2116 	write_unlock(&hci_cb_list_lock);
2117 
2118 	return 0;
2119 }
2120 EXPORT_SYMBOL(hci_register_cb);
2121 
2122 int hci_unregister_cb(struct hci_cb *cb)
2123 {
2124 	BT_DBG("%p name %s", cb, cb->name);
2125 
2126 	write_lock(&hci_cb_list_lock);
2127 	list_del(&cb->list);
2128 	write_unlock(&hci_cb_list_lock);
2129 
2130 	return 0;
2131 }
2132 EXPORT_SYMBOL(hci_unregister_cb);
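
/* Hedged sketch of an upper-protocol hook: fill in only the notifications
 * of interest and register the callback block.  The member names follow
 * struct hci_cb in hci_core.h; the example_* identifiers are hypothetical.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* hci_register_cb(&example_cb) at module init, hci_unregister_cb() at exit. */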
2133 
2134 static int hci_send_frame(struct sk_buff *skb)
2135 {
2136 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2137 
2138 	if (!hdev) {
2139 		kfree_skb(skb);
2140 		return -ENODEV;
2141 	}
2142 
2143 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2144 
2145 	/* Time stamp */
2146 	__net_timestamp(skb);
2147 
2148 	/* Send copy to monitor */
2149 	hci_send_to_monitor(hdev, skb);
2150 
2151 	if (atomic_read(&hdev->promisc)) {
2152 		/* Send copy to the sockets */
2153 		hci_send_to_sock(hdev, skb);
2154 	}
2155 
2156 	/* Get rid of skb owner, prior to sending to the driver. */
2157 	skb_orphan(skb);
2158 
2159 	return hdev->send(skb);
2160 }
2161 
2162 /* Send HCI command */
2163 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2164 {
2165 	int len = HCI_COMMAND_HDR_SIZE + plen;
2166 	struct hci_command_hdr *hdr;
2167 	struct sk_buff *skb;
2168 
2169 	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2170 
2171 	skb = bt_skb_alloc(len, GFP_ATOMIC);
2172 	if (!skb) {
2173 		BT_ERR("%s no memory for command", hdev->name);
2174 		return -ENOMEM;
2175 	}
2176 
2177 	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2178 	hdr->opcode = cpu_to_le16(opcode);
2179 	hdr->plen   = plen;
2180 
2181 	if (plen)
2182 		memcpy(skb_put(skb, plen), param, plen);
2183 
2184 	BT_DBG("skb len %d", skb->len);
2185 
2186 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2187 	skb->dev = (void *) hdev;
2188 
2189 	if (test_bit(HCI_INIT, &hdev->flags))
2190 		hdev->init_last_cmd = opcode;
2191 
2192 	skb_queue_tail(&hdev->cmd_q, skb);
2193 	queue_work(hdev->workqueue, &hdev->cmd_work);
2194 
2195 	return 0;
2196 }
2197 
2198 /* Get data from the previously sent command */
2199 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2200 {
2201 	struct hci_command_hdr *hdr;
2202 
2203 	if (!hdev->sent_cmd)
2204 		return NULL;
2205 
2206 	hdr = (void *) hdev->sent_cmd->data;
2207 
2208 	if (hdr->opcode != cpu_to_le16(opcode))
2209 		return NULL;
2210 
2211 	BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2212 
2213 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2214 }
2215 
2216 /* Send ACL data */
2217 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2218 {
2219 	struct hci_acl_hdr *hdr;
2220 	int len = skb->len;
2221 
2222 	skb_push(skb, HCI_ACL_HDR_SIZE);
2223 	skb_reset_transport_header(skb);
2224 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2225 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2226 	hdr->dlen   = cpu_to_le16(len);
2227 }
2228 
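/* Queue an ACL frame for transmission. If the skb carries a frag_list,
 * the head keeps the caller's flags while every following fragment
 * gets its own ACL header with ACL_CONT set, and the whole run is
 * queued under the queue lock so the fragments stay contiguous.
 */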
2229 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2230 				struct sk_buff *skb, __u16 flags)
2231 {
2232 	struct hci_dev *hdev = conn->hdev;
2233 	struct sk_buff *list;
2234 
2235 	list = skb_shinfo(skb)->frag_list;
2236 	if (!list) {
2237 		/* Non-fragmented */
2238 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2239 
2240 		skb_queue_tail(queue, skb);
2241 	} else {
2242 		/* Fragmented */
2243 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2244 
2245 		skb_shinfo(skb)->frag_list = NULL;
2246 
2247 		/* Queue all fragments atomically */
2248 		spin_lock(&queue->lock);
2249 
2250 		__skb_queue_tail(queue, skb);
2251 
2252 		flags &= ~ACL_START;
2253 		flags |= ACL_CONT;
2254 		do {
2255 			skb = list; list = list->next;
2256 
2257 			skb->dev = (void *) hdev;
2258 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2259 			hci_add_acl_hdr(skb, conn->handle, flags);
2260 
2261 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2262 
2263 			__skb_queue_tail(queue, skb);
2264 		} while (list);
2265 
2266 		spin_unlock(&queue->lock);
2267 	}
2268 }
2269 
2270 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2271 {
2272 	struct hci_conn *conn = chan->conn;
2273 	struct hci_dev *hdev = conn->hdev;
2274 
2275 	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2276 
2277 	skb->dev = (void *) hdev;
2278 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2279 	hci_add_acl_hdr(skb, conn->handle, flags);
2280 
2281 	hci_queue_acl(conn, &chan->data_q, skb, flags);
2282 
2283 	queue_work(hdev->workqueue, &hdev->tx_work);
2284 }
2285 EXPORT_SYMBOL(hci_send_acl);
2286 
2287 /* Send SCO data */
2288 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2289 {
2290 	struct hci_dev *hdev = conn->hdev;
2291 	struct hci_sco_hdr hdr;
2292 
2293 	BT_DBG("%s len %d", hdev->name, skb->len);
2294 
2295 	hdr.handle = cpu_to_le16(conn->handle);
2296 	hdr.dlen   = skb->len;
2297 
2298 	skb_push(skb, HCI_SCO_HDR_SIZE);
2299 	skb_reset_transport_header(skb);
2300 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2301 
2302 	skb->dev = (void *) hdev;
2303 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2304 
2305 	skb_queue_tail(&conn->data_q, skb);
2306 	queue_work(hdev->workqueue, &hdev->tx_work);
2307 }
2308 EXPORT_SYMBOL(hci_send_sco);
2309 
2310 /* ---- HCI TX task (outgoing data) ---- */
2311 
2312 /* HCI Connection scheduler */
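/* Pick the connection of the given link type that should transmit
 * next: among connections in BT_CONNECTED/BT_CONFIG state with queued
 * data, the one with the fewest packets outstanding to the controller
 * wins. *quote is set to the free buffer credits divided by the number
 * of ready connections, but always at least one.
 */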
2313 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2314 {
2315 	struct hci_conn_hash *h = &hdev->conn_hash;
2316 	struct hci_conn *conn = NULL, *c;
2317 	int num = 0, min = ~0;
2318 
2319 	/* We don't have to lock the device here. Connections are always
2320 	 * added and removed with the TX task disabled. */
2321 
2322 	rcu_read_lock();
2323 
2324 	list_for_each_entry_rcu(c, &h->list, list) {
2325 		if (c->type != type || skb_queue_empty(&c->data_q))
2326 			continue;
2327 
2328 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2329 			continue;
2330 
2331 		num++;
2332 
2333 		if (c->sent < min) {
2334 			min  = c->sent;
2335 			conn = c;
2336 		}
2337 
2338 		if (hci_conn_num(hdev, type) == num)
2339 			break;
2340 	}
2341 
2342 	rcu_read_unlock();
2343 
2344 	if (conn) {
2345 		int cnt, q;
2346 
2347 		switch (conn->type) {
2348 		case ACL_LINK:
2349 			cnt = hdev->acl_cnt;
2350 			break;
2351 		case SCO_LINK:
2352 		case ESCO_LINK:
2353 			cnt = hdev->sco_cnt;
2354 			break;
2355 		case LE_LINK:
2356 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2357 			break;
2358 		default:
2359 			cnt = 0;
2360 			BT_ERR("Unknown link type");
2361 		}
2362 
2363 		q = cnt / num;
2364 		*quote = q ? q : 1;
2365 	} else
2366 		*quote = 0;
2367 
2368 	BT_DBG("conn %p quote %d", conn, *quote);
2369 	return conn;
2370 }
2371 
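/* The controller has stopped consuming packets for this link type for
 * longer than the allowed timeout. Disconnect every connection of that
 * type that still has unacknowledged packets, using reason 0x13
 * (remote user terminated connection).
 */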
2372 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2373 {
2374 	struct hci_conn_hash *h = &hdev->conn_hash;
2375 	struct hci_conn *c;
2376 
2377 	BT_ERR("%s link tx timeout", hdev->name);
2378 
2379 	rcu_read_lock();
2380 
2381 	/* Kill stalled connections */
2382 	list_for_each_entry_rcu(c, &h->list, list) {
2383 		if (c->type == type && c->sent) {
2384 			BT_ERR("%s killing stalled connection %s",
2385 				hdev->name, batostr(&c->dst));
2386 			hci_acl_disconn(c, 0x13);
2387 		}
2388 	}
2389 
2390 	rcu_read_unlock();
2391 }
2392 
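/* Channel level variant of hci_low_sent(): only channels whose head
 * skb carries the highest pending priority compete, and among those
 * the channel of the connection with the fewest outstanding packets is
 * picked. *quote is the free buffer credits divided by the number of
 * competing channels, minimum one.
 */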
2393 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2394 						int *quote)
2395 {
2396 	struct hci_conn_hash *h = &hdev->conn_hash;
2397 	struct hci_chan *chan = NULL;
2398 	int num = 0, min = ~0, cur_prio = 0;
2399 	struct hci_conn *conn;
2400 	int cnt, q, conn_num = 0;
2401 
2402 	BT_DBG("%s", hdev->name);
2403 
2404 	rcu_read_lock();
2405 
2406 	list_for_each_entry_rcu(conn, &h->list, list) {
2407 		struct hci_chan *tmp;
2408 
2409 		if (conn->type != type)
2410 			continue;
2411 
2412 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2413 			continue;
2414 
2415 		conn_num++;
2416 
2417 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2418 			struct sk_buff *skb;
2419 
2420 			if (skb_queue_empty(&tmp->data_q))
2421 				continue;
2422 
2423 			skb = skb_peek(&tmp->data_q);
2424 			if (skb->priority < cur_prio)
2425 				continue;
2426 
2427 			if (skb->priority > cur_prio) {
2428 				num = 0;
2429 				min = ~0;
2430 				cur_prio = skb->priority;
2431 			}
2432 
2433 			num++;
2434 
2435 			if (conn->sent < min) {
2436 				min  = conn->sent;
2437 				chan = tmp;
2438 			}
2439 		}
2440 
2441 		if (hci_conn_num(hdev, type) == conn_num)
2442 			break;
2443 	}
2444 
2445 	rcu_read_unlock();
2446 
2447 	if (!chan)
2448 		return NULL;
2449 
2450 	switch (chan->conn->type) {
2451 	case ACL_LINK:
2452 		cnt = hdev->acl_cnt;
2453 		break;
2454 	case SCO_LINK:
2455 	case ESCO_LINK:
2456 		cnt = hdev->sco_cnt;
2457 		break;
2458 	case LE_LINK:
2459 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2460 		break;
2461 	default:
2462 		cnt = 0;
2463 		BT_ERR("Unknown link type");
2464 	}
2465 
2466 	q = cnt / num;
2467 	*quote = q ? q : 1;
2468 	BT_DBG("chan %p quote %d", chan, *quote);
2469 	return chan;
2470 }
2471 
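/* Called after a scheduling round that made progress. Channels that
 * transmitted have their per-round counter cleared; channels that sent
 * nothing but still have queued data get their head skb promoted to
 * HCI_PRIO_MAX - 1 so they are not starved by higher priority traffic.
 */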
2472 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2473 {
2474 	struct hci_conn_hash *h = &hdev->conn_hash;
2475 	struct hci_conn *conn;
2476 	int num = 0;
2477 
2478 	BT_DBG("%s", hdev->name);
2479 
2480 	rcu_read_lock();
2481 
2482 	list_for_each_entry_rcu(conn, &h->list, list) {
2483 		struct hci_chan *chan;
2484 
2485 		if (conn->type != type)
2486 			continue;
2487 
2488 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2489 			continue;
2490 
2491 		num++;
2492 
2493 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2494 			struct sk_buff *skb;
2495 
2496 			if (chan->sent) {
2497 				chan->sent = 0;
2498 				continue;
2499 			}
2500 
2501 			if (skb_queue_empty(&chan->data_q))
2502 				continue;
2503 
2504 			skb = skb_peek(&chan->data_q);
2505 			if (skb->priority >= HCI_PRIO_MAX - 1)
2506 				continue;
2507 
2508 			skb->priority = HCI_PRIO_MAX - 1;
2509 
2510 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2511 								skb->priority);
2512 		}
2513 
2514 		if (hci_conn_num(hdev, type) == num)
2515 			break;
2516 	}
2517 
2518 	rcu_read_unlock();
2519 
2520 }
2521 
2522 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2523 {
2524 	/* Calculate count of blocks used by this packet */
2525 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2526 }
2527 
2528 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2529 {
2530 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2531 		/* ACL tx timeout must be longer than maximum
2532 		 * link supervision timeout (40.9 seconds) */
2533 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2534 					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2535 			hci_link_tx_to(hdev, ACL_LINK);
2536 	}
2537 }
2538 
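/* ACL scheduling with packet based flow control: each transmitted
 * frame consumes one credit from hdev->acl_cnt. Transmission on a
 * channel stops early if the priority of its queued data drops below
 * the priority it was elected with.
 */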
2539 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2540 {
2541 	unsigned int cnt = hdev->acl_cnt;
2542 	struct hci_chan *chan;
2543 	struct sk_buff *skb;
2544 	int quote;
2545 
2546 	__check_timeout(hdev, cnt);
2547 
2548 	while (hdev->acl_cnt &&
2549 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2550 		u32 priority = (skb_peek(&chan->data_q))->priority;
2551 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2552 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2553 					skb->len, skb->priority);
2554 
2555 			/* Stop if priority has changed */
2556 			if (skb->priority < priority)
2557 				break;
2558 
2559 			skb = skb_dequeue(&chan->data_q);
2560 
2561 			hci_conn_enter_active_mode(chan->conn,
2562 						   bt_cb(skb)->force_active);
2563 
2564 			hci_send_frame(skb);
2565 			hdev->acl_last_tx = jiffies;
2566 
2567 			hdev->acl_cnt--;
2568 			chan->sent++;
2569 			chan->conn->sent++;
2570 		}
2571 	}
2572 
2573 	if (cnt != hdev->acl_cnt)
2574 		hci_prio_recalculate(hdev, ACL_LINK);
2575 }
2576 
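/* ACL scheduling with block based flow control: credits are controller
 * data blocks (hdev->block_cnt) and each frame consumes the number of
 * blocks computed by __get_blocks(), so one packet may use several
 * credits.
 */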
2577 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2578 {
2579 	unsigned int cnt = hdev->block_cnt;
2580 	struct hci_chan *chan;
2581 	struct sk_buff *skb;
2582 	int quote;
2583 
2584 	__check_timeout(hdev, cnt);
2585 
2586 	while (hdev->block_cnt > 0 &&
2587 			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2588 		u32 priority = (skb_peek(&chan->data_q))->priority;
2589 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2590 			int blocks;
2591 
2592 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2593 						skb->len, skb->priority);
2594 
2595 			/* Stop if priority has changed */
2596 			if (skb->priority < priority)
2597 				break;
2598 
2599 			skb = skb_dequeue(&chan->data_q);
2600 
2601 			blocks = __get_blocks(hdev, skb);
2602 			if (blocks > hdev->block_cnt)
2603 				return;
2604 
2605 			hci_conn_enter_active_mode(chan->conn,
2606 						bt_cb(skb)->force_active);
2607 
2608 			hci_send_frame(skb);
2609 			hdev->acl_last_tx = jiffies;
2610 
2611 			hdev->block_cnt -= blocks;
2612 			quote -= blocks;
2613 
2614 			chan->sent += blocks;
2615 			chan->conn->sent += blocks;
2616 		}
2617 	}
2618 
2619 	if (cnt != hdev->block_cnt)
2620 		hci_prio_recalculate(hdev, ACL_LINK);
2621 }
2622 
2623 static inline void hci_sched_acl(struct hci_dev *hdev)
2624 {
2625 	BT_DBG("%s", hdev->name);
2626 
2627 	if (!hci_conn_num(hdev, ACL_LINK))
2628 		return;
2629 
2630 	switch (hdev->flow_ctl_mode) {
2631 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
2632 		hci_sched_acl_pkt(hdev);
2633 		break;
2634 
2635 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2636 		hci_sched_acl_blk(hdev);
2637 		break;
2638 	}
2639 }
2640 
2641 /* Schedule SCO */
2642 static inline void hci_sched_sco(struct hci_dev *hdev)
2643 {
2644 	struct hci_conn *conn;
2645 	struct sk_buff *skb;
2646 	int quote;
2647 
2648 	BT_DBG("%s", hdev->name);
2649 
2650 	if (!hci_conn_num(hdev, SCO_LINK))
2651 		return;
2652 
2653 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2654 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2655 			BT_DBG("skb %p len %d", skb, skb->len);
2656 			hci_send_frame(skb);
2657 
2658 			conn->sent++;
2659 			if (conn->sent == ~0)
2660 				conn->sent = 0;
2661 		}
2662 	}
2663 }
2664 
2665 static inline void hci_sched_esco(struct hci_dev *hdev)
2666 {
2667 	struct hci_conn *conn;
2668 	struct sk_buff *skb;
2669 	int quote;
2670 
2671 	BT_DBG("%s", hdev->name);
2672 
2673 	if (!hci_conn_num(hdev, ESCO_LINK))
2674 		return;
2675 
2676 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2677 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2678 			BT_DBG("skb %p len %d", skb, skb->len);
2679 			hci_send_frame(skb);
2680 
2681 			conn->sent++;
2682 			if (conn->sent == ~0)
2683 				conn->sent = 0;
2684 		}
2685 	}
2686 }
2687 
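/* LE scheduling. Controllers without a dedicated LE buffer pool
 * (hdev->le_pkts == 0) share the ACL credits, so the remaining credit
 * count is written back to whichever pool was actually used.
 */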
2688 static inline void hci_sched_le(struct hci_dev *hdev)
2689 {
2690 	struct hci_chan *chan;
2691 	struct sk_buff *skb;
2692 	int quote, cnt, tmp;
2693 
2694 	BT_DBG("%s", hdev->name);
2695 
2696 	if (!hci_conn_num(hdev, LE_LINK))
2697 		return;
2698 
2699 	if (!test_bit(HCI_RAW, &hdev->flags)) {
2700 		/* LE tx timeout must be longer than maximum
2701 		 * link supervision timeout (40.9 seconds) */
2702 		if (!hdev->le_cnt && hdev->le_pkts &&
2703 				time_after(jiffies, hdev->le_last_tx + HZ * 45))
2704 			hci_link_tx_to(hdev, LE_LINK);
2705 	}
2706 
2707 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2708 	tmp = cnt;
2709 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2710 		u32 priority = (skb_peek(&chan->data_q))->priority;
2711 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
2712 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2713 					skb->len, skb->priority);
2714 
2715 			/* Stop if priority has changed */
2716 			if (skb->priority < priority)
2717 				break;
2718 
2719 			skb = skb_dequeue(&chan->data_q);
2720 
2721 			hci_send_frame(skb);
2722 			hdev->le_last_tx = jiffies;
2723 
2724 			cnt--;
2725 			chan->sent++;
2726 			chan->conn->sent++;
2727 		}
2728 	}
2729 
2730 	if (hdev->le_pkts)
2731 		hdev->le_cnt = cnt;
2732 	else
2733 		hdev->acl_cnt = cnt;
2734 
2735 	if (cnt != tmp)
2736 		hci_prio_recalculate(hdev, LE_LINK);
2737 }
2738 
2739 static void hci_tx_work(struct work_struct *work)
2740 {
2741 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2742 	struct sk_buff *skb;
2743 
2744 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2745 		hdev->sco_cnt, hdev->le_cnt);
2746 
2747 	/* Schedule queues and send queued frames to the HCI driver */
2748 
2749 	hci_sched_acl(hdev);
2750 
2751 	hci_sched_sco(hdev);
2752 
2753 	hci_sched_esco(hdev);
2754 
2755 	hci_sched_le(hdev);
2756 
2757 	/* Send next queued raw (unknown type) packet */
2758 	while ((skb = skb_dequeue(&hdev->raw_q)))
2759 		hci_send_frame(skb);
2760 }
2761 
2762 /* ----- HCI RX task (incoming data processing) ----- */
2763 
2764 /* ACL data packet */
2765 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2766 {
2767 	struct hci_acl_hdr *hdr = (void *) skb->data;
2768 	struct hci_conn *conn;
2769 	__u16 handle, flags;
2770 
2771 	skb_pull(skb, HCI_ACL_HDR_SIZE);
2772 
2773 	handle = __le16_to_cpu(hdr->handle);
2774 	flags  = hci_flags(handle);
2775 	handle = hci_handle(handle);
2776 
2777 	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2778 
2779 	hdev->stat.acl_rx++;
2780 
2781 	hci_dev_lock(hdev);
2782 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 	hci_dev_unlock(hdev);
2784 
2785 	if (conn) {
2786 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2787 
2788 		/* Send to upper protocol */
2789 		l2cap_recv_acldata(conn, skb, flags);
2790 		return;
2791 	} else {
2792 		BT_ERR("%s ACL packet for unknown connection handle %d",
2793 			hdev->name, handle);
2794 	}
2795 
2796 	kfree_skb(skb);
2797 }
2798 
2799 /* SCO data packet */
2800 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2801 {
2802 	struct hci_sco_hdr *hdr = (void *) skb->data;
2803 	struct hci_conn *conn;
2804 	__u16 handle;
2805 
2806 	skb_pull(skb, HCI_SCO_HDR_SIZE);
2807 
2808 	handle = __le16_to_cpu(hdr->handle);
2809 
2810 	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2811 
2812 	hdev->stat.sco_rx++;
2813 
2814 	hci_dev_lock(hdev);
2815 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2816 	hci_dev_unlock(hdev);
2817 
2818 	if (conn) {
2819 		/* Send to upper protocol */
2820 		sco_recv_scodata(conn, skb);
2821 		return;
2822 	} else {
2823 		BT_ERR("%s SCO packet for unknown connection handle %d",
2824 			hdev->name, handle);
2825 	}
2826 
2827 	kfree_skb(skb);
2828 }
2829 
2830 static void hci_rx_work(struct work_struct *work)
2831 {
2832 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2833 	struct sk_buff *skb;
2834 
2835 	BT_DBG("%s", hdev->name);
2836 
2837 	while ((skb = skb_dequeue(&hdev->rx_q))) {
2838 		/* Send copy to monitor */
2839 		hci_send_to_monitor(hdev, skb);
2840 
2841 		if (atomic_read(&hdev->promisc)) {
2842 			/* Send copy to the sockets */
2843 			hci_send_to_sock(hdev, skb);
2844 		}
2845 
2846 		if (test_bit(HCI_RAW, &hdev->flags)) {
2847 			kfree_skb(skb);
2848 			continue;
2849 		}
2850 
2851 		if (test_bit(HCI_INIT, &hdev->flags)) {
2852 			/* Don't process data packets in this state. */
2853 			switch (bt_cb(skb)->pkt_type) {
2854 			case HCI_ACLDATA_PKT:
2855 			case HCI_SCODATA_PKT:
2856 				kfree_skb(skb);
2857 				continue;
2858 			}
2859 		}
2860 
2861 		/* Process frame */
2862 		switch (bt_cb(skb)->pkt_type) {
2863 		case HCI_EVENT_PKT:
2864 			BT_DBG("%s Event packet", hdev->name);
2865 			hci_event_packet(hdev, skb);
2866 			break;
2867 
2868 		case HCI_ACLDATA_PKT:
2869 			BT_DBG("%s ACL data packet", hdev->name);
2870 			hci_acldata_packet(hdev, skb);
2871 			break;
2872 
2873 		case HCI_SCODATA_PKT:
2874 			BT_DBG("%s SCO data packet", hdev->name);
2875 			hci_scodata_packet(hdev, skb);
2876 			break;
2877 
2878 		default:
2879 			kfree_skb(skb);
2880 			break;
2881 		}
2882 	}
2883 }
2884 
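/* Drain the command queue. At most one command is sent per invocation
 * and only while command credits (hdev->cmd_cnt) are available. A
 * clone of the frame is kept in hdev->sent_cmd for hci_sent_cmd_data()
 * and the command timer is armed unless an HCI reset is in flight.
 */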
2885 static void hci_cmd_work(struct work_struct *work)
2886 {
2887 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2888 	struct sk_buff *skb;
2889 
2890 	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2891 
2892 	/* Send queued commands */
2893 	if (atomic_read(&hdev->cmd_cnt)) {
2894 		skb = skb_dequeue(&hdev->cmd_q);
2895 		if (!skb)
2896 			return;
2897 
2898 		kfree_skb(hdev->sent_cmd);
2899 
2900 		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2901 		if (hdev->sent_cmd) {
2902 			atomic_dec(&hdev->cmd_cnt);
2903 			hci_send_frame(skb);
2904 			if (test_bit(HCI_RESET, &hdev->flags))
2905 				del_timer(&hdev->cmd_timer);
2906 			else
2907 				mod_timer(&hdev->cmd_timer,
2908 				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2909 		} else {
2910 			skb_queue_head(&hdev->cmd_q, skb);
2911 			queue_work(hdev->workqueue, &hdev->cmd_work);
2912 		}
2913 	}
2914 }
2915 
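/* Start a general inquiry (GIAC) with the given length. The inquiry
 * cache is flushed first; returns -EINPROGRESS if an inquiry is
 * already running.
 */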
2916 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2917 {
2918 	/* General inquiry access code (GIAC) */
2919 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2920 	struct hci_cp_inquiry cp;
2921 
2922 	BT_DBG("%s", hdev->name);
2923 
2924 	if (test_bit(HCI_INQUIRY, &hdev->flags))
2925 		return -EINPROGRESS;
2926 
2927 	inquiry_cache_flush(hdev);
2928 
2929 	memset(&cp, 0, sizeof(cp));
2930 	memcpy(&cp.lap, lap, sizeof(cp.lap));
2931 	cp.length  = length;
2932 
2933 	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2934 }
2935 
2936 int hci_cancel_inquiry(struct hci_dev *hdev)
2937 {
2938 	BT_DBG("%s", hdev->name);
2939 
2940 	if (!test_bit(HCI_INQUIRY, &hdev->flags))
2941 		return -EPERM;
2942 
2943 	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2944 }
2945